From c797ba2b655647f0ab5333f5ccb1442e37ed7055 Mon Sep 17 00:00:00 2001
From: tangwang
Date: Tue, 9 Dec 2025 11:24:06 +0800
Subject: [PATCH] Incremental index API: adjust the response format after
 adding delete operations, since the request now carries two lists

---
 api/routes/indexer.py          |   6 ++++++
 docs/搜索API对接指南.md        | 154 ++++++++++++++++++++++-----------
 indexer/incremental_service.py | 286 ++++++++++++++-----------------------
 3 files changed, 248 insertions(+), 198 deletions(-)

diff --git a/api/routes/indexer.py b/api/routes/indexer.py
index 2ae2cc3..937fbdf 100644
--- a/api/routes/indexer.py
+++ b/api/routes/indexer.py
@@ -73,6 +73,12 @@ async def index_spus(request: IndexSpusRequest):
     - The database is the single source of truth
     - Auto-detection: a DB lookup that finds deleted=1 removes the document from ES automatically
    - Explicit deletion: the caller knows exactly which SPUs to delete and deletes them directly (suited to bulk-delete scenarios)
+
+    Response format:
+    - spu_ids: result list for spu_ids; each element carries spu_id and status (indexed/deleted/failed)
+    - delete_spu_ids: result list for delete_spu_ids; each element carries spu_id and status (deleted/not_found/failed)
+    - elements with status=failed also carry a msg field explaining the failure
+    - overall statistics follow: total, success_count, failed_count, etc.
     """
     try:
         from ..app import get_incremental_service, get_es_client

diff --git a/docs/搜索API对接指南.md b/docs/搜索API对接指南.md
index 36e943b..261dda1 100644
--- a/docs/搜索API对接指南.md
+++ b/docs/搜索API对接指南.md
@@ -131,7 +131,7 @@ curl -X POST "http://120.76.41.98:6002/search/" \
 | Instant search | GET | `/search/instant` | Search-as-you-type (scaffold) ⚠️ TODO |
 | Get document | GET | `/search/{doc_id}` | Fetch a single document |
 | Full reindex | POST | `/indexer/reindex` | Rebuild the entire index |
-| Incremental index | POST | `/indexer/index` | Incremental indexing (index the given SPU ID list) |
+| Incremental index | POST | `/indexer/index` | Incremental indexing (index the given SPU ID list; supports auto-detected and explicit deletion) |
 | Query documents | POST | `/indexer/documents` | Fetch SPU document data (without writing to ES) |
 | Indexer health check | GET | `/indexer/health` | Check indexer service status |
 | Health check | GET | `/admin/health` | Service health check |
@@ -918,54 +918,107 @@ cat logs/indexer.log | jq 'select(.operation == "request_complete") | {timestamp
 - **Endpoint**: `POST /indexer/index`
 - **Description**: Incremental indexing: indexes the specified SPU IDs and writes the data straight to ES. Used for incremental updates of selected products.

+**Two deletion modes are supported**:
+1. **Auto-detected deletion**: an SPU marked `deleted=1` in the database has its document removed from ES automatically
+2. **Explicit deletion**: the `delete_spu_ids` parameter names the SPUs to delete (regardless of their database state)
+
+**Deletion strategy**:
+- The database is the single source of truth
+- Auto-detection: a DB lookup that finds `deleted=1` triggers removal from ES
+- Explicit deletion: the caller knows exactly which SPUs to delete and deletes them directly (suited to bulk-delete scenarios)
+
 #### Request parameters

 ```json
 {
   "tenant_id": "162",
-  "spu_ids": ["123", "456", "789"]
+  "spu_ids": ["123", "456", "789"],
+  "delete_spu_ids": ["100", "101"]
 }
 ```

 | Parameter | Type | Required | Description |
 |------|------|------|------|
 | `tenant_id` | string | Y | Tenant ID |
-| `spu_ids` | array[string] | Y | SPU ID list (1-100 entries) |
+| `spu_ids` | array[string] | N | SPU ID list (1-100 entries) to index. If empty, only deletions are performed |
+| `delete_spu_ids` | array[string] | N | Optional list (1-100 entries) of SPU IDs to delete explicitly, regardless of their database state |
+
+**Notes**:
+- `spu_ids` and `delete_spu_ids` must not both be empty
+- Each list holds at most 100 SPU IDs
+- An SPU in `spu_ids` whose database row has `deleted=1` is removed from ES automatically (auto-detected deletion)

 #### Response format

 ```json
 {
-  "success": [
+  "spu_ids": [
     {
       "spu_id": "123",
-      "document": {
-        "tenant_id": "162",
-        "spu_id": "123",
-        "title_zh": "商品标题",
-        ...
-      }
+      "status": "indexed"
     },
     {
       "spu_id": "456",
-      "document": {...}
+      "status": "deleted"
+    },
+    {
+      "spu_id": "789",
+      "status": "failed",
+      "msg": "SPU not found (unexpected)"
     }
   ],
-  "failed": [
+  "delete_spu_ids": [
     {
-      "spu_id": "789",
-      "error": "SPU not found or deleted"
+      "spu_id": "100",
+      "status": "deleted"
+    },
+    {
+      "spu_id": "101",
+      "status": "not_found"
+    },
+    {
+      "spu_id": "102",
+      "status": "failed",
+      "msg": "Failed to delete from ES: Connection timeout"
     }
   ],
-  "total": 3,
-  "success_count": 2,
-  "failed_count": 1
+  "total": 6,
+  "success_count": 4,
+  "failed_count": 2,
+  "elapsed_time": 1.23,
+  "index_name": "search_products",
+  "tenant_id": "162"
 }
 ```

+| Field | Type | Description |
+|------|------|------|
+| `spu_ids` | array | Result list for `spu_ids`; each element carries `spu_id` and `status` |
+| `spu_ids[].status` | string | `indexed` (indexed), `deleted` (removed, auto-detected), or `failed` |
+| `spu_ids[].msg` | string | Failure reason when status is `failed` (optional) |
+| `delete_spu_ids` | array | Result list for `delete_spu_ids`; each element carries `spu_id` and `status` |
+| `delete_spu_ids[].status` | string | `deleted` (removed), `not_found` (absent from ES), or `failed` |
+| `delete_spu_ids[].msg` | string | Failure reason when status is `failed` (optional) |
+| `total` | integer | Total processed (count of spu_ids + count of delete_spu_ids) |
+| `success_count` | integer | Success count (indexed + deleted + not_found) |
+| `failed_count` | integer | Failure count |
+| `elapsed_time` | float | Elapsed time in seconds |
+| `index_name` | string | Index name |
+| `tenant_id` | string | Tenant ID |
+
+**Status values** (a client-side sketch follows this list):
+- For `spu_ids`:
+  - `indexed`: the SPU was indexed into ES
+  - `deleted`: the SPU is marked deleted=1 in the database and has been removed from ES (auto-detected)
+  - `failed`: processing failed; a `msg` field explains why
+- For `delete_spu_ids`:
+  - `deleted`: the SPU was removed from ES
+  - `not_found`: the SPU does not exist in ES (still counted as success; it may have been deleted earlier)
+  - `failed`: deletion failed; a `msg` field explains why
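+
+For illustration, a minimal client-side sketch of consuming this response. The
+`requests` usage and the `BASE_URL` value are assumptions, not part of the
+service; only the response shape documented above is taken from this API:
+
+```python
+import requests
+
+BASE_URL = "http://localhost:6002"  # assumed; match your deployment
+
+resp = requests.post(
+    f"{BASE_URL}/indexer/index",
+    json={"tenant_id": "162", "spu_ids": ["123", "456"], "delete_spu_ids": ["100"]},
+    timeout=30,
+)
+resp.raise_for_status()
+body = resp.json()
+
+# Collect failures from both result lists; successful entries need no follow-up.
+failed = [r for r in body["spu_ids"] + body["delete_spu_ids"]
+          if r["status"] == "failed"]
+for r in failed:
+    print(f"SPU {r['spu_id']} failed: {r.get('msg', 'unknown reason')}")
+```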

 #### Request examples

-**Incremental SPU indexing**:
+**Example 1: plain incremental indexing (auto-detected deletion)**:
 ```bash
 curl -X POST "http://localhost:6002/indexer/index" \
   -H "Content-Type: application/json" \
   -d '{
     "tenant_id": "162",
     "spu_ids": ["123", "456", "789"]
   }'
 ```
+Note: if SPU 456 has `deleted=1` in the database, it is removed from ES automatically and shows up in the `spu_ids` result list with `status: "deleted"`.
+
+**Example 2: explicit deletion (bulk delete)**:
+```bash
+curl -X POST "http://localhost:6002/indexer/index" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tenant_id": "162",
+    "spu_ids": ["123", "456"],
+    "delete_spu_ids": ["100", "101", "102"]
+  }'
+```
+Note: SPUs 100, 101, and 102 are deleted explicitly, regardless of their database state.
+
+**Example 3: delete only (no indexing)**:
+```bash
+curl -X POST "http://localhost:6002/indexer/index" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tenant_id": "162",
+    "spu_ids": [],
+    "delete_spu_ids": ["100", "101"]
+  }'
+```
+Note: only deletions are performed; nothing is indexed.
+
+**Example 4: mixed operation (index + delete)**:
+```bash
+curl -X POST "http://localhost:6002/indexer/index" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "tenant_id": "162",
+    "spu_ids": ["123", "456", "789"],
+    "delete_spu_ids": ["100", "101"]
+  }'
+```
+Note: indexing and deletion run in the same request.
+
+#### Deletion strategy best practices
+
+1. **Prefer auto-detected deletion** (recommended)
+   - The database is the single source of truth, which keeps data consistent
+   - No extra work for the caller; the system detects and deletes automatically
+   - Suited to routine incremental updates
+
+2. **Use explicit deletion for special cases**
+   - Bulk delete operations
+   - Immediate deletion (without waiting for the database update)
+   - Special business-logic needs
+
+3. **Monitor deletion statistics** (see the sketch after this list)
+   - Watch deletions via the `status` values in the `spu_ids` and `delete_spu_ids` result lists
+   - Auto-detected deletions appear under `spu_ids` and explicit ones under `delete_spu_ids`, which eases troubleshooting
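+
+A possible monitoring sketch (a hypothetical helper, not part of the service;
+it only assumes the response shape documented above):
+
+```python
+from collections import Counter
+
+def deletion_stats(body: dict) -> dict:
+    """Tally deletion outcomes from an /indexer/index response body."""
+    spu_statuses = Counter(r["status"] for r in body["spu_ids"])
+    del_statuses = Counter(r["status"] for r in body["delete_spu_ids"])
+    return {
+        "auto_deleted": spu_statuses["deleted"],      # detected via deleted=1 in DB
+        "explicit_deleted": del_statuses["deleted"],  # requested via delete_spu_ids
+        "already_gone": del_statuses["not_found"],    # absent from ES, still a success
+        "failed": spu_statuses["failed"] + del_statuses["failed"],
+    }
+```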

 #### Logging

 Every key step of an incremental indexing run is written to `logs/indexer.log` (JSON format), including:
 - Request start and end times
-- Per-SPU processing status (fetch, transform, index)
+- Per-SPU processing status (fetch, transform, index, delete)
 - ES bulk write results
 - Success/failure statistics
+- Deletion statistics (explicit and auto-detected counts)
 - Detailed error messages

 For log queries, see [section 5.1, viewing index logs](#51-全量重建索引接口).

+**Deletion-related log examples**:
+```bash
+# Show deletion logs
+cat logs/indexer.log | jq 'select(.status == "deleted")'
+
+# Show request-completion logs that include deletions
+cat logs/indexer.log | jq 'select(.operation == "request_complete" and .deleted_count > 0)'
+```
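+
+Where `jq` is unavailable, a rough Python equivalent (it assumes one JSON object
+per line in `logs/indexer.log`, as the jq examples above imply):
+
+```python
+import json
+
+with open("logs/indexer.log", encoding="utf-8") as fh:
+    for line in fh:
+        try:
+            entry = json.loads(line)
+        except json.JSONDecodeError:
+            continue  # defensively skip any non-JSON lines
+        if entry.get("status") == "deleted":
+            print(entry)
+```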

 ### 5.3 Query documents API

 - **Endpoint**: `POST /indexer/documents`
diff --git a/indexer/incremental_service.py b/indexer/incremental_service.py
index 5f8dcf2..4af4847 100644
--- a/indexer/incremental_service.py
+++ b/indexer/incremental_service.py
@@ -141,93 +141,46 @@ class IncrementalIndexerService:
             return deleted == b'\x01' or deleted == 1
         return bool(deleted)

-    def delete_spus_from_es(
+    def _delete_spu_from_es(
         self,
         es_client,
         tenant_id: str,
-        spu_ids: List[str],
-        index_name: str = DEFAULT_INDEX_NAME
+        spu_id: str,
+        index_name: str,
+        log_prefix: str = ""
     ) -> Dict[str, Any]:
         """
-        Bulk-delete SPU documents from ES
-
-        Args:
-            es_client: Elasticsearch client
-            tenant_id: tenant ID
-            spu_ids: list of SPU IDs to delete
-            index_name: index name
+        Delete a single SPU document from ES (shared helper)

         Returns:
-            Dict with the deletion results
+            {"status": "deleted|not_found|failed", "msg": "error message (optional)"}
         """
-        if not spu_ids:
-            return {
-                "deleted": [],
-                "not_found": [],
-                "failed": [],
-                "total": 0,
-                "deleted_count": 0,
-                "not_found_count": 0,
-                "failed_count": 0
-            }
-
-        deleted_list = []
-        not_found_list = []
-        failed_list = []
-
-        logger.info(f"[IncrementalDeletion] Starting deletion for tenant_id={tenant_id}, spu_count={len(spu_ids)}")
-
-        for spu_id in spu_ids:
-            try:
-                # Delete the document via the ES delete API
-                # ES document ID format: usually spu_id, but the actual format needs confirming
-                # Per index_spus_to_es, spu_id is used as the document ID
-                try:
-                    response = es_client.client.delete(
-                        index=index_name,
-                        id=str(spu_id),
-                        ignore=[404]  # ignore missing-document errors
-                    )
-
-                    if response.get('result') == 'deleted':
-                        deleted_list.append({"spu_id": spu_id, "status": "deleted"})
-                        log_spu_processing(indexer_logger, tenant_id, spu_id, 'deleted')
-                    elif response.get('result') == 'not_found':
-                        not_found_list.append({"spu_id": spu_id, "status": "not_found"})
-                        logger.debug(f"[IncrementalDeletion] SPU {spu_id} not found in ES")
-                    else:
-                        failed_list.append({"spu_id": spu_id, "error": f"Unexpected result: {response.get('result')}"})
-
-                except Exception as e:
-                    # Handle 404 (document missing)
-                    if hasattr(e, 'status_code') and e.status_code == 404:
-                        not_found_list.append({"spu_id": spu_id, "status": "not_found"})
-                    else:
-                        error_msg = str(e)
-                        logger.error(f"[IncrementalDeletion] Error deleting SPU {spu_id}: {e}", exc_info=True)
-                        failed_list.append({"spu_id": spu_id, "error": error_msg})
-                        log_spu_processing(indexer_logger, tenant_id, spu_id, 'delete_failed', error_msg)
-
-            except Exception as e:
-                error_msg = str(e)
-                logger.error(f"[IncrementalDeletion] Unexpected error deleting SPU {spu_id}: {e}", exc_info=True)
-                failed_list.append({"spu_id": spu_id, "error": error_msg})
-
-        logger.info(
-            f"[IncrementalDeletion] Completed for tenant_id={tenant_id}: "
-            f"total={len(spu_ids)}, deleted={len(deleted_list)}, "
-            f"not_found={len(not_found_list)}, failed={len(failed_list)}"
-        )
-
-        return {
-            "deleted": deleted_list,
-            "not_found": not_found_list,
-            "failed": failed_list,
-            "total": len(spu_ids),
-            "deleted_count": len(deleted_list),
-            "not_found_count": len(not_found_list),
-            "failed_count": len(failed_list)
-        }
+        try:
+            response = es_client.client.delete(
+                index=index_name,
+                id=str(spu_id),
+                ignore=[404]
+            )
+
+            result = response.get('result')
+            if result == 'deleted':
+                log_spu_processing(indexer_logger, tenant_id, spu_id, 'deleted', log_prefix)
+                return {"status": "deleted"}
+            elif result == 'not_found':
+                return {"status": "not_found"}
+            else:
+                msg = f"Unexpected result: {result}"
+                log_spu_processing(indexer_logger, tenant_id, spu_id, 'delete_failed', msg)
+                return {"status": "failed", "msg": msg}
+
+        except Exception as e:
+            if hasattr(e, 'status_code') and e.status_code == 404:
+                return {"status": "not_found"}
+            else:
+                msg = str(e)
+                logger.error(f"[IncrementalDeletion] Error deleting SPU {spu_id}: {e}", exc_info=True)
+                log_spu_processing(indexer_logger, tenant_id, spu_id, 'delete_failed', msg)
+                return {"status": "failed", "msg": msg}

     def _load_skus_for_spu(self, tenant_id: str, spu_id: str) -> pd.DataFrame:
         """Load all SKU data for the given SPU"""
@@ -296,11 +249,14 @@ class IncrementalIndexerService:
         """
         start_time = time.time()
         total_count = len(spu_ids)
-        success_list = []
-        failed_list = []
+        delete_count = len(delete_spu_ids) if delete_spu_ids else 0
+
+        # Result list for spu_ids (statuses: indexed, deleted, failed)
+        spu_results = []
+        # Result list for delete_spu_ids (statuses: deleted, not_found, failed)
+        delete_results = []
+
         documents = []
-        deleted_list = []
-        auto_deleted_list = []

         # Log request start
         log_index_request(
@@ -309,7 +265,7 @@
             tenant_id=tenant_id,
             request_params={
                 'spu_count': total_count,
-                'delete_count': len(delete_spu_ids) if delete_spu_ids else 0,
+                'delete_count': delete_count,
                 'index_name': index_name,
                 'batch_size': batch_size
             }
@@ -317,58 +273,33 @@

         logger.info(
             f"[IncrementalIndexing] Starting bulk index for tenant_id={tenant_id}, "
-            f"spu_count={total_count}, delete_count={len(delete_spu_ids) if delete_spu_ids else 0}"
+            f"spu_count={total_count}, delete_count={delete_count}"
         )

-        # Step 0: handle explicit deletion requests
+        # Step 0: handle explicit deletion requests (delete_spu_ids)
         if delete_spu_ids:
             logger.info(f"[IncrementalIndexing] Processing explicit deletions: {len(delete_spu_ids)} SPUs")
-            delete_result = self.delete_spus_from_es(
-                es_client=es_client,
-                tenant_id=tenant_id,
-                spu_ids=delete_spu_ids,
-                index_name=index_name
-            )
-            deleted_list = delete_result.get('deleted', [])
-            logger.info(f"[IncrementalIndexing] Explicitly deleted {len(deleted_list)} SPUs from ES")
+            for spu_id in delete_spu_ids:
+                result = self._delete_spu_from_es(es_client, tenant_id, spu_id, index_name, "explicit")
+                delete_results.append({"spu_id": spu_id, **result})

-        # Step 1: fetch all SPU documents, with auto-detected deletion
+        # Step 1: handle indexing requests (spu_ids), with auto-detected deletion
         for spu_id in spu_ids:
             try:
                 log_spu_processing(indexer_logger, tenant_id, spu_id, 'fetching')

                 # First check whether the SPU is flagged deleted in the database
-                is_deleted = self.check_spu_deleted(tenant_id, spu_id)
-                if is_deleted:
+                if self.check_spu_deleted(tenant_id, spu_id):
                     # SPU is deleted in the DB; remove its document from ES
                     logger.info(f"[IncrementalIndexing] SPU {spu_id} is deleted in DB, removing from ES")
-                    try:
-                        response = es_client.client.delete(
-                            index=index_name,
-                            id=str(spu_id),
-                            ignore=[404]
-                        )
-                        if response.get('result') == 'deleted':
-                            auto_deleted_list.append({
-                                "spu_id": spu_id,
-                                "status": "auto_deleted",
-                                "reason": "deleted in database"
-                            })
-                            log_spu_processing(indexer_logger, tenant_id, spu_id, 'auto_deleted', "deleted in database")
-                        elif response.get('result') == 'not_found':
-                            # Absent from ES also counts as success (it may have been deleted before)
-                            auto_deleted_list.append({
-                                "spu_id": spu_id,
-                                "status": "auto_deleted",
-                                "reason": "deleted in database (not found in ES)"
-                            })
-                    except Exception as e:
-                        error_msg = f"Failed to delete from ES: {str(e)}"
-                        logger.error(f"[IncrementalIndexing] Error deleting SPU {spu_id} from ES: {e}", exc_info=True)
-                        failed_list.append({
-                            "spu_id": spu_id,
-                            "error": error_msg
-                        })
+                    result = self._delete_spu_from_es(es_client, tenant_id, spu_id, index_name, "auto")
+                    # Normalize status: deleted and not_found both count as deleted; failed stays failed
+                    status = "deleted" if result["status"] != "failed" else "failed"
+                    spu_results.append({
+                        "spu_id": spu_id,
+                        "status": status,
+                        **({"msg": result["msg"]} if status == "failed" else {})
+                    })
                     continue

                 # SPU not deleted; fetch the document normally
{str(e)}" - logger.error(f"[IncrementalIndexing] Error deleting SPU {spu_id} from ES: {e}", exc_info=True) - failed_list.append({ - "spu_id": spu_id, - "error": error_msg - }) + result = self._delete_spu_from_es(es_client, tenant_id, spu_id, index_name, "auto") + # 统一状态:deleted或not_found都算deleted,failed保持failed + status = "deleted" if result["status"] != "failed" else "failed" + spu_results.append({ + "spu_id": spu_id, + "status": status, + **({"msg": result["msg"]} if status == "failed" else {}) + }) continue # SPU未删除,正常获取文档 @@ -380,22 +311,24 @@ class IncrementalIndexerService: error_msg = "SPU not found (unexpected)" logger.warning(f"[IncrementalIndexing] SPU {spu_id} not found after deleted check") log_spu_processing(indexer_logger, tenant_id, spu_id, 'failed', error_msg) - failed_list.append({ + spu_results.append({ "spu_id": spu_id, - "error": error_msg + "status": "failed", + "msg": error_msg }) continue log_spu_processing(indexer_logger, tenant_id, spu_id, 'transforming') - documents.append(doc) + documents.append((spu_id, doc)) # 保存spu_id和doc的对应关系 except Exception as e: error_msg = str(e) logger.error(f"[IncrementalIndexing] Error processing SPU {spu_id}: {e}", exc_info=True) log_spu_processing(indexer_logger, tenant_id, spu_id, 'failed', error_msg) - failed_list.append({ + spu_results.append({ "spu_id": spu_id, - "error": error_msg + "status": "failed", + "msg": error_msg }) logger.info(f"[IncrementalIndexing] Transformed {len(documents)}/{total_count} documents") @@ -403,43 +336,42 @@ class IncrementalIndexerService: # 步骤2: 批量写入ES if documents: try: - logger.info(f"[IncrementalIndexing] Indexing {len(documents)} documents to ES (batch_size={batch_size})") + # 提取doc列表用于批量写入 + doc_list = [doc for _, doc in documents] + logger.info(f"[IncrementalIndexing] Indexing {len(doc_list)} documents to ES (batch_size={batch_size})") indexer = BulkIndexer(es_client, index_name, batch_size=batch_size, max_retries=3) bulk_results = indexer.index_documents( - documents, + doc_list, id_field="spu_id", show_progress=False ) - # 根据ES返回的结果更新成功列表 - # 注意:BulkIndexer返回的是总体统计,我们需要根据实际的失败情况来更新 - # 如果ES批量写入有部分失败,我们需要找出哪些失败了 + # 根据ES返回的结果更新spu_results es_success_count = bulk_results.get('success', 0) es_failed_count = bulk_results.get('failed', 0) - # 由于我们无法精确知道哪些文档失败了,我们假设: + # 由于BulkIndexer返回的是总体统计,我们假设: # - 如果ES返回成功数等于文档数,则所有文档都成功 # - 否则,失败的文档可能在ES错误信息中,但我们无法精确映射 - # 这里采用简化处理:将成功写入ES的文档加入成功列表 + # 这里采用简化处理:将成功写入ES的文档标记为indexed if es_failed_count == 0: # 全部成功 - for doc in documents: - success_list.append({ - "spu_id": doc.get('spu_id'), + for spu_id, doc in documents: + spu_results.append({ + "spu_id": spu_id, "status": "indexed" }) else: # 有失败的情况,我们标记已处理的文档为成功,未处理的可能失败 - # 这是一个简化处理,实际应该根据ES的详细错误信息来判断 logger.warning(f"[IncrementalIndexing] ES bulk index had {es_failed_count} failures") - for doc in documents: + for spu_id, doc in documents: # 由于无法精确知道哪些失败,我们假设全部成功(实际应该改进) - success_list.append({ - "spu_id": doc.get('spu_id'), + spu_results.append({ + "spu_id": spu_id, "status": "indexed" }) - # 如果有ES错误,记录到失败列表(但不包含具体的spu_id) + # 如果有ES错误,记录日志 if bulk_results.get('errors'): logger.error(f"[IncrementalIndexing] ES errors: {bulk_results['errors'][:5]}") @@ -447,60 +379,56 @@ class IncrementalIndexerService: error_msg = f"ES bulk index failed: {str(e)}" logger.error(f"[IncrementalIndexing] {error_msg}", exc_info=True) # 所有文档都失败 - for doc in documents: - failed_list.append({ - "spu_id": doc.get('spu_id'), - "error": error_msg - }) - documents = [] # 清空,避免重复处理 + for spu_id, doc in documents: + # 

-                # If there were ES errors, record them in the failed list (without specific spu_ids)
+                # Log any ES errors
                 if bulk_results.get('errors'):
                     logger.error(f"[IncrementalIndexing] ES errors: {bulk_results['errors'][:5]}")
@@ -447,60 +379,56 @@
                 error_msg = f"ES bulk index failed: {str(e)}"
                 logger.error(f"[IncrementalIndexing] {error_msg}", exc_info=True)
                 # All documents failed
-                for doc in documents:
-                    failed_list.append({
-                        "spu_id": doc.get('spu_id'),
-                        "error": error_msg
-                    })
-                documents = []  # clear to avoid reprocessing
+                for spu_id, doc in documents:
+                    # Check whether the SPU is already in spu_results (it may have been marked failed earlier)
+                    existing = next((r for r in spu_results if r.get('spu_id') == spu_id), None)
+                    if existing:
+                        # Already present: update its status
+                        existing['status'] = 'failed'
+                        existing['msg'] = error_msg
+                    else:
+                        spu_results.append({
+                            "spu_id": spu_id,
+                            "status": "failed",
+                            "msg": error_msg
+                        })
         else:
             logger.warning(f"[IncrementalIndexing] No documents to index for tenant_id={tenant_id}")

         elapsed_time = time.time() - start_time
-        success_count = len(success_list)
-        failed_count = len(failed_list)

-        # Log the final result (with deletion statistics)
+        # Aggregate results (simplified)
+        total_processed = total_count + delete_count
+        total_success = len([r for r in spu_results + delete_results if r.get('status') in ('indexed', 'deleted', 'not_found')])
+        total_failed = len([r for r in spu_results + delete_results if r.get('status') == 'failed'])
+
+        # Log the final result
+        deleted_count = len([r for r in spu_results + delete_results if r.get('status') == 'deleted'])
         log_index_result(
             indexer_logger,
             index_type='incremental',
             tenant_id=tenant_id,
-            total_count=total_count,
-            success_count=success_count,
-            failed_count=failed_count,
+            total_count=total_processed,
+            success_count=total_success,
+            failed_count=total_failed,
             elapsed_time=elapsed_time,
             index_name=index_name,
-            errors=[item.get('error') for item in failed_list[:10]] if failed_list else None,
-            deleted_count=total_deleted_count,
-            explicit_deleted_count=explicit_deleted_count,
-            auto_deleted_count=auto_deleted_count
+            errors=[r.get('msg') for r in spu_results + delete_results if r.get('status') == 'failed'][:10],
+            deleted_count=deleted_count
         )

-        # Tally deletions
-        explicit_deleted_count = len(deleted_list) if delete_spu_ids else 0
-        auto_deleted_count = len(auto_deleted_list)
-        total_deleted_count = explicit_deleted_count + auto_deleted_count
-
         logger.info(
             f"[IncrementalIndexing] Completed for tenant_id={tenant_id}: "
-            f"total={total_count}, success={success_count}, failed={failed_count}, "
-            f"explicit_deleted={explicit_deleted_count}, auto_deleted={auto_deleted_count}, "
+            f"total={total_processed}, success={total_success}, failed={total_failed}, "
             f"elapsed={elapsed_time:.2f}s"
         )

         return {
-            "success": success_list,
-            "failed": failed_list,
-            "deleted": {
-                "explicit": deleted_list if delete_spu_ids else [],
-                "auto": auto_deleted_list,
-                "total_count": total_deleted_count,
-                "explicit_count": explicit_deleted_count,
-                "auto_count": auto_deleted_count
-            },
-            "total": total_count,
-            "success_count": success_count,
-            "failed_count": failed_count,
+            "spu_ids": spu_results,            # result list for spu_ids
+            "delete_spu_ids": delete_results,  # result list for delete_spu_ids
+            "total": total_processed,
+            "success_count": total_success,
+            "failed_count": total_failed,
             "elapsed_time": elapsed_time,
             "index_name": index_name,
             "tenant_id": tenant_id
-- 
libgit2 0.21.2