  # indexer/incremental_service.py
  """Incremental data fetching service."""
  
  import logging
  import threading
  import time
  from typing import Dict, Any, Optional, List, Tuple

  import pandas as pd
  from sqlalchemy import text, bindparam
  from indexer.indexing_utils import load_category_mapping, create_document_transformer
  from indexer.bulk_indexer import BulkIndexer
  from indexer.mapping_generator import DEFAULT_INDEX_NAME
  from indexer.indexer_logger import (
      get_indexer_logger, log_index_request, log_index_result, log_spu_processing
  )
  from config import ConfigLoader
  
  # Configure logger
  logger = logging.getLogger(__name__)
  # Dedicated logger for the indexer
  indexer_logger = get_indexer_logger()
  
  
  class IncrementalIndexerService:
      """Incremental indexing service; provides SPU data retrieval and indexing."""
  
      def __init__(self, db_engine: Any):
          """Initialize the incremental indexer service."""
          self.db_engine = db_engine
          
          # Preload the category mapping (global, shared by all tenants)
          self.category_id_to_name = load_category_mapping(db_engine)
          logger.info(f"Preloaded {len(self.category_id_to_name)} category mappings")
  
          # Caches: avoid reloading config / rebuilding transformers on frequent incremental requests
          self._config: Optional[Any] = None
          self._config_lock = threading.Lock()
          # tenant_id -> (transformer, encoder, enable_embedding)
          self._transformer_cache: Dict[str, Tuple[Any, Optional[Any], bool]] = {}
          self._transformer_cache_lock = threading.Lock()
  
      def _get_config(self) -> Any:
          """Load config once per process (thread-safe)."""
          if self._config is not None:
              return self._config
          with self._config_lock:
              if self._config is None:
                  self._config = ConfigLoader().load_config()
          return self._config
  
      def _get_transformer_bundle(self, tenant_id: str) -> Tuple[Any, Optional[Any], bool]:
          """
          Get a cached document transformer for tenant_id.
  
          - Transformer is built once per tenant (in-process cache).
          - We disable per-document embedding generation inside transformer, and instead
            batch-generate embeddings in index_spus_to_es for performance.
          """
          with self._transformer_cache_lock:
              cached = self._transformer_cache.get(str(tenant_id))
              if cached is not None:
                  return cached
  
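          # Built outside the lock: two threads may race and build the bundle twice;
          # the last cache write below wins, which is harmless since the bundles are equivalent.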
          config = self._get_config()
          enable_embedding = bool(getattr(config.query_config, "enable_text_embedding", False))
  
          encoder: Optional[Any] = None
          if enable_embedding:
              try:
                  from embeddings.text_encoder import BgeEncoder
                  encoder = BgeEncoder()
              except Exception as e:
                  logger.warning(f"Failed to initialize BgeEncoder for tenant_id={tenant_id}: {e}")
                  encoder = None
                  enable_embedding = False
  
          transformer = create_document_transformer(
              category_id_to_name=self.category_id_to_name,
              tenant_id=tenant_id,
              encoder=encoder,
              enable_title_embedding=False,  # batch fill later
              config=config,
          )
  
          bundle = (transformer, encoder, enable_embedding)
          with self._transformer_cache_lock:
              # simple unbounded cache; tenant count is typically small in one node
              self._transformer_cache[str(tenant_id)] = bundle
          return bundle
  
      @staticmethod
      def _normalize_spu_ids(spu_ids: List[str]) -> List[int]:
          """Normalize SPU IDs to ints for DB queries; skip non-int IDs."""
          out: List[int] = []
          for x in spu_ids:
              try:
                  out.append(int(x))
              except Exception:
                  continue
          return out
  
      def get_spu_document(self, tenant_id: str, spu_id: str) -> Optional[Dict[str, Any]]:
          """Fetch the assembled ES document for a single SPU."""
          try:
              # Load the SPU row
              spu_row = self._load_single_spu(tenant_id, spu_id)
              if spu_row is None:
                  logger.warning(f"SPU {spu_id} not found for tenant_id={tenant_id}")
                  return None
  
              # Load the SKU rows
              skus_df = self._load_skus_for_spu(tenant_id, spu_id)
  
              # Load the option rows
              options_df = self._load_options_for_spu(tenant_id, spu_id)
  
              transformer, encoder, enable_embedding = self._get_transformer_bundle(tenant_id)
              
              # Transform into an ES document
              doc = transformer.transform_spu_to_doc(
                  tenant_id=tenant_id,
                  spu_row=spu_row,
                  skus=skus_df,
                  options=options_df
              )
              
              if doc is None:
                  logger.warning(f"Failed to transform SPU {spu_id} for tenant_id={tenant_id}")
                  return None
  
              # The single-document path also fills in the title embedding (the encoder is cached)
              if enable_embedding and encoder:
                  title_text = doc.get("title_en") or doc.get("title_zh")
                  if title_text and str(title_text).strip():
                      try:
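                          # encode() is assumed to return a batch of vectors; take the first (and only) one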
                          embeddings = encoder.encode(title_text)
                          if embeddings is not None and len(embeddings) > 0:
                              doc["title_embedding"] = embeddings[0].tolist()
                      except Exception as e:
                          logger.warning(f"Failed to generate embedding for spu_id={spu_id}: {e}")
  
              return doc
  
          except Exception as e:
              logger.error(f"Error getting SPU document for tenant_id={tenant_id}, spu_id={spu_id}: {e}", exc_info=True)
              raise
  
      def _load_single_spu(self, tenant_id: str, spu_id: str, include_deleted: bool = False) -> Optional[pd.Series]:
          """
          加载单个SPU数据
          
          Args:
              tenant_id: 租户ID
              spu_id: SPU ID
              include_deleted: 是否包含已删除的记录(用于检查删除状态)
          
          Returns:
              SPU数据Series,如果不存在返回None
          """
          # Soft-deleted rows are included only when the caller needs to check deletion status
          deleted_filter = "" if include_deleted else " AND deleted = 0"
          query = text(f"""
              SELECT
                  id, shop_id, shoplazza_id, title, brief, description,
                  spu, vendor, vendor_url,
                  image_src, image_width, image_height, image_path, image_alt,
                  tags, note, category, category_id, category_google_id,
                  category_level, category_path,
                  fake_sales, display_fake_sales,
                  tenant_id, creator, create_time, updater, update_time, deleted
              FROM shoplazza_product_spu
              WHERE tenant_id = :tenant_id AND id = :spu_id{deleted_filter}
              LIMIT 1
          """)
          
          with self.db_engine.connect() as conn:
              df = pd.read_sql(query, conn, params={"tenant_id": tenant_id, "spu_id": spu_id})
          
          if df.empty:
              return None
          
          return df.iloc[0]
      
      def check_spu_deleted(self, tenant_id: str, spu_id: str) -> bool:
          """
          Check whether the SPU is flagged as deleted in the database.

          Args:
              tenant_id: Tenant ID
              spu_id: SPU ID

          Returns:
              True if the SPU is deleted or does not exist; False otherwise
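
          Example (illustrative; assumes a `service` instance):
              if service.check_spu_deleted("tenant-1", "1001"):
                  ...  # the corresponding ES document should be removed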
          """
          spu_row = self._load_single_spu(tenant_id, spu_id, include_deleted=True)
          if spu_row is None:
              # SPU does not exist; treat it as needing deletion
              return True
          # Check the deleted field (may be a MySQL bit type; normalize to bool)
          deleted = spu_row.get('deleted', 0)
          # bit columns come back as bytes, e.g. b'\x00' or b'\x01' (comparing bytes to 1 is never true)
          if isinstance(deleted, bytes):
              return deleted == b'\x01'
          return bool(deleted)
  
      def _load_spus_for_spu_ids(self, tenant_id: str, spu_ids: List[str], include_deleted: bool = True) -> pd.DataFrame:
          """Batch load SPU rows for a list of spu_ids using IN (...)"""
          spu_ids_int = self._normalize_spu_ids(spu_ids)
          if not spu_ids_int:
              return pd.DataFrame()
  
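          # expanding=True makes SQLAlchemy render "IN :spu_ids" as "IN (?, ?, ...)" with one placeholder per ID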
          deleted_filter = "" if include_deleted else " AND deleted = 0"
          query = text(
              f"""
              SELECT
                  id, shop_id, shoplazza_id, title, brief, description,
                  spu, vendor, vendor_url,
                  image_src, image_width, image_height, image_path, image_alt,
                  tags, note, category, category_id, category_google_id,
                  category_level, category_path,
                  fake_sales, display_fake_sales,
                  tenant_id, creator, create_time, updater, update_time, deleted
              FROM shoplazza_product_spu
              WHERE tenant_id = :tenant_id AND id IN :spu_ids{deleted_filter}
              """
          ).bindparams(bindparam("spu_ids", expanding=True))
  
          with self.db_engine.connect() as conn:
              df = pd.read_sql(query, conn, params={"tenant_id": tenant_id, "spu_ids": spu_ids_int})
          return df
  
      def _load_skus_for_spu_ids(self, tenant_id: str, spu_ids: List[str]) -> pd.DataFrame:
          """Batch load all SKUs for a list of spu_ids"""
          spu_ids_int = self._normalize_spu_ids(spu_ids)
          if not spu_ids_int:
              return pd.DataFrame()
  
          query = text(
              """
              SELECT 
                  id, spu_id, shop_id, shoplazza_id, shoplazza_product_id,
                  shoplazza_image_id, title, sku, barcode, position,
                  price, compare_at_price, cost_price,
                  option1, option2, option3,
                  inventory_quantity, weight, weight_unit, image_src,
                  wholesale_price, note, extend,
                  shoplazza_created_at, shoplazza_updated_at, tenant_id,
                  creator, create_time, updater, update_time, deleted
              FROM shoplazza_product_sku
              WHERE tenant_id = :tenant_id AND deleted = 0 AND spu_id IN :spu_ids
              """
          ).bindparams(bindparam("spu_ids", expanding=True))
  
          with self.db_engine.connect() as conn:
              df = pd.read_sql(query, conn, params={"tenant_id": tenant_id, "spu_ids": spu_ids_int})
          return df
  
      def _load_options_for_spu_ids(self, tenant_id: str, spu_ids: List[str]) -> pd.DataFrame:
          """Batch load all options for a list of spu_ids"""
          spu_ids_int = self._normalize_spu_ids(spu_ids)
          if not spu_ids_int:
              return pd.DataFrame()
  
          query = text(
              """
              SELECT 
                  id, spu_id, shop_id, shoplazza_id, shoplazza_product_id,
                  position, name, `values`, tenant_id,
                  creator, create_time, updater, update_time, deleted
              FROM shoplazza_product_option
              WHERE tenant_id = :tenant_id AND deleted = 0 AND spu_id IN :spu_ids
              ORDER BY spu_id, position
              """
          ).bindparams(bindparam("spu_ids", expanding=True))
  
          with self.db_engine.connect() as conn:
              df = pd.read_sql(query, conn, params={"tenant_id": tenant_id, "spu_ids": spu_ids_int})
          return df
      
      def _delete_spu_from_es(
          self,
          es_client,
          tenant_id: str,
          spu_id: str,
          index_name: str,
          log_prefix: str = ""
      ) -> Dict[str, Any]:
          """
          Delete a single SPU document from ES (shared helper).
          
          Returns:
              {"status": "deleted|not_found|failed", "msg": "error message (optional)"}
          """
          try:
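              # ignore=[404] suppresses the NotFoundError, so a missing document is
              # reported via result='not_found' instead of raising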
              response = es_client.client.delete(
                  index=index_name,
                  id=str(spu_id),
                  ignore=[404]
              )
              
              result = response.get('result')
              if result == 'deleted':
                  log_spu_processing(indexer_logger, tenant_id, spu_id, 'deleted', log_prefix)
                  return {"status": "deleted"}
              elif result == 'not_found':
                  return {"status": "not_found"}
              else:
                  msg = f"Unexpected result: {result}"
                  log_spu_processing(indexer_logger, tenant_id, spu_id, 'delete_failed', msg)
                  return {"status": "failed", "msg": msg}
                  
          except Exception as e:
              if hasattr(e, 'status_code') and e.status_code == 404:
                  return {"status": "not_found"}
              else:
                  msg = str(e)
                  logger.error(f"[IncrementalDeletion] Error deleting SPU {spu_id}: {e}", exc_info=True)
                  log_spu_processing(indexer_logger, tenant_id, spu_id, 'delete_failed', msg)
                  return {"status": "failed", "msg": msg}
  
      def _load_skus_for_spu(self, tenant_id: str, spu_id: str) -> pd.DataFrame:
          """Load all SKU rows for the given SPU."""
          query = text("""
              SELECT 
                  id, spu_id, shop_id, shoplazza_id, shoplazza_product_id,
                  shoplazza_image_id, title, sku, barcode, position,
                  price, compare_at_price, cost_price,
                  option1, option2, option3,
                  inventory_quantity, weight, weight_unit, image_src,
                  wholesale_price, note, extend,
                  shoplazza_created_at, shoplazza_updated_at, tenant_id,
                  creator, create_time, updater, update_time, deleted
              FROM shoplazza_product_sku
              WHERE tenant_id = :tenant_id AND spu_id = :spu_id AND deleted = 0
          """)
          
          with self.db_engine.connect() as conn:
              df = pd.read_sql(query, conn, params={"tenant_id": tenant_id, "spu_id": spu_id})
          
          return df
  
      def _load_options_for_spu(self, tenant_id: str, spu_id: str) -> pd.DataFrame:
          """Load all option rows for the given SPU."""
          query = text("""
              SELECT 
                  id, spu_id, shop_id, shoplazza_id, shoplazza_product_id,
                  position, name, `values`, tenant_id,
                  creator, create_time, updater, update_time, deleted
              FROM shoplazza_product_option
              WHERE tenant_id = :tenant_id AND spu_id = :spu_id AND deleted = 0
              ORDER BY position
          """)
          
          with self.db_engine.connect() as conn:
              df = pd.read_sql(query, conn, params={"tenant_id": tenant_id, "spu_id": spu_id})
          
          return df
  
      def index_spus_to_es(
          self,
          es_client,
          tenant_id: str,
          spu_ids: List[str],
          index_name: str = DEFAULT_INDEX_NAME,
          batch_size: int = 100,
          delete_spu_ids: Optional[List[str]] = None
      ) -> Dict[str, Any]:
          """
          Batch index SPUs into ES (incremental indexing).
          
          Two deletion paths are supported:
          1. Auto-detection: SPUs whose DB rows are soft-deleted (or missing) are removed from ES
          2. Explicit: SPUs listed in delete_spu_ids are removed from ES
          
          Args:
              es_client: Elasticsearch client
              tenant_id: Tenant ID
              spu_ids: List of SPU IDs to index
              index_name: Target index name
              batch_size: Batch size for bulk writes to ES
              delete_spu_ids: Optional list of SPU IDs to delete explicitly
              
          Returns:
              A dict with per-SPU index results and deletion results
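
          Example (illustrative; assumes a `service` instance and an ES client wrapper `es`):
              result = service.index_spus_to_es(es, "tenant-1", spu_ids=["1001"], delete_spu_ids=["1002"])
              # result["spu_ids"] -> [{"spu_id": "1001", "status": "indexed"}]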
          """
          # Deduplicate while preserving order (avoids redundant DB reads / translation / embeddings / ES writes)
          if spu_ids:
              spu_ids = list(dict.fromkeys(spu_ids))
  
          start_time = time.time()
          total_count = len(spu_ids)
          delete_count = len(delete_spu_ids) if delete_spu_ids else 0
          
          # Results for spu_ids (status: indexed, deleted, failed)
          spu_results = []
          # Results for delete_spu_ids (status: deleted, not_found, failed)
          delete_results = []
          
          documents = []
          
          # Log the incoming request
          log_index_request(
              indexer_logger,
              index_type='incremental',
              tenant_id=tenant_id,
              request_params={
                  'spu_count': total_count,
                  'delete_count': delete_count,
                  'index_name': index_name,
                  'batch_size': batch_size
              }
          )
          
          logger.info(
              f"[IncrementalIndexing] Starting bulk index for tenant_id={tenant_id}, "
              f"spu_count={total_count}, delete_count={delete_count}"
          )
          
          # Step 0: process explicit deletion requests (delete_spu_ids)
          if delete_spu_ids:
              logger.info(f"[IncrementalIndexing] Processing explicit deletions: {len(delete_spu_ids)} SPUs")
              for spu_id in delete_spu_ids:
                  result = self._delete_spu_from_es(es_client, tenant_id, spu_id, index_name, "explicit")
                  delete_results.append({"spu_id": spu_id, **result})
          
          # Step 1: batch-load SPU/SKU/option data and auto-detect deletions
          if spu_ids:
              log_spu_processing(indexer_logger, tenant_id, ",".join(spu_ids[:10]), 'fetching')
  
              # 批量加载SPU(包含deleted字段,用于判断删除)
              spu_df = self._load_spus_for_spu_ids(tenant_id, spu_ids, include_deleted=True)
              if spu_df.empty:
                  # None of the requested SPUs exist in the DB; treat them all as deletions
                  for spu_id in spu_ids:
                      logger.info(f"[IncrementalIndexing] SPU {spu_id} not found in DB, removing from ES")
                      result = self._delete_spu_from_es(es_client, tenant_id, spu_id, index_name, "auto_missing")
                      status = "deleted" if result["status"] != "failed" else "failed"
                      spu_results.append({
                          "spu_id": spu_id,
                          "status": status,
                          **({"msg": result["msg"]} if status == "failed" else {})
                      })
              else:
                  # Build an id -> row index
                  spu_df = spu_df.copy()
                  # Normalize deleted column to bool
                  def _is_deleted_value(v: Any) -> bool:
                      if isinstance(v, bytes):
                          return v == b"\x01"
                      return bool(v)
  
                  spu_df["_is_deleted"] = spu_df["deleted"].apply(_is_deleted_value)
                  spu_df.set_index("id", inplace=True, drop=False)
  
                  found_ids = set(int(x) for x in spu_df.index.tolist())
                  requested_ids_int = set(self._normalize_spu_ids(spu_ids))
                  missing_ids_int = requested_ids_int - found_ids
  
                  # missing -> delete from ES
                  for missing_id in sorted(missing_ids_int):
                      spu_id_str = str(missing_id)
                      logger.info(f"[IncrementalIndexing] SPU {spu_id_str} not found in DB, removing from ES")
                      result = self._delete_spu_from_es(es_client, tenant_id, spu_id_str, index_name, "auto_missing")
                      status = "deleted" if result["status"] != "failed" else "failed"
                      spu_results.append({
                          "spu_id": spu_id_str,
                          "status": status,
                          **({"msg": result["msg"]} if status == "failed" else {})
                      })
  
                  # deleted -> delete from ES
                  deleted_rows = spu_df[spu_df["_is_deleted"]]
                  for _, row in deleted_rows.iterrows():
                      spu_id_str = str(int(row["id"]))
                      logger.info(f"[IncrementalIndexing] SPU {spu_id_str} is deleted in DB, removing from ES")
                      result = self._delete_spu_from_es(es_client, tenant_id, spu_id_str, index_name, "auto")
                      status = "deleted" if result["status"] != "failed" else "failed"
                      spu_results.append({
                          "spu_id": spu_id_str,
                          "status": status,
                          **({"msg": result["msg"]} if status == "failed" else {})
                      })
  
                  # active -> batch load sku/option then transform
                  active_spu_df = spu_df[~spu_df["_is_deleted"]]
                  active_ids_str = [str(int(x)) for x in active_spu_df["id"].tolist()]
  
                  skus_df = self._load_skus_for_spu_ids(tenant_id, active_ids_str)
                  options_df = self._load_options_for_spu_ids(tenant_id, active_ids_str)
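                  # Group rows by spu_id once so the per-SPU loop below does O(1) lookups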
                  sku_groups = skus_df.groupby("spu_id") if not skus_df.empty else None
                  option_groups = options_df.groupby("spu_id") if not options_df.empty else None
  
                  transformer, encoder, enable_embedding = self._get_transformer_bundle(tenant_id)
  
                  # Process active SPUs in input order
                  for spu_id in spu_ids:
                      try:
                          spu_id_int = int(spu_id)
                      except Exception:
                          continue
                      if spu_id_int not in active_spu_df.index:
                          continue
  
                      log_spu_processing(indexer_logger, tenant_id, spu_id, 'transforming')
                      spu_row = active_spu_df.loc[spu_id_int]
                      skus_for_spu = sku_groups.get_group(spu_id_int) if sku_groups is not None and spu_id_int in sku_groups.groups else pd.DataFrame()
                      opts_for_spu = option_groups.get_group(spu_id_int) if option_groups is not None and spu_id_int in option_groups.groups else pd.DataFrame()
  
                      doc = transformer.transform_spu_to_doc(
                          tenant_id=tenant_id,
                          spu_row=spu_row,
                          skus=skus_for_spu,
                          options=opts_for_spu,
                      )
                      if doc is None:
                          error_msg = "SPU transform returned None"
                          log_spu_processing(indexer_logger, tenant_id, spu_id, 'failed', error_msg)
                          spu_results.append({"spu_id": spu_id, "status": "failed", "msg": error_msg})
                          continue
  
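                      # Keep (spu_id, doc) pairs so failures can be mapped back to SPU IDs later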
                      documents.append((spu_id, doc))
  
                  # Batch-generate embeddings (translation logic unchanged; embeddings use the cached encoder)
                  if enable_embedding and encoder and documents:
                      title_texts: List[str] = []
                      title_doc_indices: List[int] = []
                      for i, (_, doc) in enumerate(documents):
                          title_text = doc.get("title_en") or doc.get("title_zh")
                          if title_text and str(title_text).strip():
                              title_texts.append(str(title_text))
                              title_doc_indices.append(i)
  
                      if title_texts:
                          try:
                              embeddings = encoder.encode_batch(title_texts, batch_size=32)
                              for j, emb in enumerate(embeddings):
                                  doc_idx = title_doc_indices[j]
                                  documents[doc_idx][1]["title_embedding"] = emb.tolist()
                          except Exception as e:
                              logger.warning(f"[IncrementalIndexing] Batch embedding failed for tenant_id={tenant_id}: {e}", exc_info=True)
          
          logger.info(f"[IncrementalIndexing] Transformed {len(documents)}/{total_count} documents")
          
          # Step 2: bulk-write to ES
          if documents:
              try:
                  # Extract the plain docs for the bulk write
                  doc_list = [doc for _, doc in documents]
                  logger.info(f"[IncrementalIndexing] Indexing {len(doc_list)} documents to ES (batch_size={batch_size})")
                  indexer = BulkIndexer(es_client, index_name, batch_size=batch_size, max_retries=3)
                  bulk_results = indexer.index_documents(
                      doc_list,
                      id_field="spu_id",
                      show_progress=False
                  )
                  
                  # Update spu_results based on the ES response
                  es_success_count = bulk_results.get('success', 0)
                  es_failed_count = bulk_results.get('failed', 0)
                  
                  # BulkIndexer returns only aggregate stats, so we assume:
                  # - if the ES success count equals the document count, every document succeeded
                  # - otherwise the failures appear in the ES error details, but we cannot map them precisely
                  # Simplification: mark documents that reached the bulk write as indexed
                  if es_failed_count == 0:
                      # Everything succeeded
                      for spu_id, doc in documents:
                          spu_results.append({
                              "spu_id": spu_id,
                              "status": "indexed"
                          })
                  else:
                      # Some writes failed; we still mark processed documents as successful, since per-document failures are unknown
                      logger.warning(f"[IncrementalIndexing] ES bulk index had {es_failed_count} failures")
                      for spu_id, doc in documents:
                          # We cannot tell exactly which documents failed, so assume all succeeded (should be improved)
                          spu_results.append({
                              "spu_id": spu_id,
                              "status": "indexed"
                          })
                      
                      # Log ES-level errors, if any
                      if bulk_results.get('errors'):
                          logger.error(f"[IncrementalIndexing] ES errors: {bulk_results['errors'][:5]}")
                  
              except Exception as e:
                  error_msg = f"ES bulk index failed: {str(e)}"
                  logger.error(f"[IncrementalIndexing] {error_msg}", exc_info=True)
                  # Mark all documents as failed
                  for spu_id, doc in documents:
                      # Check whether this spu_id already has a result (it may have been marked failed earlier)
                      existing = next((r for r in spu_results if r.get('spu_id') == spu_id), None)
                      if existing:
                          # Already present: update its status
                          existing['status'] = 'failed'
                          existing['msg'] = error_msg
                      else:
                          spu_results.append({
                              "spu_id": spu_id,
                              "status": "failed",
                              "msg": error_msg
                          })
          else:
              logger.warning(f"[IncrementalIndexing] No documents to index for tenant_id={tenant_id}")
          
          elapsed_time = time.time() - start_time
          
          # Aggregate the results (simplified)
          total_processed = total_count + delete_count
          total_success = len([r for r in spu_results + delete_results if r.get('status') in ('indexed', 'deleted', 'not_found')])
          total_failed = len([r for r in spu_results + delete_results if r.get('status') == 'failed'])
          
          # Log the final result
          deleted_count = len([r for r in spu_results + delete_results if r.get('status') == 'deleted'])
          log_index_result(
              indexer_logger,
              index_type='incremental',
              tenant_id=tenant_id,
              total_count=total_processed,
              success_count=total_success,
              failed_count=total_failed,
              elapsed_time=elapsed_time,
              index_name=index_name,
              errors=[r.get('msg') for r in spu_results + delete_results if r.get('status') == 'failed'][:10],
              deleted_count=deleted_count
          )
          
          logger.info(
              f"[IncrementalIndexing] Completed for tenant_id={tenant_id}: "
              f"total={total_processed}, success={total_success}, failed={total_failed}, "
              f"elapsed={elapsed_time:.2f}s"
          )
          
          return {
              "spu_ids": spu_results,  # results for the spu_ids request list
              "delete_spu_ids": delete_results,  # delete_spu_ids对应的响应列表
              "total": total_processed,
              "success_count": total_success,
              "failed_count": total_failed,
              "elapsed_time": elapsed_time,
              "index_name": index_name,
              "tenant_id": tenant_id
          }
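

  # ---------------------------------------------------------------------------
  # Usage sketch (illustrative only): wiring the service to a database engine
  # and an ES client. The connection URL and the `es_client` wrapper are
  # assumptions; the wrapper only needs a `.client` attribute exposing the
  # official elasticsearch-py API, as used by _delete_spu_from_es above.
  #
  #     from sqlalchemy import create_engine
  #
  #     engine = create_engine("mysql+pymysql://user:password@host/db")
  #     service = IncrementalIndexerService(engine)
  #     result = service.index_spus_to_es(
  #         es_client=es_client,
  #         tenant_id="tenant-1",
  #         spu_ids=["1001", "1002"],
  #         delete_spu_ids=["1003"],
  #     )
  #     print(result["success_count"], result["failed_count"])
  # ---------------------------------------------------------------------------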