Commit 72e7256ae49c8422d6008aed15677168cb705cc9
1 parent
f1505d1b
清理文件
Showing
15 changed files
with
0 additions
and
1998 deletions
Show diff stats
scripts/generate_test_data.py deleted
| @@ -1,421 +0,0 @@ | @@ -1,421 +0,0 @@ | ||
| 1 | -#!/usr/bin/env python3 | ||
| 2 | -""" | ||
| 3 | -Generate test data for Shoplazza SPU and SKU tables. | ||
| 4 | - | ||
| 5 | -Generates 100 SPU records with 1-5 SKUs each. | ||
| 6 | -""" | ||
| 7 | - | ||
| 8 | -import sys | ||
| 9 | -import os | ||
| 10 | -import random | ||
| 11 | -import argparse | ||
| 12 | -from pathlib import Path | ||
| 13 | -from datetime import datetime, timedelta | ||
| 14 | - | ||
| 15 | -# Add parent directory to path | ||
| 16 | -sys.path.insert(0, str(Path(__file__).parent.parent)) | ||
| 17 | - | ||
| 18 | - | ||
def generate_spu_data(num_spus: int = 100, tenant_id: str = "1", start_id: int = 1):
    """
    Build a list of synthetic SPU (product) records.

    Args:
        num_spus: How many SPU records to create
        tenant_id: Tenant identifier copied into every record
        start_id: Numeric ID of the first SPU; later SPUs get consecutive IDs

    Returns:
        List of SPU dicts keyed by column name
    """
    categories = ["电子产品", "服装", "家居用品", "美妆", "食品", "运动用品", "图书", "玩具"]
    vendors = ["Sony", "Nike", "Apple", "Samsung", "华为", "小米", "美的", "海尔"]

    products = [
        ("蓝牙耳机", "Bluetooth Headphone", "高品质无线蓝牙耳机", "High-quality wireless Bluetooth headphone"),
        ("运动鞋", "Running Shoes", "舒适透气的运动鞋", "Comfortable and breathable running shoes"),
        ("智能手机", "Smartphone", "高性能智能手机", "High-performance smartphone"),
        ("笔记本电脑", "Laptop", "轻薄便携笔记本电脑", "Lightweight and portable laptop"),
        ("智能手表", "Smart Watch", "多功能智能手表", "Multi-function smart watch"),
        ("平板电脑", "Tablet", "高清平板电脑", "High-definition tablet"),
        ("无线鼠标", "Wireless Mouse", "人体工学无线鼠标", "Ergonomic wireless mouse"),
        ("机械键盘", "Mechanical Keyboard", "RGB背光机械键盘", "RGB backlit mechanical keyboard"),
        ("显示器", "Monitor", "4K高清显示器", "4K high-definition monitor"),
        ("音响", "Speaker", "蓝牙无线音响", "Bluetooth wireless speaker"),
    ]

    records = []
    for offset in range(num_spus):
        current_id = start_id + offset
        # NOTE: keep the random.* call order stable (product, category,
        # vendor, then the two date offsets) so seeded runs are reproducible.
        name_zh, _name_en, desc_zh, desc_en = random.choice(products)
        category = random.choice(categories)
        vendor = random.choice(vendors)

        title = f"{name_zh} {vendor}"

        # Creation within the past year; an update up to 30 days later.
        created = datetime.now() - timedelta(days=random.randint(1, 365))
        updated = created + timedelta(days=random.randint(0, 30))
        created_s = created.strftime('%Y-%m-%d %H:%M:%S')
        updated_s = updated.strftime('%Y-%m-%d %H:%M:%S')

        records.append({
            'id': current_id,
            'shop_id': 1,
            'shoplazza_id': f"spu-{current_id}",
            'handle': f"product-{current_id}",
            'title': title,
            'brief': desc_zh,
            'description': f"<p>{desc_zh},来自{vendor}品牌。{desc_en}</p>",
            'spu': '',
            'vendor': vendor,
            'vendor_url': f"https://{vendor.lower()}.com",
            'seo_title': f"{title} - {category}",
            'seo_description': f"购买{vendor}{name_zh},{desc_zh}",
            'seo_keywords': f"{name_zh},{vendor},{category}",
            'image_src': f"//cdn.example.com/products/{current_id}.jpg",
            'image_width': 800,
            'image_height': 600,
            'image_path': f"products/{current_id}.jpg",
            'image_alt': title,
            'inventory_policy': '',
            'inventory_quantity': 0,
            'inventory_tracking': '0',
            'published': 1,
            'published_at': created_s,
            'requires_shipping': 1,
            'taxable': 0,
            'fake_sales': 0,
            'display_fake_sales': 0,
            'mixed_wholesale': 0,
            'need_variant_image': 0,
            'has_only_default_variant': 0,
            'tags': f"{category},{vendor},{name_zh}",
            'note': '',
            'category': category,
            'shoplazza_created_at': created_s,
            'shoplazza_updated_at': updated_s,
            'tenant_id': tenant_id,
            'creator': '1',
            'create_time': created_s,
            'updater': '1',
            'update_time': updated_s,
            'deleted': 0,
        })

    return records
| 127 | - | ||
| 128 | - | ||
def generate_sku_data(spus: list, start_sku_id: int = 1):
    """
    Build SKU (variant) records for a list of SPU records.

    Each SPU receives between 1 and 5 SKUs with randomized options,
    prices, weights and stock levels.

    Args:
        spus: SPU dicts as produced by generate_spu_data()
        start_sku_id: Numeric ID assigned to the first SKU

    Returns:
        Flat list of SKU dicts keyed by column name
    """
    colors = ["黑色", "白色", "红色", "蓝色", "绿色", "灰色"]
    sizes = ["S", "M", "L", "XL", "XXL"]

    all_skus = []
    next_id = start_sku_id

    for parent in spus:
        parent_id = parent['id']
        variant_count = random.randint(1, 5)
        base_price = random.uniform(50, 500)  # all variants vary around this

        for slot in range(variant_count):
            # Single-variant products carry no options; size only appears
            # once there are at least three variants.
            color = random.choice(colors) if variant_count > 1 else None
            size = random.choice(sizes) if variant_count > 2 else None

            # Variant title: present options joined with " / ", else empty.
            title = " / ".join(part for part in (color, size) if part)

            price = base_price + random.uniform(-20, 50)
            compare_at = price * random.uniform(1.2, 1.5)
            stock = random.randint(0, 100)

            created = datetime.now() - timedelta(days=random.randint(1, 365))
            updated = created + timedelta(days=random.randint(0, 30))
            created_s = created.strftime('%Y-%m-%d %H:%M:%S')
            updated_s = updated.strftime('%Y-%m-%d %H:%M:%S')

            all_skus.append({
                'id': next_id,
                'spu_id': parent_id,
                'shop_id': 1,
                'shoplazza_id': f"sku-{next_id}",
                'shoplazza_product_id': parent['shoplazza_id'],
                'shoplazza_image_id': '',
                'title': title,
                'sku': f"SKU-{parent_id}-{slot + 1}",
                'barcode': f"BAR{next_id:08d}",
                'position': slot + 1,
                'price': round(price, 2),
                'compare_at_price': round(compare_at, 2),
                'cost_price': round(price * 0.6, 2),
                'option1': color if color else '',
                'option2': size if size else '',
                'option3': '',
                'inventory_quantity': stock,
                'weight': round(random.uniform(0.1, 5.0), 2),
                'weight_unit': 'kg',
                'image_src': '',
                'wholesale_price': '[{"price": ' + str(round(price * 0.8, 2)) + ', "minQuantity": 10}]',
                'note': '',
                'extend': None,  # JSON column -- rendered as SQL NULL, never ''
                'shoplazza_created_at': created_s,
                'shoplazza_updated_at': updated_s,
                'tenant_id': parent['tenant_id'],
                'creator': '1',
                'create_time': created_s,
                'updater': '1',
                'update_time': updated_s,
                'deleted': 0,
            })
            next_id += 1

    return all_skus
| 217 | - | ||
| 218 | - | ||
def escape_sql_string(value) -> str:
    """
    Escape a value for inlining into a single-quoted MySQL string literal.

    Single quotes are doubled and backslashes are doubled, so the result is
    safe inside '...' under MySQL's default mode, where the backslash acts
    as an escape character.  (The previous docstring only mentioned the
    quote doubling.)  The two replacements are independent -- neither one
    introduces the other's character -- so their order does not matter.

    Args:
        value: Value to escape; non-strings are converted with str()

    Returns:
        Escaped string; None is mapped to the empty string
    """
    if value is None:
        return ''
    return str(value).replace("'", "''").replace("\\", "\\\\")
| 232 | - | ||
| 233 | - | ||
def generate_sql_inserts(spus: list, skus: list, output_file: str):
    """
    Write multi-row MySQL INSERT statements for the generated rows.

    Fix: when a list is empty its INSERT statement is now omitted
    entirely; previously a bare "... VALUES" with no row tuples was
    written, which is invalid SQL.

    Args:
        spus: SPU dicts from generate_spu_data()
        skus: SKU dicts from generate_sku_data()
        output_file: Path of the SQL file to create/overwrite
    """

    def q(value) -> str:
        # Escape and wrap a value in single quotes for SQL.
        return f"'{escape_sql_string(value)}'"

    with open(output_file, 'w', encoding='utf-8') as f:
        f.write("-- SPU Test Data\n")
        if spus:
            f.write("INSERT INTO shoplazza_product_spu (\n")
            f.write("    id, shop_id, shoplazza_id, handle, title, brief, description, spu,\n")
            f.write("    vendor, vendor_url, seo_title, seo_description, seo_keywords,\n")
            f.write("    image_src, image_width, image_height, image_path, image_alt,\n")
            f.write("    inventory_policy, inventory_quantity, inventory_tracking,\n")
            f.write("    published, published_at, requires_shipping, taxable,\n")
            f.write("    fake_sales, display_fake_sales, mixed_wholesale, need_variant_image,\n")
            f.write("    has_only_default_variant, tags, note, category,\n")
            f.write("    shoplazza_created_at, shoplazza_updated_at, tenant_id,\n")
            f.write("    creator, create_time, updater, update_time, deleted\n")
            f.write(") VALUES\n")

            spu_rows = []
            for spu in spus:
                # Column order must match the column list above.
                cols = [
                    str(spu['id']), str(spu['shop_id']), q(spu['shoplazza_id']),
                    q(spu['handle']), q(spu['title']), q(spu['brief']),
                    q(spu['description']), q(spu['spu']), q(spu['vendor']),
                    q(spu['vendor_url']), q(spu['seo_title']),
                    q(spu['seo_description']), q(spu['seo_keywords']),
                    q(spu['image_src']), str(spu['image_width']),
                    str(spu['image_height']), q(spu['image_path']),
                    q(spu['image_alt']), q(spu['inventory_policy']),
                    str(spu['inventory_quantity']), q(spu['inventory_tracking']),
                    str(spu['published']), q(spu['published_at']),
                    str(spu['requires_shipping']), str(spu['taxable']),
                    str(spu['fake_sales']), str(spu['display_fake_sales']),
                    str(spu['mixed_wholesale']), str(spu['need_variant_image']),
                    str(spu['has_only_default_variant']), q(spu['tags']),
                    q(spu['note']), q(spu['category']),
                    q(spu['shoplazza_created_at']), q(spu['shoplazza_updated_at']),
                    q(spu['tenant_id']), q(spu['creator']), q(spu['create_time']),
                    q(spu['updater']), q(spu['update_time']), str(spu['deleted']),
                ]
                spu_rows.append("(" + ", ".join(cols) + ")")
            f.write(",\n".join(spu_rows))
            f.write(";\n\n")

        f.write("-- SKU Test Data\n")
        if skus:
            f.write("INSERT INTO shoplazza_product_sku (\n")
            f.write("    id, spu_id, shop_id, shoplazza_id, shoplazza_product_id, shoplazza_image_id,\n")
            f.write("    title, sku, barcode, position, price, compare_at_price, cost_price,\n")
            f.write("    option1, option2, option3, inventory_quantity, weight, weight_unit,\n")
            f.write("    image_src, wholesale_price, note, extend,\n")
            f.write("    shoplazza_created_at, shoplazza_updated_at, tenant_id,\n")
            f.write("    creator, create_time, updater, update_time, deleted\n")
            f.write(") VALUES\n")

            sku_rows = []
            for sku in skus:
                # extend is a JSON column; None must become SQL NULL, not ''.
                extend_sql = 'NULL' if sku['extend'] is None else q(sku['extend'])
                cols = [
                    str(sku['id']), str(sku['spu_id']), str(sku['shop_id']),
                    q(sku['shoplazza_id']), q(sku['shoplazza_product_id']),
                    q(sku['shoplazza_image_id']), q(sku['title']), q(sku['sku']),
                    q(sku['barcode']), str(sku['position']), str(sku['price']),
                    str(sku['compare_at_price']), str(sku['cost_price']),
                    q(sku['option1']), q(sku['option2']), q(sku['option3']),
                    str(sku['inventory_quantity']), str(sku['weight']),
                    q(sku['weight_unit']), q(sku['image_src']),
                    q(sku['wholesale_price']), q(sku['note']), extend_sql,
                    q(sku['shoplazza_created_at']), q(sku['shoplazza_updated_at']),
                    q(sku['tenant_id']), q(sku['creator']), q(sku['create_time']),
                    q(sku['updater']), q(sku['update_time']), str(sku['deleted']),
                ]
                sku_rows.append("(" + ", ".join(cols) + ")")
            f.write(",\n".join(sku_rows))
            f.write(";\n")
| 321 | - | ||
| 322 | - | ||
def get_max_ids_from_db(db_config=None):
    """
    Look up the current maximum SPU/SKU primary keys in the database.

    Used to pick starting IDs that cannot collide with rows already
    present.  The lookup is best-effort: any failure (missing driver,
    unreachable host, bad credentials) degrades to (0, 0).

    Args:
        db_config: Dict with host, port, database, username, password
            keys, or a falsy value to skip the lookup entirely

    Returns:
        tuple: (max_spu_id, max_sku_id); (0, 0) when skipped or on error
    """
    if not db_config:
        return 0, 0

    try:
        from utils.db_connector import create_db_connection
        from sqlalchemy import text

        engine = create_db_connection(
            host=db_config['host'],
            port=db_config['port'],
            database=db_config['database'],
            username=db_config['username'],
            password=db_config['password'],
        )

        with engine.connect() as conn:
            spu_max = conn.execute(text('SELECT MAX(id) FROM shoplazza_product_spu')).scalar() or 0
            sku_max = conn.execute(text('SELECT MAX(id) FROM shoplazza_product_sku')).scalar() or 0
        return spu_max, sku_max
    except Exception as exc:  # best-effort helper -- never fatal
        print(f"Warning: Could not get max IDs from database: {exc}")
        return 0, 0
| 359 | - | ||
| 360 | - | ||
def main():
    """CLI entry point: generate SPU/SKU test data and write a SQL file."""
    parser = argparse.ArgumentParser(description='Generate test data for Shoplazza tables')
    parser.add_argument('--num-spus', type=int, default=100, help='Number of SPUs to generate')
    parser.add_argument('--tenant-id', default='1', help='Tenant ID')
    parser.add_argument('--start-spu-id', type=int, default=None, help='Starting SPU ID (default: auto-calculate from DB)')
    parser.add_argument('--start-sku-id', type=int, default=None, help='Starting SKU ID (default: auto-calculate from DB)')
    parser.add_argument('--output', default='test_data.sql', help='Output SQL file')
    parser.add_argument('--db-host', help='Database host (for auto-calculating start IDs)')
    parser.add_argument('--db-port', type=int, default=3306, help='Database port (default: 3306)')
    parser.add_argument('--db-database', help='Database name (for auto-calculating start IDs)')
    parser.add_argument('--db-username', help='Database username (for auto-calculating start IDs)')
    parser.add_argument('--db-password', help='Database password (for auto-calculating start IDs)')

    args = parser.parse_args()

    spu_start = args.start_spu_id
    sku_start = args.start_sku_id

    # Only consult the database when at least one start ID is missing AND
    # the full set of connection options was supplied.
    have_db_args = all([args.db_host, args.db_database, args.db_username, args.db_password])

    if (spu_start is None or sku_start is None) and have_db_args:
        print("Auto-calculating start IDs from database...")
        max_spu_id, max_sku_id = get_max_ids_from_db({
            'host': args.db_host,
            'port': args.db_port,
            'database': args.db_database,
            'username': args.db_username,
            'password': args.db_password,
        })
        spu_start = max_spu_id + 1 if spu_start is None else spu_start
        sku_start = max_sku_id + 1 if sku_start is None else sku_start
        print(f"  Max SPU ID in DB: {max_spu_id}, using start SPU ID: {spu_start}")
        print(f"  Max SKU ID in DB: {max_sku_id}, using start SKU ID: {sku_start}")
    else:
        # No database available: fall back to 1 for anything unspecified.
        spu_start = 1 if spu_start is None else spu_start
        sku_start = 1 if sku_start is None else sku_start
        print(f"Using start SPU ID: {spu_start}, start SKU ID: {sku_start}")

    print(f"Generating {args.num_spus} SPUs with skus...")

    spus = generate_spu_data(args.num_spus, args.tenant_id, spu_start)
    print(f"Generated {len(spus)} SPUs")

    skus = generate_sku_data(spus, sku_start)
    print(f"Generated {len(skus)} SKUs")

    generate_sql_inserts(spus, skus, args.output)
    print(f"SQL file generated: {args.output}")
| 416 | - | ||
| 417 | - | ||
if __name__ == '__main__':
    # A stray, unused "import json" used to live in this guard; removed.
    main()
| 421 | - |
scripts/generate_test_summary.py deleted
| @@ -1,179 +0,0 @@ | @@ -1,179 +0,0 @@ | ||
| 1 | -#!/usr/bin/env python3 | ||
| 2 | -""" | ||
| 3 | -生成测试摘要脚本 | ||
| 4 | - | ||
| 5 | -用于CI/CD流水线中汇总所有测试结果 | ||
| 6 | -""" | ||
| 7 | - | ||
| 8 | -import json | ||
| 9 | -import os | ||
| 10 | -import sys | ||
| 11 | -import glob | ||
| 12 | -from pathlib import Path | ||
| 13 | -from datetime import datetime | ||
| 14 | -from typing import Dict, Any, List | ||
| 15 | - | ||
| 16 | - | ||
def collect_test_results() -> Dict[str, Any]:
    """Aggregate every ``*_test_results.json`` file in the working directory.

    Returns:
        Dict with the collection timestamp, per-suite counters under
        ``suites`` and aggregated counters (including ``success_rate``
        as a 0-100 percentage) under ``summary``.
    """
    timestamp = datetime.now().isoformat()
    suites: Dict[str, Any] = {}
    totals: Dict[str, Any] = {
        'total_tests': 0,
        'passed': 0,
        'failed': 0,
        'skipped': 0,
        'errors': 0,
        'total_duration': 0.0,
    }

    for path in glob.glob('*_test_results.json'):
        try:
            with open(path, 'r', encoding='utf-8') as fh:
                data = json.load(fh)

            if 'summary' not in data:
                continue
            suite_summary = data['summary']

            suite_name = path.replace('_test_results.json', '')
            counters = {
                'total': suite_summary.get('total', 0),
                'passed': suite_summary.get('passed', 0),
                'failed': suite_summary.get('failed', 0),
                'skipped': suite_summary.get('skipped', 0),
                # NOTE: the per-suite files use the singular key 'error'.
                'errors': suite_summary.get('error', 0),
                'duration': suite_summary.get('duration', 0.0),
            }
            suites[suite_name] = counters

            totals['total_tests'] += counters['total']
            totals['passed'] += counters['passed']
            totals['failed'] += counters['failed']
            totals['skipped'] += counters['skipped']
            totals['errors'] += counters['errors']
            totals['total_duration'] += counters['duration']
        except Exception as e:
            # A single unreadable file must not abort the whole summary.
            print(f"Error reading {path}: {e}")
            continue

    ran = totals['total_tests']
    totals['success_rate'] = (totals['passed'] / ran * 100) if ran > 0 else 0.0

    return {
        'timestamp': timestamp,
        'suites': suites,
        'summary': totals,
    }
| 74 | - | ||
| 75 | - | ||
def generate_text_report(results: Dict[str, Any]) -> str:
    """Render the aggregated test results as a human-readable text report."""
    summary = results['summary']
    out: List[str] = []
    emit = out.append

    # Header
    emit("=" * 60)
    emit("搜索引擎自动化测试报告")
    emit("=" * 60)
    emit(f"时间: {results['timestamp']}")
    emit("")

    # Overall counters
    emit("📊 测试摘要")
    emit("-" * 30)
    emit(f"总测试数: {summary['total_tests']}")
    emit(f"✅ 通过: {summary['passed']}")
    emit(f"❌ 失败: {summary['failed']}")
    emit(f"⏭️ 跳过: {summary['skipped']}")
    emit(f"🚨 错误: {summary['errors']}")
    emit(f"📈 成功率: {summary['success_rate']:.1f}%")
    emit(f"⏱️ 总耗时: {summary['total_duration']:.2f}秒")
    emit("")

    # Overall verdict
    if summary['failed'] == 0 and summary['errors'] == 0:
        emit("🎉 所有测试都通过了!")
    else:
        emit("⚠️ 存在失败的测试,请查看详细日志。")
    emit("")

    # Per-suite breakdown
    if results['suites']:
        emit("📋 测试套件详情")
        emit("-" * 30)

        for suite_name, suite in results['suites'].items():
            emit(f"\n{suite_name.upper()}:")
            emit(f"  总数: {suite['total']}")
            emit(f"  ✅ 通过: {suite['passed']}")
            emit(f"  ❌ 失败: {suite['failed']}")
            emit(f"  ⏭️ 跳过: {suite['skipped']}")
            emit(f"  🚨 错误: {suite['errors']}")
            emit(f"  ⏱️ 耗时: {suite['duration']:.2f}秒")

            # Per-suite verdict line
            suite_clean = suite['failed'] == 0 and suite['errors'] == 0
            emit("  状态: ✅ 全部通过" if suite_clean else "  状态: ❌ 存在问题")

    emit("")
    emit("=" * 60)

    return "\n".join(out)
| 131 | - | ||
| 132 | - | ||
def generate_json_report(results: Dict[str, Any]) -> str:
    """Serialize the aggregated results as pretty-printed JSON text."""
    # ensure_ascii=False keeps the Chinese labels human-readable in the file.
    rendered = json.dumps(results, indent=2, ensure_ascii=False)
    return rendered
| 136 | - | ||
| 137 | - | ||
def main():
    """Entry point: collect results, write text/JSON reports, return exit code.

    Returns:
        int: 0 when every suite passed, 1 when any test failed or errored
        (suitable for sys.exit in a CI stage).
    """
    # Collect results from the working directory
    print("收集测试结果...")
    results = collect_test_results()

    # Render both report formats
    print("生成测试报告...")
    text_report = generate_text_report(results)
    json_report = generate_json_report(results)

    # Fixed artifact names so the CI pipeline can pick them up.
    # (An unused timestamp variable and placeholder-free f-strings used
    # to live here; removed -- the filenames were always these constants.)
    text_file = "final_test_report.txt"
    with open(text_file, 'w', encoding='utf-8') as f:
        f.write(text_report)

    json_file = "final_test_report.json"
    with open(json_file, 'w', encoding='utf-8') as f:
        f.write(json_report)

    print("测试报告已生成:")
    print(f"  文本报告: {text_file}")
    print(f"  JSON报告: {json_file}")

    # Echo the summary to the console for the CI log
    print("\n" + "=" * 60)
    print(text_report)

    # Non-zero exit code makes the CI stage fail
    summary = results['summary']
    if summary['failed'] > 0 or summary['errors'] > 0:
        return 1
    return 0
| 176 | - | ||
| 177 | - | ||
| 178 | -if __name__ == "__main__": | ||
| 179 | - sys.exit(main()) | ||
| 180 | \ No newline at end of file | 0 | \ No newline at end of file |
scripts/import_tenant2_csv.py renamed to scripts/indexer__old_2025_11/import_tenant2_csv.py
scripts/import_test_data.py renamed to scripts/indexer__old_2025_11/import_test_data.py
scripts/ingest.sh renamed to scripts/indexer__old_2025_11/ingest.sh
scripts/ingest_shoplazza.py renamed to scripts/indexer__old_2025_11/ingest_shoplazza.py
scripts/recreate_and_import.py renamed to scripts/indexer__old_2025_11/recreate_and_import.py
scripts/run_tests.py deleted
| @@ -1,705 +0,0 @@ | @@ -1,705 +0,0 @@ | ||
| 1 | -#!/usr/bin/env python3 | ||
| 2 | -""" | ||
| 3 | -测试执行脚本 | ||
| 4 | - | ||
| 5 | -运行完整的测试流水线,包括: | ||
| 6 | -- 环境检查 | ||
| 7 | -- 单元测试 | ||
| 8 | -- 集成测试 | ||
| 9 | -- 性能测试 | ||
| 10 | -- 测试报告生成 | ||
| 11 | -""" | ||
| 12 | - | ||
| 13 | -import os | ||
| 14 | -import sys | ||
| 15 | -import subprocess | ||
| 16 | -import time | ||
| 17 | -import json | ||
| 18 | -import argparse | ||
| 19 | -import logging | ||
| 20 | -from pathlib import Path | ||
| 21 | -from typing import Dict, List, Optional, Any | ||
| 22 | -from dataclasses import dataclass, asdict | ||
| 23 | -from datetime import datetime | ||
| 24 | - | ||
| 25 | - | ||
| 26 | -# 添加项目根目录到Python路径 | ||
| 27 | -project_root = Path(__file__).parent.parent | ||
| 28 | -sys.path.insert(0, str(project_root)) | ||
| 29 | - | ||
| 30 | - | ||
@dataclass
class TestResult:
    """Outcome of a single test case."""

    name: str
    status: str  # one of: "passed", "failed", "skipped", "error"
    duration: float  # wall-clock seconds
    details: Optional[Dict[str, Any]] = None  # optional structured extras
    output: Optional[str] = None  # captured stdout/stderr, if any
    error: Optional[str] = None  # error message / traceback text, if any
| 40 | - | ||
| 41 | - | ||
@dataclass
class TestSuiteResult:
    """Aggregated outcome of one test suite."""

    name: str
    total_tests: int
    passed: int
    failed: int
    skipped: int
    errors: int
    duration: float  # total wall-clock seconds for the suite
    results: List[TestResult]  # individual case outcomes
| 53 | - | ||
| 54 | - | ||
| 55 | -class TestRunner: | ||
| 56 | - """测试运行器""" | ||
| 57 | - | ||
| 58 | - def __init__(self, config: Dict[str, Any]): | ||
| 59 | - self.config = config | ||
| 60 | - self.logger = self._setup_logger() | ||
| 61 | - self.results: List[TestSuiteResult] = [] | ||
| 62 | - self.start_time = time.time() | ||
| 63 | - | ||
| 64 | - def _setup_logger(self) -> logging.Logger: | ||
| 65 | - """设置日志记录器""" | ||
| 66 | - log_level = getattr(logging, self.config.get('log_level', 'INFO').upper()) | ||
| 67 | - logging.basicConfig( | ||
| 68 | - level=log_level, | ||
| 69 | - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', | ||
| 70 | - handlers=[ | ||
| 71 | - logging.StreamHandler(), | ||
| 72 | - logging.FileHandler( | ||
| 73 | - project_root / 'test_logs' / f'test_run_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log' | ||
| 74 | - ) | ||
| 75 | - ] | ||
| 76 | - ) | ||
| 77 | - return logging.getLogger(__name__) | ||
| 78 | - | ||
| 79 | - def _run_command(self, cmd: List[str], cwd: Optional[Path] = None, env: Optional[Dict[str, str]] = None) -> subprocess.CompletedProcess: | ||
| 80 | - """运行命令""" | ||
| 81 | - try: | ||
| 82 | - self.logger.info(f"执行命令: {' '.join(cmd)}") | ||
| 83 | - | ||
| 84 | - # 设置环境变量 | ||
| 85 | - process_env = os.environ.copy() | ||
| 86 | - if env: | ||
| 87 | - process_env.update(env) | ||
| 88 | - | ||
| 89 | - result = subprocess.run( | ||
| 90 | - cmd, | ||
| 91 | - cwd=cwd or project_root, | ||
| 92 | - env=process_env, | ||
| 93 | - capture_output=True, | ||
| 94 | - text=True, | ||
| 95 | - timeout=self.config.get('test_timeout', 300) | ||
| 96 | - ) | ||
| 97 | - | ||
| 98 | - self.logger.debug(f"命令返回码: {result.returncode}") | ||
| 99 | - if result.stdout: | ||
| 100 | - self.logger.debug(f"标准输出: {result.stdout[:500]}...") | ||
| 101 | - if result.stderr: | ||
| 102 | - self.logger.debug(f"标准错误: {result.stderr[:500]}...") | ||
| 103 | - | ||
| 104 | - return result | ||
| 105 | - | ||
| 106 | - except subprocess.TimeoutExpired: | ||
| 107 | - self.logger.error(f"命令执行超时: {' '.join(cmd)}") | ||
| 108 | - raise | ||
| 109 | - except Exception as e: | ||
| 110 | - self.logger.error(f"命令执行失败: {e}") | ||
| 111 | - raise | ||
| 112 | - | ||
| 113 | - def check_environment(self) -> bool: | ||
| 114 | - """检查测试环境""" | ||
| 115 | - self.logger.info("检查测试环境...") | ||
| 116 | - | ||
| 117 | - checks = [] | ||
| 118 | - | ||
| 119 | - # 检查Python环境 | ||
| 120 | - try: | ||
| 121 | - python_version = sys.version | ||
| 122 | - self.logger.info(f"Python版本: {python_version}") | ||
| 123 | - checks.append(("Python", True, f"版本 {python_version}")) | ||
| 124 | - except Exception as e: | ||
| 125 | - checks.append(("Python", False, str(e))) | ||
| 126 | - | ||
| 127 | - # 检查conda环境 | ||
| 128 | - try: | ||
| 129 | - result = self._run_command(['conda', '--version']) | ||
| 130 | - if result.returncode == 0: | ||
| 131 | - conda_version = result.stdout.strip() | ||
| 132 | - self.logger.info(f"Conda版本: {conda_version}") | ||
| 133 | - checks.append(("Conda", True, conda_version)) | ||
| 134 | - else: | ||
| 135 | - checks.append(("Conda", False, "未找到conda")) | ||
| 136 | - except Exception as e: | ||
| 137 | - checks.append(("Conda", False, str(e))) | ||
| 138 | - | ||
| 139 | - # 检查依赖包 | ||
| 140 | - required_packages = [ | ||
| 141 | - 'pytest', 'fastapi', 'elasticsearch', 'numpy', | ||
| 142 | - 'torch', 'transformers', 'pyyaml' | ||
| 143 | - ] | ||
| 144 | - | ||
| 145 | - for package in required_packages: | ||
| 146 | - try: | ||
| 147 | - result = self._run_command(['python', '-c', f'import {package}']) | ||
| 148 | - if result.returncode == 0: | ||
| 149 | - checks.append((package, True, "已安装")) | ||
| 150 | - else: | ||
| 151 | - checks.append((package, False, "导入失败")) | ||
| 152 | - except Exception as e: | ||
| 153 | - checks.append((package, False, str(e))) | ||
| 154 | - | ||
| 155 | - # 检查Elasticsearch | ||
| 156 | - try: | ||
| 157 | - es_host = os.getenv('ES_HOST', 'http://localhost:9200') | ||
| 158 | - result = self._run_command(['curl', '-s', f'{es_host}/_cluster/health']) | ||
| 159 | - if result.returncode == 0: | ||
| 160 | - health_data = json.loads(result.stdout) | ||
| 161 | - status = health_data.get('status', 'unknown') | ||
| 162 | - self.logger.info(f"Elasticsearch状态: {status}") | ||
| 163 | - checks.append(("Elasticsearch", True, f"状态: {status}")) | ||
| 164 | - else: | ||
| 165 | - checks.append(("Elasticsearch", False, "连接失败")) | ||
| 166 | - except Exception as e: | ||
| 167 | - checks.append(("Elasticsearch", False, str(e))) | ||
| 168 | - | ||
| 169 | - # 检查API服务 | ||
| 170 | - try: | ||
| 171 | - api_host = os.getenv('API_HOST', '127.0.0.1') | ||
| 172 | - api_port = os.getenv('API_PORT', '6003') | ||
| 173 | - result = self._run_command(['curl', '-s', f'http://{api_host}:{api_port}/health']) | ||
| 174 | - if result.returncode == 0: | ||
| 175 | - health_data = json.loads(result.stdout) | ||
| 176 | - status = health_data.get('status', 'unknown') | ||
| 177 | - self.logger.info(f"API服务状态: {status}") | ||
| 178 | - checks.append(("API服务", True, f"状态: {status}")) | ||
| 179 | - else: | ||
| 180 | - checks.append(("API服务", False, "连接失败")) | ||
| 181 | - except Exception as e: | ||
| 182 | - checks.append(("API服务", False, str(e))) | ||
| 183 | - | ||
| 184 | - # 输出检查结果 | ||
| 185 | - self.logger.info("环境检查结果:") | ||
| 186 | - all_passed = True | ||
| 187 | - for name, passed, details in checks: | ||
| 188 | - status = "✓" if passed else "✗" | ||
| 189 | - self.logger.info(f" {status} {name}: {details}") | ||
| 190 | - if not passed: | ||
| 191 | - all_passed = False | ||
| 192 | - | ||
| 193 | - return all_passed | ||
| 194 | - | ||
| 195 | - def run_unit_tests(self) -> TestSuiteResult: | ||
| 196 | - """运行单元测试""" | ||
| 197 | - self.logger.info("运行单元测试...") | ||
| 198 | - | ||
| 199 | - start_time = time.time() | ||
| 200 | - cmd = [ | ||
| 201 | - 'python', '-m', 'pytest', | ||
| 202 | - 'tests/unit/', | ||
| 203 | - '-v', | ||
| 204 | - '--tb=short', | ||
| 205 | - '--json-report', | ||
| 206 | - '--json-report-file=test_logs/unit_test_results.json' | ||
| 207 | - ] | ||
| 208 | - | ||
| 209 | - try: | ||
| 210 | - result = self._run_command(cmd) | ||
| 211 | - duration = time.time() - start_time | ||
| 212 | - | ||
| 213 | - # 解析测试结果 | ||
| 214 | - if result.returncode == 0: | ||
| 215 | - status = "passed" | ||
| 216 | - else: | ||
| 217 | - status = "failed" | ||
| 218 | - | ||
| 219 | - # 尝试解析JSON报告 | ||
| 220 | - test_results = [] | ||
| 221 | - passed = failed = skipped = errors = 0 | ||
| 222 | - | ||
| 223 | - try: | ||
| 224 | - with open(project_root / 'test_logs' / 'unit_test_results.json', 'r') as f: | ||
| 225 | - report_data = json.load(f) | ||
| 226 | - | ||
| 227 | - summary = report_data.get('summary', {}) | ||
| 228 | - total = summary.get('total', 0) | ||
| 229 | - passed = summary.get('passed', 0) | ||
| 230 | - failed = summary.get('failed', 0) | ||
| 231 | - skipped = summary.get('skipped', 0) | ||
| 232 | - errors = summary.get('error', 0) | ||
| 233 | - | ||
| 234 | - # 获取详细结果 | ||
| 235 | - for test in report_data.get('tests', []): | ||
| 236 | - test_results.append(TestResult( | ||
| 237 | - name=test.get('nodeid', ''), | ||
| 238 | - status=test.get('outcome', 'unknown'), | ||
| 239 | - duration=test.get('duration', 0.0), | ||
| 240 | - details=test | ||
| 241 | - )) | ||
| 242 | - | ||
| 243 | - except Exception as e: | ||
| 244 | - self.logger.warning(f"无法解析单元测试JSON报告: {e}") | ||
| 245 | - | ||
| 246 | - suite_result = TestSuiteResult( | ||
| 247 | - name="单元测试", | ||
| 248 | - total_tests=passed + failed + skipped + errors, | ||
| 249 | - passed=passed, | ||
| 250 | - failed=failed, | ||
| 251 | - skipped=skipped, | ||
| 252 | - errors=errors, | ||
| 253 | - duration=duration, | ||
| 254 | - results=test_results | ||
| 255 | - ) | ||
| 256 | - | ||
| 257 | - self.results.append(suite_result) | ||
| 258 | - self.logger.info(f"单元测试完成: {suite_result.total_tests}个测试, " | ||
| 259 | - f"{suite_result.passed}通过, {suite_result.failed}失败, " | ||
| 260 | - f"{suite_result.skipped}跳过, {suite_result.errors}错误") | ||
| 261 | - | ||
| 262 | - return suite_result | ||
| 263 | - | ||
| 264 | - except Exception as e: | ||
| 265 | - self.logger.error(f"单元测试执行失败: {e}") | ||
| 266 | - raise | ||
| 267 | - | ||
| 268 | - def run_integration_tests(self) -> TestSuiteResult: | ||
| 269 | - """运行集成测试""" | ||
| 270 | - self.logger.info("运行集成测试...") | ||
| 271 | - | ||
| 272 | - start_time = time.time() | ||
| 273 | - cmd = [ | ||
| 274 | - 'python', '-m', 'pytest', | ||
| 275 | - 'tests/integration/', | ||
| 276 | - '-v', | ||
| 277 | - '--tb=short', | ||
| 278 | - '-m', 'not slow', # 排除慢速测试 | ||
| 279 | - '--json-report', | ||
| 280 | - '--json-report-file=test_logs/integration_test_results.json' | ||
| 281 | - ] | ||
| 282 | - | ||
| 283 | - try: | ||
| 284 | - result = self._run_command(cmd) | ||
| 285 | - duration = time.time() - start_time | ||
| 286 | - | ||
| 287 | - # 解析测试结果 | ||
| 288 | - if result.returncode == 0: | ||
| 289 | - status = "passed" | ||
| 290 | - else: | ||
| 291 | - status = "failed" | ||
| 292 | - | ||
| 293 | - # 尝试解析JSON报告 | ||
| 294 | - test_results = [] | ||
| 295 | - passed = failed = skipped = errors = 0 | ||
| 296 | - | ||
| 297 | - try: | ||
| 298 | - with open(project_root / 'test_logs' / 'integration_test_results.json', 'r') as f: | ||
| 299 | - report_data = json.load(f) | ||
| 300 | - | ||
| 301 | - summary = report_data.get('summary', {}) | ||
| 302 | - total = summary.get('total', 0) | ||
| 303 | - passed = summary.get('passed', 0) | ||
| 304 | - failed = summary.get('failed', 0) | ||
| 305 | - skipped = summary.get('skipped', 0) | ||
| 306 | - errors = summary.get('error', 0) | ||
| 307 | - | ||
| 308 | - for test in report_data.get('tests', []): | ||
| 309 | - test_results.append(TestResult( | ||
| 310 | - name=test.get('nodeid', ''), | ||
| 311 | - status=test.get('outcome', 'unknown'), | ||
| 312 | - duration=test.get('duration', 0.0), | ||
| 313 | - details=test | ||
| 314 | - )) | ||
| 315 | - | ||
| 316 | - except Exception as e: | ||
| 317 | - self.logger.warning(f"无法解析集成测试JSON报告: {e}") | ||
| 318 | - | ||
| 319 | - suite_result = TestSuiteResult( | ||
| 320 | - name="集成测试", | ||
| 321 | - total_tests=passed + failed + skipped + errors, | ||
| 322 | - passed=passed, | ||
| 323 | - failed=failed, | ||
| 324 | - skipped=skipped, | ||
| 325 | - errors=errors, | ||
| 326 | - duration=duration, | ||
| 327 | - results=test_results | ||
| 328 | - ) | ||
| 329 | - | ||
| 330 | - self.results.append(suite_result) | ||
| 331 | - self.logger.info(f"集成测试完成: {suite_result.total_tests}个测试, " | ||
| 332 | - f"{suite_result.passed}通过, {suite_result.failed}失败, " | ||
| 333 | - f"{suite_result.skipped}跳过, {suite_result.errors}错误") | ||
| 334 | - | ||
| 335 | - return suite_result | ||
| 336 | - | ||
| 337 | - except Exception as e: | ||
| 338 | - self.logger.error(f"集成测试执行失败: {e}") | ||
| 339 | - raise | ||
| 340 | - | ||
| 341 | - def run_api_tests(self) -> TestSuiteResult: | ||
| 342 | - """运行API测试""" | ||
| 343 | - self.logger.info("运行API测试...") | ||
| 344 | - | ||
| 345 | - start_time = time.time() | ||
| 346 | - cmd = [ | ||
| 347 | - 'python', '-m', 'pytest', | ||
| 348 | - 'tests/integration/test_api_integration.py', | ||
| 349 | - '-v', | ||
| 350 | - '--tb=short', | ||
| 351 | - '--json-report', | ||
| 352 | - '--json-report-file=test_logs/api_test_results.json' | ||
| 353 | - ] | ||
| 354 | - | ||
| 355 | - try: | ||
| 356 | - result = self._run_command(cmd) | ||
| 357 | - duration = time.time() - start_time | ||
| 358 | - | ||
| 359 | - # 解析测试结果 | ||
| 360 | - if result.returncode == 0: | ||
| 361 | - status = "passed" | ||
| 362 | - else: | ||
| 363 | - status = "failed" | ||
| 364 | - | ||
| 365 | - # 尝试解析JSON报告 | ||
| 366 | - test_results = [] | ||
| 367 | - passed = failed = skipped = errors = 0 | ||
| 368 | - | ||
| 369 | - try: | ||
| 370 | - with open(project_root / 'test_logs' / 'api_test_results.json', 'r') as f: | ||
| 371 | - report_data = json.load(f) | ||
| 372 | - | ||
| 373 | - summary = report_data.get('summary', {}) | ||
| 374 | - total = summary.get('total', 0) | ||
| 375 | - passed = summary.get('passed', 0) | ||
| 376 | - failed = summary.get('failed', 0) | ||
| 377 | - skipped = summary.get('skipped', 0) | ||
| 378 | - errors = summary.get('error', 0) | ||
| 379 | - | ||
| 380 | - for test in report_data.get('tests', []): | ||
| 381 | - test_results.append(TestResult( | ||
| 382 | - name=test.get('nodeid', ''), | ||
| 383 | - status=test.get('outcome', 'unknown'), | ||
| 384 | - duration=test.get('duration', 0.0), | ||
| 385 | - details=test | ||
| 386 | - )) | ||
| 387 | - | ||
| 388 | - except Exception as e: | ||
| 389 | - self.logger.warning(f"无法解析API测试JSON报告: {e}") | ||
| 390 | - | ||
| 391 | - suite_result = TestSuiteResult( | ||
| 392 | - name="API测试", | ||
| 393 | - total_tests=passed + failed + skipped + errors, | ||
| 394 | - passed=passed, | ||
| 395 | - failed=failed, | ||
| 396 | - skipped=skipped, | ||
| 397 | - errors=errors, | ||
| 398 | - duration=duration, | ||
| 399 | - results=test_results | ||
| 400 | - ) | ||
| 401 | - | ||
| 402 | - self.results.append(suite_result) | ||
| 403 | - self.logger.info(f"API测试完成: {suite_result.total_tests}个测试, " | ||
| 404 | - f"{suite_result.passed}通过, {suite_result.failed}失败, " | ||
| 405 | - f"{suite_result.skipped}跳过, {suite_result.errors}错误") | ||
| 406 | - | ||
| 407 | - return suite_result | ||
| 408 | - | ||
| 409 | - except Exception as e: | ||
| 410 | - self.logger.error(f"API测试执行失败: {e}") | ||
| 411 | - raise | ||
| 412 | - | ||
| 413 | - def run_performance_tests(self) -> TestSuiteResult: | ||
| 414 | - """运行性能测试""" | ||
| 415 | - self.logger.info("运行性能测试...") | ||
| 416 | - | ||
| 417 | - start_time = time.time() | ||
| 418 | - | ||
| 419 | - # 简单的性能测试 - 测试搜索响应时间 | ||
| 420 | - test_queries = [ | ||
| 421 | - "红色连衣裙", | ||
| 422 | - "智能手机", | ||
| 423 | - "笔记本电脑 AND (游戏 OR 办公)", | ||
| 424 | - "无线蓝牙耳机" | ||
| 425 | - ] | ||
| 426 | - | ||
| 427 | - test_results = [] | ||
| 428 | - passed = failed = 0 | ||
| 429 | - | ||
| 430 | - for query in test_queries: | ||
| 431 | - try: | ||
| 432 | - query_start = time.time() | ||
| 433 | - result = self._run_command([ | ||
| 434 | - 'curl', '-s', | ||
| 435 | - f'http://{os.getenv("API_HOST", "127.0.0.1")}:{os.getenv("API_PORT", "6003")}/search', | ||
| 436 | - '-d', f'q={query}' | ||
| 437 | - ]) | ||
| 438 | - query_duration = time.time() - query_start | ||
| 439 | - | ||
| 440 | - if result.returncode == 0: | ||
| 441 | - response_data = json.loads(result.stdout) | ||
| 442 | - took_ms = response_data.get('took_ms', 0) | ||
| 443 | - | ||
| 444 | - # 性能阈值:响应时间不超过2秒 | ||
| 445 | - if took_ms <= 2000: | ||
| 446 | - test_results.append(TestResult( | ||
| 447 | - name=f"搜索性能测试: {query}", | ||
| 448 | - status="passed", | ||
| 449 | - duration=query_duration, | ||
| 450 | - details={"took_ms": took_ms, "response_size": len(result.stdout)} | ||
| 451 | - )) | ||
| 452 | - passed += 1 | ||
| 453 | - else: | ||
| 454 | - test_results.append(TestResult( | ||
| 455 | - name=f"搜索性能测试: {query}", | ||
| 456 | - status="failed", | ||
| 457 | - duration=query_duration, | ||
| 458 | - details={"took_ms": took_ms, "threshold": 2000} | ||
| 459 | - )) | ||
| 460 | - failed += 1 | ||
| 461 | - else: | ||
| 462 | - test_results.append(TestResult( | ||
| 463 | - name=f"搜索性能测试: {query}", | ||
| 464 | - status="failed", | ||
| 465 | - duration=query_duration, | ||
| 466 | - error=result.stderr | ||
| 467 | - )) | ||
| 468 | - failed += 1 | ||
| 469 | - | ||
| 470 | - except Exception as e: | ||
| 471 | - test_results.append(TestResult( | ||
| 472 | - name=f"搜索性能测试: {query}", | ||
| 473 | - status="error", | ||
| 474 | - duration=0.0, | ||
| 475 | - error=str(e) | ||
| 476 | - )) | ||
| 477 | - failed += 1 | ||
| 478 | - | ||
| 479 | - duration = time.time() - start_time | ||
| 480 | - | ||
| 481 | - suite_result = TestSuiteResult( | ||
| 482 | - name="性能测试", | ||
| 483 | - total_tests=len(test_results), | ||
| 484 | - passed=passed, | ||
| 485 | - failed=failed, | ||
| 486 | - skipped=0, | ||
| 487 | - errors=0, | ||
| 488 | - duration=duration, | ||
| 489 | - results=test_results | ||
| 490 | - ) | ||
| 491 | - | ||
| 492 | - self.results.append(suite_result) | ||
| 493 | - self.logger.info(f"性能测试完成: {suite_result.total_tests}个测试, " | ||
| 494 | - f"{suite_result.passed}通过, {suite_result.failed}失败") | ||
| 495 | - | ||
| 496 | - return suite_result | ||
| 497 | - | ||
| 498 | - def generate_report(self) -> str: | ||
| 499 | - """生成测试报告""" | ||
| 500 | - self.logger.info("生成测试报告...") | ||
| 501 | - | ||
| 502 | - # 计算总体统计 | ||
| 503 | - total_tests = sum(suite.total_tests for suite in self.results) | ||
| 504 | - total_passed = sum(suite.passed for suite in self.results) | ||
| 505 | - total_failed = sum(suite.failed for suite in self.results) | ||
| 506 | - total_skipped = sum(suite.skipped for suite in self.results) | ||
| 507 | - total_errors = sum(suite.errors for suite in self.results) | ||
| 508 | - total_duration = sum(suite.duration for suite in self.results) | ||
| 509 | - | ||
| 510 | - # 生成报告数据 | ||
| 511 | - report_data = { | ||
| 512 | - "timestamp": datetime.now().isoformat(), | ||
| 513 | - "summary": { | ||
| 514 | - "total_tests": total_tests, | ||
| 515 | - "passed": total_passed, | ||
| 516 | - "failed": total_failed, | ||
| 517 | - "skipped": total_skipped, | ||
| 518 | - "errors": total_errors, | ||
| 519 | - "success_rate": (total_passed / total_tests * 100) if total_tests > 0 else 0, | ||
| 520 | - "total_duration": total_duration | ||
| 521 | - }, | ||
| 522 | - "suites": [asdict(suite) for suite in self.results] | ||
| 523 | - } | ||
| 524 | - | ||
| 525 | - # 保存JSON报告 | ||
| 526 | - report_file = project_root / 'test_logs' / f'test_report_{datetime.now().strftime("%Y%m%d_%H%M%S")}.json' | ||
| 527 | - with open(report_file, 'w', encoding='utf-8') as f: | ||
| 528 | - json.dump(report_data, f, indent=2, ensure_ascii=False) | ||
| 529 | - | ||
| 530 | - # 生成文本报告 | ||
| 531 | - text_report = self._generate_text_report(report_data) | ||
| 532 | - | ||
| 533 | - report_file_text = project_root / 'test_logs' / f'test_report_{datetime.now().strftime("%Y%m%d_%H%M%S")}.txt' | ||
| 534 | - with open(report_file_text, 'w', encoding='utf-8') as f: | ||
| 535 | - f.write(text_report) | ||
| 536 | - | ||
| 537 | - self.logger.info(f"测试报告已保存: {report_file}") | ||
| 538 | - self.logger.info(f"文本报告已保存: {report_file_text}") | ||
| 539 | - | ||
| 540 | - return text_report | ||
| 541 | - | ||
| 542 | - def _generate_text_report(self, report_data: Dict[str, Any]) -> str: | ||
| 543 | - """生成文本格式的测试报告""" | ||
| 544 | - lines = [] | ||
| 545 | - | ||
| 546 | - # 标题 | ||
| 547 | - lines.append("=" * 60) | ||
| 548 | - lines.append("搜索引擎测试报告") | ||
| 549 | - lines.append("=" * 60) | ||
| 550 | - lines.append(f"时间: {report_data['timestamp']}") | ||
| 551 | - lines.append("") | ||
| 552 | - | ||
| 553 | - # 摘要 | ||
| 554 | - summary = report_data['summary'] | ||
| 555 | - lines.append("测试摘要") | ||
| 556 | - lines.append("-" * 30) | ||
| 557 | - lines.append(f"总测试数: {summary['total_tests']}") | ||
| 558 | - lines.append(f"通过: {summary['passed']}") | ||
| 559 | - lines.append(f"失败: {summary['failed']}") | ||
| 560 | - lines.append(f"跳过: {summary['skipped']}") | ||
| 561 | - lines.append(f"错误: {summary['errors']}") | ||
| 562 | - lines.append(f"成功率: {summary['success_rate']:.1f}%") | ||
| 563 | - lines.append(f"总耗时: {summary['total_duration']:.2f}秒") | ||
| 564 | - lines.append("") | ||
| 565 | - | ||
| 566 | - # 各测试套件详情 | ||
| 567 | - lines.append("测试套件详情") | ||
| 568 | - lines.append("-" * 30) | ||
| 569 | - | ||
| 570 | - for suite in report_data['suites']: | ||
| 571 | - lines.append(f"\n{suite['name']}:") | ||
| 572 | - lines.append(f" 总数: {suite['total_tests']}, 通过: {suite['passed']}, " | ||
| 573 | - f"失败: {suite['failed']}, 跳过: {suite['skipped']}, 错误: {suite['errors']}") | ||
| 574 | - lines.append(f" 耗时: {suite['duration']:.2f}秒") | ||
| 575 | - | ||
| 576 | - # 显示失败的测试 | ||
| 577 | - failed_tests = [r for r in suite['results'] if r['status'] in ['failed', 'error']] | ||
| 578 | - if failed_tests: | ||
| 579 | - lines.append(" 失败的测试:") | ||
| 580 | - for test in failed_tests[:5]: # 只显示前5个 | ||
| 581 | - lines.append(f" - {test['name']}: {test['status']}") | ||
| 582 | - if test.get('error'): | ||
| 583 | - lines.append(f" 错误: {test['error'][:100]}...") | ||
| 584 | - if len(failed_tests) > 5: | ||
| 585 | - lines.append(f" ... 还有 {len(failed_tests) - 5} 个失败的测试") | ||
| 586 | - | ||
| 587 | - return "\n".join(lines) | ||
| 588 | - | ||
| 589 | - def run_all_tests(self) -> bool: | ||
| 590 | - """运行所有测试""" | ||
| 591 | - try: | ||
| 592 | - # 确保日志目录存在 | ||
| 593 | - (project_root / 'test_logs').mkdir(exist_ok=True) | ||
| 594 | - | ||
| 595 | - # 加载环境变量 | ||
| 596 | - env_file = project_root / 'test_env.sh' | ||
| 597 | - if env_file.exists(): | ||
| 598 | - self.logger.info("加载测试环境变量...") | ||
| 599 | - result = self._run_command(['bash', str(env_file)]) | ||
| 600 | - if result.returncode != 0: | ||
| 601 | - self.logger.warning("环境变量加载失败,继续使用默认配置") | ||
| 602 | - | ||
| 603 | - # 检查环境 | ||
| 604 | - if not self.check_environment(): | ||
| 605 | - self.logger.error("环境检查失败,请先启动测试环境") | ||
| 606 | - return False | ||
| 607 | - | ||
| 608 | - # 运行各类测试 | ||
| 609 | - test_suites = [ | ||
| 610 | - ("unit", self.run_unit_tests), | ||
| 611 | - ("integration", self.run_integration_tests), | ||
| 612 | - ("api", self.run_api_tests), | ||
| 613 | - ("performance", self.run_performance_tests) | ||
| 614 | - ] | ||
| 615 | - | ||
| 616 | - failed_suites = [] | ||
| 617 | - | ||
| 618 | - for suite_name, suite_func in test_suites: | ||
| 619 | - if suite_name in self.config.get('skip_suites', []): | ||
| 620 | - self.logger.info(f"跳过 {suite_name} 测试") | ||
| 621 | - continue | ||
| 622 | - | ||
| 623 | - try: | ||
| 624 | - suite_result = suite_func() | ||
| 625 | - if suite_result.failed > 0 or suite_result.errors > 0: | ||
| 626 | - failed_suites.append(suite_name) | ||
| 627 | - except Exception as e: | ||
| 628 | - self.logger.error(f"{suite_name} 测试执行失败: {e}") | ||
| 629 | - failed_suites.append(suite_name) | ||
| 630 | - | ||
| 631 | - # 生成报告 | ||
| 632 | - report = self.generate_report() | ||
| 633 | - print(report) | ||
| 634 | - | ||
| 635 | - # 返回测试结果 | ||
| 636 | - return len(failed_suites) == 0 | ||
| 637 | - | ||
| 638 | - except Exception as e: | ||
| 639 | - self.logger.error(f"测试执行失败: {e}") | ||
| 640 | - return False | ||
| 641 | - | ||
| 642 | - | ||
def main():
    """CLI entry point.

    Parses options, optionally starts/stops the test environment around
    the run, executes the pipeline and returns a process exit code
    (0 = all suites passed, 1 = failure).
    """
    arg_parser = argparse.ArgumentParser(description="运行搜索引擎测试流水线")
    arg_parser.add_argument('--skip-suites', nargs='+',
                            choices=['unit', 'integration', 'api', 'performance'],
                            help='跳过指定的测试套件')
    arg_parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                            default='INFO', help='日志级别')
    arg_parser.add_argument('--test-timeout', type=int, default=300,
                            help='单个测试超时时间(秒)')
    arg_parser.add_argument('--start-env', action='store_true',
                            help='启动测试环境后运行测试')
    arg_parser.add_argument('--stop-env', action='store_true',
                            help='测试完成后停止测试环境')

    opts = arg_parser.parse_args()

    # Runner configuration assembled from the CLI options.
    runner_config = {
        'skip_suites': opts.skip_suites or [],
        'log_level': opts.log_level,
        'test_timeout': opts.test_timeout
    }

    # Optionally bring the environment up before running anything.
    if opts.start_env:
        print("启动测试环境...")
        start_proc = subprocess.run([
            'bash', str(project_root / 'scripts' / 'start_test_environment.sh')
        ], capture_output=True, text=True)

        if start_proc.returncode != 0:
            print(f"测试环境启动失败: {start_proc.stderr}")
            return 1

        print("测试环境启动成功")
        time.sleep(5)  # give the services a moment to finish booting

    try:
        all_green = TestRunner(runner_config).run_all_tests()

        if all_green:
            print("\n🎉 所有测试通过!")
            exit_status = 0
        else:
            print("\n❌ 部分测试失败,请查看日志")
            exit_status = 1

    finally:
        # Tear the environment down even if the run raised.
        if opts.stop_env:
            print("\n停止测试环境...")
            subprocess.run([
                'bash', str(project_root / 'scripts' / 'stop_test_environment.sh')
            ])

    return exit_status
| 702 | - | ||
| 703 | - | ||
# Script entry point: the process exit code mirrors the pipeline result
# (0 = all suites passed, 1 = failure).
if __name__ == "__main__":
    sys.exit(main())
| 706 | \ No newline at end of file | 0 | \ No newline at end of file |
scripts/start_test_environment.sh deleted
| @@ -1,275 +0,0 @@ | @@ -1,275 +0,0 @@ | ||
#!/bin/bash

# Start-test-environment script.
# Brings up the dependency services required by the pre-commit automated tests.

set -e

# ANSI color codes for log output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Paths and PID bookkeeping
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
TEST_LOG_DIR="$PROJECT_ROOT/test_logs"
PID_FILE="$PROJECT_ROOT/test_environment.pid"

# Log files
LOG_FILE="$TEST_LOG_DIR/test_environment.log"
ES_LOG_FILE="$TEST_LOG_DIR/elasticsearch.log"
API_LOG_FILE="$TEST_LOG_DIR/api_test.log"

echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}启动测试环境${NC}"
echo -e "${GREEN}========================================${NC}"

# Create the log directory
mkdir -p "$TEST_LOG_DIR"

# Refuse to start twice: if the PID file points at a live process, bail out.
if [ -f "$PID_FILE" ]; then
    OLD_PID=$(cat "$PID_FILE")
    if ps -p $OLD_PID > /dev/null 2>&1; then
        echo -e "${YELLOW}测试环境已在运行 (PID: $OLD_PID)${NC}"
        echo -e "${BLUE}如需重启,请先运行: ./scripts/stop_test_environment.sh${NC}"
        exit 0
    else
        # Stale PID file from a dead process — remove it.
        rm -f "$PID_FILE"
    fi
fi

# Activate the conda environment
echo -e "${BLUE}激活conda环境...${NC}"
source /home/tw/miniconda3/etc/profile.d/conda.sh
conda activate searchengine

# Export the test-environment variables
echo -e "${BLUE}设置测试环境变量...${NC}"
export PYTHONPATH="$PROJECT_ROOT:$PYTHONPATH"
export TESTING_MODE=true
export LOG_LEVEL=DEBUG

# Elasticsearch connection settings
export ES_HOST="http://localhost:9200"
export ES_USERNAME="elastic"
export ES_PASSWORD="changeme"

# API settings
export API_HOST="127.0.0.1"
export API_PORT="6003" # non-default port to avoid clashing with a dev server
export TENANT_ID="test_tenant"

# Test settings
export TEST_TIMEOUT=60
export TEST_RETRY_COUNT=3

echo -e "${BLUE}环境配置:${NC}"
echo " ES_HOST: $ES_HOST"
echo " API_HOST: $API_HOST:$API_PORT"
echo " TENANT_ID: $TENANT_ID"
echo " LOG_LEVEL: $LOG_LEVEL"
echo " TESTING_MODE: $TESTING_MODE"
# Check whether Elasticsearch is already running
echo -e "${BLUE}检查Elasticsearch状态...${NC}"
if curl -s "$ES_HOST/_cluster/health" > /dev/null; then
    echo -e "${GREEN}✓ Elasticsearch正在运行${NC}"
else
    echo -e "${YELLOW}⚠ Elasticsearch未运行,尝试启动...${NC}"

    # Try to start a locally installed Elasticsearch, if one exists on PATH.
    if command -v elasticsearch &> /dev/null; then
        echo -e "${BLUE}启动本地Elasticsearch...${NC}"
        elasticsearch -d -p "$TEST_LOG_DIR/es.pid"
        sleep 10

        # Re-check after giving the daemon time to come up.
        if curl -s "$ES_HOST/_cluster/health" > /dev/null; then
            echo -e "${GREEN}✓ Elasticsearch启动成功${NC}"
        else
            echo -e "${RED}✗ Elasticsearch启动失败${NC}"
            echo -e "${YELLOW}请手动启动Elasticsearch或配置远程ES地址${NC}"
            exit 1
        fi
    else
        echo -e "${RED}✗ 未找到本地Elasticsearch${NC}"
        echo -e "${YELLOW}请启动Elasticsearch服务或修改ES_HOST配置${NC}"
        exit 1
    fi
fi

# Wait until the cluster reports at least yellow health (30s budget).
echo -e "${BLUE}等待Elasticsearch就绪...${NC}"
for i in {1..30}; do
    # BUGFIX: use an ERE group — the old BRE pattern '"status":"green\|yellow"'
    # also matched a bare `yellow"` anywhere in the response.
    if curl -s "$ES_HOST/_cluster/health?wait_for_status=yellow&timeout=1s" | grep -Eq '"status":"(green|yellow)"'; then
        echo -e "${GREEN}✓ Elasticsearch已就绪${NC}"
        break
    fi
    if [ $i -eq 30 ]; then
        echo -e "${RED}✗ Elasticsearch就绪超时${NC}"
        exit 1
    fi
    sleep 1
done

# Create the test index (idempotent: a failure usually means it already exists)
echo -e "${BLUE}准备测试数据索引...${NC}"
curl -X PUT "$ES_HOST/test_products" -H 'Content-Type: application/json' -d'
{
  "settings": {
    "number_of_shards": 1,
    "number_of_replicas": 0,
    "analysis": {
      "analyzer": {
        "ansj": {
          "type": "custom",
          "tokenizer": "keyword"
        }
      }
    }
  },
  "mappings": {
    "properties": {
      "name": {
        "type": "text",
        "analyzer": "ansj"
      },
      "brand_name": {
        "type": "text",
        "analyzer": "ansj"
      },
      "tags": {
        "type": "text",
        "analyzer": "ansj"
      },
      "price": {
        "type": "double"
      },
      "category_id": {
        "type": "integer"
      },
      "spu_id": {
        "type": "keyword"
      },
      "text_embedding": {
        "type": "dense_vector",
        "dims": 1024
      }
    }
  }
}' > /dev/null 2>&1 || echo -e "${YELLOW}索引可能已存在${NC}"

# Insert sample documents. BUGFIX: the _bulk body is NDJSON and must NOT
# begin with a blank line, so the first action line now follows -d'
# immediately (the old leading newline made ES reject the whole request,
# which the || fallback silently swallowed).
echo -e "${BLUE}插入测试数据...${NC}"
curl -X POST "$ES_HOST/test_products/_bulk" -H 'Content-Type: application/json' -d'{"index": {"_id": "1"}}
{"name": "红色连衣裙", "brand_name": "测试品牌", "tags": ["红色", "连衣裙", "女装"], "price": 299.0, "category_id": 1, "spu_id": "dress_001"}
{"index": {"_id": "2"}}
{"name": "蓝色连衣裙", "brand_name": "测试品牌", "tags": ["蓝色", "连衣裙", "女装"], "price": 399.0, "category_id": 1, "spu_id": "dress_002"}
{"index": {"_id": "3"}}
{"name": "智能手机", "brand_name": "科技品牌", "tags": ["智能", "手机", "数码"], "price": 2999.0, "category_id": 2, "spu_id": "phone_001"}
{"index": {"_id": "4"}}
{"name": "笔记本电脑", "brand_name": "科技品牌", "tags": ["笔记本", "电脑", "办公"], "price": 5999.0, "category_id": 3, "spu_id": "laptop_001"}
' > /dev/null 2>&1 || echo -e "${YELLOW}测试数据可能已存在${NC}"
# Launch the API service under test (backgrounded, logs captured)
echo -e "${BLUE}启动测试API服务...${NC}"
cd "$PROJECT_ROOT"

python -m api.app \
    --host $API_HOST \
    --port $API_PORT \
    --tenant $TENANT_ID \
    --es-host $ES_HOST \
    > "$API_LOG_FILE" 2>&1 &

API_PID=$!
echo $API_PID > "$PID_FILE"

# Wait up to 30s for the API health endpoint to respond.
echo -e "${BLUE}等待API服务启动...${NC}"
for i in {1..30}; do
    if curl -s "http://$API_HOST:$API_PORT/health" > /dev/null; then
        echo -e "${GREEN}✓ API服务已就绪 (PID: $API_PID)${NC}"
        break
    fi
    if [ $i -eq 30 ]; then
        echo -e "${RED}✗ API服务启动超时${NC}"
        # Clean up the half-started service and PID file before failing.
        kill $API_PID 2>/dev/null || true
        rm -f "$PID_FILE"
        exit 1
    fi
    sleep 1
done

# Sanity-check the whole environment end to end
echo -e "${BLUE}验证测试环境...${NC}"

# Elasticsearch connectivity (ERE group: the old BRE '"green\|yellow"' also
# matched a bare `yellow"` anywhere in the response)
if curl -s "$ES_HOST/_cluster/health" | grep -Eq '"status":"(green|yellow)"'; then
    echo -e "${GREEN}✓ Elasticsearch连接正常${NC}"
else
    echo -e "${RED}✗ Elasticsearch连接失败${NC}"
    exit 1
fi

# API health check
if curl -s "http://$API_HOST:$API_PORT/health" | grep -q '"status"'; then
    echo -e "${GREEN}✓ API服务健康检查通过${NC}"
else
    echo -e "${RED}✗ API服务健康检查失败${NC}"
    exit 1
fi

# Basic search smoke test (non-fatal)
if curl -s "http://$API_HOST:$API_PORT/search?q=红色连衣裙" | grep -q '"hits"'; then
    echo -e "${GREEN}✓ 基本搜索功能正常${NC}"
else
    echo -e "${YELLOW}⚠ 基本搜索功能可能有问题,但继续进行${NC}"
fi

# Print a summary of the running environment
echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}测试环境启动完成!${NC}"
echo -e "${GREEN}========================================${NC}"
echo -e "${BLUE}服务信息:${NC}"
echo " Elasticsearch: $ES_HOST"
echo " API服务: http://$API_HOST:$API_PORT"
echo " 测试客户: $TENANT_ID"
echo -e "${BLUE}进程信息:${NC}"
echo " API PID: $API_PID"
echo " PID文件: $PID_FILE"
echo -e "${BLUE}日志文件:${NC}"
echo " 环境日志: $LOG_FILE"
echo " API日志: $API_LOG_FILE"
echo " ES日志: $ES_LOG_FILE"
echo -e "${BLUE}测试命令:${NC}"
echo " 运行所有测试: python scripts/run_tests.py"
echo " 单元测试: pytest tests/unit/ -v"
echo " 集成测试: pytest tests/integration/ -v"
echo " API测试: pytest tests/integration/test_api_integration.py -v"
# BUGFIX: was `echo "e${NC}"` — the -e flag had been swallowed into the
# string, printing a literal `e` followed by a raw escape sequence.
echo -e "${NC}"
echo -e "${BLUE}停止环境: ./scripts/stop_test_environment.sh${NC}"

# Persist the exported variables so the Python test runner can reload them.
cat > "$PROJECT_ROOT/test_env.sh" << EOF
#!/bin/bash
export ES_HOST="$ES_HOST"
export ES_USERNAME="$ES_USERNAME"
export ES_PASSWORD="$ES_PASSWORD"
export API_HOST="$API_HOST"
export API_PORT="$API_PORT"
export TENANT_ID="$TENANT_ID"
export TESTING_MODE="$TESTING_MODE"
export LOG_LEVEL="$LOG_LEVEL"
export PYTHONPATH="$PROJECT_ROOT:\$PYTHONPATH"
EOF

chmod +x "$PROJECT_ROOT/test_env.sh"

echo -e "${GREEN}测试环境已准备就绪!${NC}"
| 276 | \ No newline at end of file | 0 | \ No newline at end of file |
scripts/stop_test_environment.sh deleted
| @@ -1,82 +0,0 @@ | @@ -1,82 +0,0 @@ | ||
#!/bin/bash

# Stop the test environment: shut down the API service and (if it was
# started locally) Elasticsearch, then remove helper files and
# optionally delete the test index.

set -e

# ANSI color codes for readable console output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Resolve the project root relative to this script's location.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
PID_FILE="$PROJECT_ROOT/test_environment.pid"
ES_PID_FILE="$PROJECT_ROOT/test_logs/es.pid"

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}停止测试环境${NC}"
echo -e "${BLUE}========================================${NC}"

# Stop the API service recorded in the PID file.
if [ -f "$PID_FILE" ]; then
    API_PID=$(cat "$PID_FILE")
    if ps -p "$API_PID" > /dev/null 2>&1; then
        echo -e "${BLUE}停止API服务 (PID: $API_PID)...${NC}"
        # Guard the kill: the process may exit between the ps check and
        # the kill, and an unguarded failure would abort under `set -e`.
        kill "$API_PID" 2>/dev/null || true

        # Wait up to 10 seconds for a graceful exit, then force-kill.
        for i in {1..10}; do
            if ! ps -p "$API_PID" > /dev/null 2>&1; then
                echo -e "${GREEN}✓ API服务已停止${NC}"
                break
            fi
            if [ $i -eq 10 ]; then
                echo -e "${YELLOW}强制停止API服务...${NC}"
                kill -9 "$API_PID" 2>/dev/null || true
            fi
            sleep 1
        done
    else
        echo -e "${YELLOW}API服务进程不存在${NC}"
    fi
    rm -f "$PID_FILE"
else
    echo -e "${YELLOW}未找到API服务PID文件${NC}"
fi

# Stop Elasticsearch only if this environment started it locally
# (i.e. an ES PID file exists).
if [ -f "$ES_PID_FILE" ]; then
    ES_PID=$(cat "$ES_PID_FILE")
    if ps -p "$ES_PID" > /dev/null 2>&1; then
        echo -e "${BLUE}停止本地Elasticsearch (PID: $ES_PID)...${NC}"
        kill "$ES_PID" 2>/dev/null || true
        rm -f "$ES_PID_FILE"
        echo -e "${GREEN}✓ Elasticsearch已停止${NC}"
    else
        echo -e "${YELLOW}Elasticsearch进程不存在${NC}"
        rm -f "$ES_PID_FILE"
    fi
else
    echo -e "${BLUE}跳过本地Elasticsearch停止(未找到PID文件)${NC}"
fi

# Remove the sourced environment file created at startup.
echo -e "${BLUE}清理测试环境文件...${NC}"
rm -f "$PROJECT_ROOT/test_env.sh"

# Optionally delete the test index (interactive).
# Guard the read so EOF (e.g. non-interactive invocation) counts as "no"
# instead of aborting the script under `set -e`.
read -p "是否删除测试索引? (y/N): " -n 1 -r || REPLY=""
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo -e "${BLUE}删除测试索引...${NC}"
    curl -X DELETE "http://localhost:9200/test_products" 2>/dev/null || true
    echo -e "${GREEN}✓ 测试索引已删除${NC}"
fi

echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}测试环境已停止!${NC}"
echo -e "${GREEN}========================================${NC}"
| 83 | \ No newline at end of file | 0 | \ No newline at end of file |
scripts/test_base.py deleted
| @@ -1,242 +0,0 @@ | @@ -1,242 +0,0 @@ | ||
| 1 | -#!/usr/bin/env python3 | ||
| 2 | -""" | ||
| 3 | -Test script for base configuration. | ||
| 4 | - | ||
| 5 | -Tests data ingestion, search API, response format, and tenant isolation. | ||
| 6 | -""" | ||
| 7 | - | ||
| 8 | -import sys | ||
| 9 | -import os | ||
| 10 | -import argparse | ||
| 11 | -import requests | ||
| 12 | -import json | ||
| 13 | -from pathlib import Path | ||
| 14 | - | ||
| 15 | -# Add parent directory to path | ||
| 16 | -sys.path.insert(0, str(Path(__file__).parent.parent)) | ||
| 17 | - | ||
| 18 | - | ||
def test_search_api(base_url: str, tenant_id: str, query: str = "耳机"):
    """Exercise the POST /search/ endpoint and report the outcome.

    Args:
        base_url: API base URL
        tenant_id: Tenant ID (sent via the X-Tenant-ID header)
        query: Search query

    Returns:
        Response JSON or None if failed
    """
    endpoint = f"{base_url}/search/"
    request_headers = {
        "X-Tenant-ID": tenant_id,
        "Content-Type": "application/json"
    }
    body = {"query": query, "size": 10, "from": 0}

    print("\nTesting search API:")
    print(f" URL: {endpoint}")
    print(f" Query: {query}")
    print(f" Tenant ID: {tenant_id}")

    try:
        resp = requests.post(endpoint, json=body, headers=request_headers, timeout=30)
        resp.raise_for_status()
        payload = resp.json()

        print(f" Status: {resp.status_code}")
        print(f" Total: {payload.get('total', 0)}")
        print(f" Results: {len(payload.get('results', []))}")

        return payload
    except Exception as exc:
        # Best-effort test helper: any failure (HTTP, timeout, bad JSON)
        # is reported and signalled with None.
        print(f" ERROR: {exc}")
        return None
| 60 | - | ||
| 61 | - | ||
def validate_response_format(data: dict):
    """Check that a search response matches the public API contract.

    Args:
        data: Response data

    Returns:
        List of validation errors (empty if valid)
    """
    problems = []

    # The public contract exposes 'results'; raw ES 'hits' must not leak.
    if 'hits' in data:
        problems.append("Response contains 'hits' field (should be 'results')")

    results = data.get('results')
    if 'results' not in data:
        problems.append("Response missing 'results' field")
    elif not isinstance(results, list):
        problems.append("'results' should be a list")
    elif results:
        # Only the first result is sampled for structural checks.
        first = results[0]
        problems.extend(
            f"Result missing required field: {field}"
            for field in ('spu_id', 'title', 'skus', 'relevance_score')
            if field not in first
        )
        # ES-internal bookkeeping fields must be stripped by the API layer.
        problems.extend(
            f"Result contains ES internal field: {field}"
            for field in ('_id', '_score', '_source')
            if field in first
        )
        if 'skus' in first:
            sku_list = first['skus']
            if not isinstance(sku_list, list):
                problems.append("'skus' should be a list")
            elif sku_list:
                head = sku_list[0]
                problems.extend(
                    f"SKU missing required field: {field}"
                    for field in ('sku_id', 'price', 'sku', 'stock')
                    if field not in head
                )

    # Both auxiliary sections must be present, even if empty.
    for field in ('suggestions', 'related_searches'):
        if field not in data:
            problems.append(f"Response missing '{field}' field")

    return problems
| 118 | - | ||
| 119 | - | ||
def test_facets(base_url: str, tenant_id: str):
    """Request facet aggregations and report what came back.

    Args:
        base_url: API base URL
        tenant_id: Tenant ID

    Returns:
        Response JSON or None if failed
    """
    endpoint = f"{base_url}/search/"
    request_headers = {
        "X-Tenant-ID": tenant_id,
        "Content-Type": "application/json"
    }
    body = {
        "query": "商品",
        "size": 10,
        "facets": ["category.keyword", "vendor.keyword"]
    }

    print("\nTesting facets:")
    print(f" Facets: {body['facets']}")

    try:
        resp = requests.post(endpoint, json=body, headers=request_headers, timeout=30)
        resp.raise_for_status()
        payload = resp.json()

        facet_list = payload.get('facets')
        if facet_list:
            print(f" Facets returned: {len(facet_list)}")
            for facet in facet_list:
                print(f" - {facet.get('field')}: {len(facet.get('values', []))} values")
        else:
            # Missing or empty facets is reported but not treated as fatal.
            print(" WARNING: No facets returned")

        return payload
    except Exception as exc:
        print(f" ERROR: {exc}")
        return None
| 161 | - | ||
| 162 | - | ||
def test_tenant_isolation(base_url: str, tenant_id_1: str, tenant_id_2: str):
    """Verify that two tenants receive disjoint result sets.

    Runs the same query for both tenants and warns if any SPU appears in
    both responses.

    Args:
        base_url: API base URL
        tenant_id_1: First tenant ID
        tenant_id_2: Second tenant ID
    """
    print("\nTesting tenant isolation:")
    print(f" Tenant 1: {tenant_id_1}")
    print(f" Tenant 2: {tenant_id_2}")

    # Same query for each tenant; isolation means disjoint SPU id sets.
    first_resp = test_search_api(base_url, tenant_id_1, "商品")
    second_resp = test_search_api(base_url, tenant_id_2, "商品")

    if first_resp and second_resp:
        first_ids = {item.get('spu_id') for item in first_resp.get('results', [])}
        second_ids = {item.get('spu_id') for item in second_resp.get('results', [])}

        shared = first_ids & second_ids
        if shared:
            print(f" WARNING: Found {len(shared)} overlapping results between tenants")
        else:
            print(" OK: No overlapping results (tenant isolation working)")
| 190 | - | ||
| 191 | - | ||
def main():
    """Run the base-configuration test suite.

    Returns:
        Process exit code: 0 when all fatal tests pass, 1 otherwise.
    """
    parser = argparse.ArgumentParser(description='Test base configuration')
    parser.add_argument('--api-url', default='http://localhost:8000', help='API base URL')
    parser.add_argument('--tenant-id', default='1', help='Tenant ID for testing')
    parser.add_argument('--test-tenant-2', help='Second tenant ID for isolation test')
    args = parser.parse_args()

    banner = "=" * 60
    print(banner)
    print("Base Configuration Test Suite")
    print(banner)

    # Test 1: basic search must succeed before anything else is meaningful.
    print("\n[Test 1] Basic Search")
    data = test_search_api(args.api_url, args.tenant_id)
    if not data:
        print("FAILED: Basic search test")
        return 1

    # Test 2: the response must follow the public contract.
    print("\n[Test 2] Response Format Validation")
    format_errors = validate_response_format(data)
    if format_errors:
        print("FAILED: Response format validation")
        for problem in format_errors:
            print(f" - {problem}")
        return 1
    else:
        print("PASSED: Response format is correct")

    # Test 3: facet aggregations (non-fatal; may be empty without data).
    print("\n[Test 3] Facets Aggregation")
    if not test_facets(args.api_url, args.tenant_id):
        print("WARNING: Facets test failed (may be expected if no data)")

    # Test 4: optional tenant-isolation check when a second tenant is given.
    if args.test_tenant_2:
        print("\n[Test 4] Tenant Isolation")
        test_tenant_isolation(args.api_url, args.tenant_id, args.test_tenant_2)

    print("\n" + banner)
    print("All tests completed")
    print(banner)

    return 0
| 238 | - | ||
| 239 | - | ||
# Script entry point: run the suite and propagate main()'s return value
# as the process exit code (0 = all fatal tests passed, 1 = failure).
if __name__ == '__main__':
    sys.exit(main())
| 242 | - |
scripts/test_frontend.sh deleted
| @@ -1,94 +0,0 @@ | @@ -1,94 +0,0 @@ | ||
#!/bin/bash

# Test Frontend - Quick verification script
#
# Smoke-tests the deployed frontend: health endpoint, HTML page, static
# assets, and the search API. Exits non-zero on the first failure.

set -e

GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

# Allow the target to be overridden from the environment; default to the
# previously hard-coded deployment address (backward compatible).
API_URL="${API_URL:-http://120.76.41.98:6002}"

echo -e "${GREEN}========================================${NC}"
echo -e "${GREEN}Frontend Test Script${NC}"
echo -e "${GREEN}========================================${NC}"

echo -e "\n${YELLOW}Testing API endpoints...${NC}"

# Test 1: Health check
echo -e "\n1. Testing health endpoint..."
if curl -s "${API_URL}/health" > /dev/null; then
    echo -e "${GREEN}✓ Health check passed${NC}"
else
    echo -e "${RED}✗ Health check failed${NC}"
    exit 1
fi

# Test 2: Frontend HTML
echo -e "\n2. Testing frontend HTML..."
if curl -s "${API_URL}/" | grep -q "Product Search"; then
    echo -e "${GREEN}✓ Frontend HTML accessible${NC}"
else
    echo -e "${RED}✗ Frontend HTML not found${NC}"
    exit 1
fi

# Test 3: Static CSS
echo -e "\n3. Testing static CSS..."
if curl -s "${API_URL}/static/css/style.css" | grep -q "page-container"; then
    echo -e "${GREEN}✓ CSS file accessible${NC}"
else
    echo -e "${RED}✗ CSS file not found${NC}"
    exit 1
fi

# Test 4: Static JS
echo -e "\n4. Testing static JavaScript..."
if curl -s "${API_URL}/static/js/app.js" | grep -q "performSearch"; then
    echo -e "${GREEN}✓ JavaScript file accessible${NC}"
else
    echo -e "${RED}✗ JavaScript file not found${NC}"
    exit 1
fi

# Test 5: Search API
echo -e "\n5. Testing search API..."
SEARCH_RESULT=$(curl -s -X POST "${API_URL}/search/" \
    -H "Content-Type: application/json" \
    -d '{"query":"玩具","size":5}')

if echo "$SEARCH_RESULT" | grep -q "hits"; then
    echo -e "${GREEN}✓ Search API working${NC}"
    # Guard the extraction: a response without "total" makes grep fail,
    # which would abort the whole script under `set -e`.
    TOTAL=$(echo "$SEARCH_RESULT" | grep -o '"total":[0-9]*' | cut -d: -f2 || true)
    echo -e " Found ${YELLOW}${TOTAL}${NC} results"
else
    echo -e "${RED}✗ Search API failed${NC}"
    exit 1
fi

echo -e "\n${GREEN}========================================${NC}"
echo -e "${GREEN}All tests passed! ✓${NC}"
echo -e "${GREEN}========================================${NC}"

echo -e "\n${YELLOW}Frontend is ready!${NC}"
echo -e "Open in browser: ${GREEN}${API_URL}/${NC}"

echo -e "\n${YELLOW}Quick Start Guide:${NC}"
echo "1. Open browser and go to: ${API_URL}/"
echo "2. Enter a search query (e.g., '玩具')"
echo "3. Click on filter tags to refine results"
echo "4. Use sort buttons with arrows to sort"
echo "5. Use pagination at the bottom to browse"

echo -e "\n${YELLOW}Key Features:${NC}"
echo "- Clean white background design"
echo "- Horizontal filter tags (categories, brands, suppliers)"
echo "- Sort buttons with up/down arrows for ascending/descending"
echo "- Product grid with images, prices, MOQ info"
echo "- Full pagination support"
echo "- Responsive design for mobile and desktop"

echo -e "\n${GREEN}Enjoy your new frontend! 🎉${NC}"
| 94 | - |
scripts/test_cloud_embedding.py renamed to tests/test_cloud_embedding.py
scripts/test_cnclip_service.py renamed to tests/test_cnclip_service.py
scripts/test_facet_api.py renamed to tests/test_facet_api.py