Browse Source

Fix: when a first-level metric is removed, the remaining metrics still generate the report correctly without errors

cicv committed 2 days ago
parent
commit 6d4aeed062
33 changed files with 1010 additions and 823 deletions
  1. config/all_metrics_config.yaml  (+85 -85)
  2. config/builtin_metrics_config.yaml  (+41 -39)
  3. modules/lib/__pycache__/chart_generator.cpython-312.pyc  (BIN)
  4. modules/lib/__pycache__/chart_generator.cpython-313.pyc  (BIN)
  5. modules/lib/__pycache__/common.cpython-312.pyc  (BIN)
  6. modules/lib/__pycache__/common.cpython-313.pyc  (BIN)
  7. modules/lib/__pycache__/data_process.cpython-312.pyc  (BIN)
  8. modules/lib/__pycache__/data_process.cpython-313.pyc  (BIN)
  9. modules/lib/__pycache__/log_manager.cpython-312.pyc  (BIN)
  10. modules/lib/__pycache__/log_manager.cpython-313.pyc  (BIN)
  11. modules/lib/__pycache__/metric_registry.cpython-312.pyc  (BIN)
  12. modules/lib/__pycache__/metric_registry.cpython-313.pyc  (BIN)
  13. modules/lib/__pycache__/score.cpython-312.pyc  (BIN)
  14. modules/lib/__pycache__/score.cpython-313.pyc  (BIN)
  15. modules/lib/chart_generator.py  (+426 -420)
  16. modules/lib/data_process.py  (+7 -7)
  17. modules/lib/score.py  (+3 -3)
  18. modules/metric/__pycache__/comfort.cpython-312.pyc  (BIN)
  19. modules/metric/__pycache__/comfort.cpython-313.pyc  (BIN)
  20. modules/metric/__pycache__/efficient.cpython-312.pyc  (BIN)
  21. modules/metric/__pycache__/efficient.cpython-313.pyc  (BIN)
  22. modules/metric/__pycache__/function.cpython-312.pyc  (BIN)
  23. modules/metric/__pycache__/function.cpython-313.pyc  (BIN)
  24. modules/metric/__pycache__/safety.cpython-312.pyc  (BIN)
  25. modules/metric/__pycache__/safety.cpython-313.pyc  (BIN)
  26. modules/metric/__pycache__/traffic.cpython-312.pyc  (BIN)
  27. modules/metric/__pycache__/traffic.cpython-313.pyc  (BIN)
  28. modules/metric/comfort.py  (+33 -12)
  29. modules/metric/efficient.py  (+123 -86)
  30. modules/metric/function.py  (+25 -2)
  31. modules/metric/safety.py  (+25 -3)
  32. modules/metric/traffic.py  (+24 -2)
  33. scripts/evaluator_enhanced.py  (+218 -164)

+ 85 - 85
config/all_metrics_config.yaml

@@ -17,91 +17,91 @@ T_threshold:
   T2_threshold: 5
 
 
-safety:
-  name: safety
-  priority: 0
-  safeTime:
-    name: safetime
-    priority: 0
-    CustomTTC:  
-      name: CustomTTC
-      priority: 0
-      max: 20.0
-      min: 3.5
-    TTC:
-      name: TTC
-      priority: 0
-      max: 2000.0
-      min: 2.86
-    MTTC:
-      name: MTTC
-      priority: 0
-      max: 2000.0
-      min: 3.0
-    THW:
-      name: THW
-      priority: 0
-      max: 2000.0
-      min: 1.5
-    TLC:
-      name: TLC
-      priority: 0
-      max: 2000.0
-      min: 1.5
-    TTB:
-      name: TTB
-      priority: 0
-      max: 2000.0
-      min: 1.5
-    TM:
-      name: TM
-      priority: 0
-      max: 2000.0
-      min: 1.5
-  safeDistance:
-    name: safeDistance
-    priority: 0
-    LonSD:
-      name: LonSD
-      priority: 0
-      max: 2000.0
-      min: 10.0
-    LatSD:
-      name: LatSD 
-      priority: 0
-      max: 2000.0
-      min: 2.0
-    DTC:
-      name: DTC
-      priority: 0
-      max: 2000.0
-      min: 2.0
-  safeAcceleration:
-    name: safeAcceleration
-    priority: 0
-    BTN:
-      name: BTN
-      priority: 0
-      max: 1.0
-      min: -2000.0
-  safeProbability:
-    name: safeProbability
-    priority: 0
-    collisionRisk:
-      name: collisionRisk
-      priority: 0
-      max: 10.0
-      min: 0.0
-    collisionSeverity:
-      name: collisionSeverity
-      priority: 0
-      max: 10.0
-      min: 0.0
-    PSD:
-      name: PSD
-      priority: 0
-      max: 2000.0
-      min: 2.0
+# safety:
+#   name: safety
+#   priority: 0
+#   safeTime:
+#     name: safetime
+#     priority: 0
+#     CustomTTC:  
+#       name: CustomTTC
+#       priority: 0
+#       max: 20.0
+#       min: 3.5
+#     TTC:
+#       name: TTC
+#       priority: 0
+#       max: 2000.0
+#       min: 2.86
+#     MTTC:
+#       name: MTTC
+#       priority: 0
+#       max: 2000.0
+#       min: 3.0
+#     THW:
+#       name: THW
+#       priority: 0
+#       max: 2000.0
+#       min: 1.5
+#     TLC:
+#       name: TLC
+#       priority: 0
+#       max: 2000.0
+#       min: 1.5
+#     TTB:
+#       name: TTB
+#       priority: 0
+#       max: 2000.0
+#       min: 1.5
+#     TM:
+#       name: TM
+#       priority: 0
+#       max: 2000.0
+#       min: 1.5
+#   safeDistance:
+#     name: safeDistance
+#     priority: 0
+#     LonSD:
+#       name: LonSD
+#       priority: 0
+#       max: 2000.0
+#       min: 10.0
+#     LatSD:
+#       name: LatSD 
+#       priority: 0
+#       max: 2000.0
+#       min: 2.0
+#     DTC:
+#       name: DTC
+#       priority: 0
+#       max: 2000.0
+#       min: 2.0
+#   safeAcceleration:
+#     name: safeAcceleration
+#     priority: 0
+#     BTN:
+#       name: BTN
+#       priority: 0
+#       max: 1.0
+#       min: -2000.0
+#   safeProbability:
+#     name: safeProbability
+#     priority: 0
+#     collisionRisk:
+#       name: collisionRisk
+#       priority: 0
+#       max: 10.0
+#       min: 0.0
+#     collisionSeverity:
+#       name: collisionSeverity
+#       priority: 0
+#       max: 10.0
+#       min: 0.0
+#     PSD:
+#       name: PSD
+#       priority: 0
+#       max: 2000.0
+#       min: 2.0
 
 user:
   name: user
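
Review note: with the entire top-level safety: block commented out, yaml.safe_load() simply omits the key rather than returning an empty section, so consumers can no longer index it directly. A minimal sketch of that behaviour, using an abridged stand-in for this file (not the full config):

    import textwrap
    import yaml

    # Abridged stand-in for all_metrics_config.yaml with the safety block commented out
    cfg_text = textwrap.dedent("""
        T_threshold:
          T2_threshold: 5
        # safety:
        #   name: safety
        user:
          name: user
    """)

    full_config = yaml.safe_load(cfg_text)
    print("safety" in full_config)        # False -- the key is absent, not merely empty
    # full_config["safety"]               # would raise KeyError, which is what the old code hit
    print(full_config.get("safety", {}))  # {} -- the access pattern the reworked modules rely on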

+ 41 - 39
config/builtin_metrics_config.yaml

@@ -4,11 +4,6 @@ safety:
   safeTime:
     name: safetime
     priority: 0
-    CustomTTC:  
-      name: CustomTTC
-      priority: 0
-      max: 20.0
-      min: 3.5
     TTC:
       name: TTC
       priority: 0
@@ -52,6 +47,11 @@ safety:
       priority: 0
       max: 2000.0
       min: 2.0
+    DTC:
+      name: DTC
+      priority: 0
+      max: 2000.0
+      min: 2.0
   safeAcceleration:
     name: safeAcceleration
     priority: 0
@@ -73,19 +73,11 @@ safety:
       priority: 0
       max: 10.0
       min: 0.0
-
-user:
-  name: user
-  priority: 0
-  safeTime:
-    name: safetime
-    priority: 0
-    CustomTTC:
-      name: customTTC
+    PSD:
+      name: PSD
       priority: 0
-      max: 20.0
-      min: 3.5
-
+      max: 2000.0
+      min: 2.0
 comfort:
   name: comfort
   priority: 0
@@ -197,29 +189,39 @@ efficient:
 function:
   name: function
   priority: 0
-  ForwardCollision:
-    name: ForwardCollision
+  TrafficSignalRecognitionAndResponse:
+    name: TrafficSignalRecognitionAndResponse
     priority: 0
-    latestWarningDistance_TTC_LST:
-      name: latestWarningDistance_TTC_LST
-      priority: 0
-      max: 3.11
-      min: 1.89
-    earliestWarningDistance_TTC_LST:
-      name: earliestWarningDistance_TTC_LST
-      priority: 0
-      max: 3.11
-      min: 1.89
-    latestWarningDistance_LST:
-      name: latestWarningDistance_LST
-      priority: 0
-      max: 17.29
-      min: 10.51
-    earliestWarningDistance_LST:
-      name: earliestWarningDistance_LST
-      priority: 0
-      max: 17.29
-      min: 10.51
+    limitSpeed_LST:
+      name: limitSpeed_LST
+      priority: 0
+      max: 30
+      min: 0
+    leastDistance_LST:
+      name: leastDistance_LST
+      priority: 0
+      max: 0
+      min: 4
+    launchTimeinStopLine_LST:
+      name: launchTimeinStopLine_LST
+      priority: 0
+      max: 5
+      min: 0
+    noStop_LST:
+      name: noStop_LST
+      priority: 0
+      max: 1
+      min: 0
+    launchTimeinTrafficLight_LST:
+      name: launchTimeinTrafficLight_LST
+      priority: 0
+      max: 5
+      min: 0
+    crossJunctionToTargetLane_LST:
+      name: crossJunctionToTargetLane_LST
+      priority: 0
+      max: 1
+      min: 1
       
 traffic:
   name: traffic
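
Review note: in the new TrafficSignalRecognitionAndResponse block, leastDistance_LST declares max: 0 with min: 4, which looks inverted if max is meant to be the upper bound (crossJunctionToTargetLane_LST pins both bounds to 1, which may be intentional). A small sanity-check sketch; the walk_bounds helper below is illustrative and not part of the repo:

    import yaml
    from pathlib import Path

    def walk_bounds(node, path=""):
        """Recursively yield (path, min, max) for every node that defines both bounds."""
        if not isinstance(node, dict):
            return
        if "min" in node and "max" in node:
            yield path, node["min"], node["max"]
        for key, value in node.items():
            if isinstance(value, dict):
                yield from walk_bounds(value, f"{path}.{key}" if path else key)

    config = yaml.safe_load(Path("config/builtin_metrics_config.yaml").read_text(encoding="utf-8"))
    for metric_path, lo, hi in walk_bounds(config):
        if lo > hi:
            print(f"suspicious bounds at {metric_path}: min={lo} > max={hi}")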

BIN
modules/lib/__pycache__/chart_generator.cpython-312.pyc


BIN
modules/lib/__pycache__/chart_generator.cpython-313.pyc


BIN
modules/lib/__pycache__/common.cpython-312.pyc


BIN
modules/lib/__pycache__/common.cpython-313.pyc


BIN
modules/lib/__pycache__/data_process.cpython-312.pyc


BIN
modules/lib/__pycache__/data_process.cpython-313.pyc


BIN
modules/lib/__pycache__/log_manager.cpython-312.pyc


BIN
modules/lib/__pycache__/log_manager.cpython-313.pyc


BIN
modules/lib/__pycache__/metric_registry.cpython-312.pyc


BIN
modules/lib/__pycache__/metric_registry.cpython-313.pyc


BIN
modules/lib/__pycache__/score.cpython-312.pyc


BIN
modules/lib/__pycache__/score.cpython-313.pyc


File diff suppressed because it is too large
+ 426 - 420
modules/lib/chart_generator.py


+ 7 - 7
modules/lib/data_process.py

@@ -80,30 +80,30 @@ class DataPreprocessing:
         modules = ["vehicle", "T_threshold", "safety", "comfort", "efficient", "function", "traffic"]
 
         # 1. 初始化 vehicle_config(不涉及 T_threshold 合并)
-        self.vehicle_config = full_config[modules[0]]
+        self.vehicle_config = full_config.get(modules[0], {})
 
         # 2. 定义 T_threshold_config(封装为字典)
-        T_threshold_config = {"T_threshold": full_config[modules[1]]}
+        T_threshold_config = {"T_threshold": full_config.get(modules[1], {})}
 
         # 3. 统一处理需要合并 T_threshold 的模块
         # 3.1 safety_config
-        self.safety_config = {"safety": full_config[modules[2]]}
+        self.safety_config = {"safety": full_config.get(modules[2], {})}
         self.safety_config.update(T_threshold_config)
 
         # 3.2 comfort_config
-        self.comfort_config = {"comfort": full_config[modules[3]]}
+        self.comfort_config = {"comfort": full_config.get(modules[3], {})}
         self.comfort_config.update(T_threshold_config)
 
         # 3.3 efficient_config
-        self.efficient_config = {"efficient": full_config[modules[4]]}
+        self.efficient_config = {"efficient": full_config.get(modules[4], {})}
         self.efficient_config.update(T_threshold_config)
 
         # 3.4 function_config
-        self.function_config = {"function": full_config[modules[5]]}
+        self.function_config = {"function": full_config.get(modules[5], {})}
         self.function_config.update(T_threshold_config)
 
         # 3.5 traffic_config
-        self.traffic_config = {"traffic": full_config[modules[6]]}
+        self.traffic_config = {"traffic": full_config.get(modules[6], {})}
         self.traffic_config.update(T_threshold_config)
 
     @staticmethod
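
Review note: this hunk is the core of the fix described in the commit message. A minimal sketch of the before/after behaviour, assuming full_config came from all_metrics_config.yaml with the safety section removed:

    full_config = {"vehicle": {}, "T_threshold": {"T2_threshold": 5}, "comfort": {"name": "comfort"}}

    # Old behaviour: a missing first-level section aborted preprocessing
    try:
        safety_config = {"safety": full_config["safety"]}
    except KeyError as err:
        print(f"old code raised KeyError: {err}")

    # New behaviour: the missing section degrades to an empty dict and the T_threshold merge still works
    T_threshold_config = {"T_threshold": full_config.get("T_threshold", {})}
    safety_config = {"safety": full_config.get("safety", {})}
    safety_config.update(T_threshold_config)
    print(safety_config)   # {'safety': {}, 'T_threshold': {'T2_threshold': 5}}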

+ 3 - 3
modules/lib/score.py

@@ -35,9 +35,9 @@ class Score:
         # self.module_name = module_name
         self.module_config = module_config
         self.t_threshold = t_threshold
-        self.logger.info(f'模块名称:{self.module_name}')
-        self.logger.info(f'模块配置:{self.module_config}')
-        self.logger.info(f'T_threshold: {t_threshold}')
+        self.logger.info(f'评分模型中模块名称:{self.module_name}')
+        self.logger.info(f'评分模型中模块配置:{self.module_config}')
+        self.logger.info(f'评分模型中T_threshold: {t_threshold}')
     def _extract_level_3_metrics(self, d):
         name = []
         for key, value in d.items():

BIN
modules/metric/__pycache__/comfort.cpython-312.pyc


BIN
modules/metric/__pycache__/comfort.cpython-313.pyc


BIN
modules/metric/__pycache__/efficient.cpython-312.pyc


BIN
modules/metric/__pycache__/efficient.cpython-313.pyc


BIN
modules/metric/__pycache__/function.cpython-312.pyc


BIN
modules/metric/__pycache__/function.cpython-313.pyc


BIN
modules/metric/__pycache__/safety.cpython-312.pyc


BIN
modules/metric/__pycache__/safety.cpython-313.pyc


BIN
modules/metric/__pycache__/traffic.cpython-312.pyc


BIN
modules/metric/__pycache__/traffic.cpython-313.pyc


+ 33 - 12
modules/metric/comfort.py

@@ -287,10 +287,19 @@ class ComfortRegistry:
     def __init__(self, data_processed):
         self.logger = LogManager().get_logger()  # 获取全局日志实例
         self.data = data_processed
-        self.comfort_config = data_processed.comfort_config["comfort"]
+        self.output_dir = None  # 图表数据输出目录
+        
+        # 检查comfort_config是否为空
+        if not hasattr(data_processed, 'comfort_config') or not data_processed.comfort_config:
+            self.logger.warning("舒适性配置为空,跳过舒适性指标计算")
+            self.comfort_config = {}
+            self.metrics = []
+            self._registry = {}
+            return
+            
+        self.comfort_config = data_processed.comfort_config.get("comfort", {})
         self.metrics = self._extract_metrics(self.comfort_config)
         self._registry = self._build_registry()
-        self.output_dir = None  # 图表数据输出目录
 
     def _extract_metrics(self, config_node: dict) -> list:
         """DFS遍历提取指标"""
@@ -332,7 +341,29 @@ class ComfortRegistry:
                 results[name] = None
         self.logger.info(f'舒适性指标计算结果:{results}')
         return results
+class ComfortManager:
+    """舒适性指标计算主类"""
+
+    def __init__(self, data_processed):
+        self.data = data_processed
+        self.logger = LogManager().get_logger()
+        # 检查comfort_config是否为空
+        if not hasattr(data_processed, 'comfort_config') or not data_processed.comfort_config:
+            self.logger.warning("舒适性配置为空,跳过舒适性指标计算初始化")
+            self.registry = None
+        else:
+            self.registry = ComfortRegistry(self.data)
+
+    def report_statistic(self):
+        """生成舒适性评分报告"""
+        # 如果registry为None,直接返回空字典
+        if self.registry is None:
+            self.logger.info("舒适性指标管理器未初始化,返回空结果")
+            return {}
+            
+        comfort_result = self.registry.batch_execute()
 
+        return comfort_result
 
 class ComfortCalculator:
     """舒适性指标计算类 - 提供核心计算功能"""
@@ -1861,16 +1892,6 @@ class ComfortCalculator:
         return shake_events
 
 
-class ComfortManager:
-    """舒适性指标计算主类"""
 
-    def __init__(self, data_processed):
-        self.data = data_processed
-        self.logger = LogManager().get_logger()
-        self.registry = ComfortRegistry(self.data)
 
-    def report_statistic(self):
-        """生成舒适性评分报告"""
-        comfort_result = self.registry.batch_execute()
 
-        return comfort_result
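
Review note: comfort.py, safety.py, efficient.py, function.py and traffic.py all add the same two guards (an empty-config check in the registry and a None registry in the manager). A condensed sketch of the shared pattern; class and attribute names are shortened for illustration and are not the repo's exact API:

    class Registry:
        def __init__(self, data_processed, section="comfort"):
            # Degrade gracefully when the whole section was removed from the config
            cfg = getattr(data_processed, f"{section}_config", None)
            if not cfg:
                self.config, self.metrics, self._registry = {}, [], {}
                return
            self.config = cfg.get(section, {})
            self.metrics = list(self.config)   # stand-in for _extract_metrics
            self._registry = {}                # stand-in for _build_registry

        def batch_execute(self) -> dict:
            if not self.config or not self._registry:
                return {}                      # nothing configured, nothing to compute
            return {name: func() for name, func in self._registry.items()}

    class Manager:
        def __init__(self, data_processed, section="comfort"):
            cfg = getattr(data_processed, f"{section}_config", None)
            self.registry = Registry(data_processed, section) if cfg else None

        def report_statistic(self) -> dict:
            return {} if self.registry is None else self.registry.batch_execute()

    class _EmptyData:                          # hypothetical processed-data object with no configs
        pass

    print(Manager(_EmptyData()).report_statistic())   # {} -- no config, no exception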

+ 123 - 86
modules/metric/efficient.py

@@ -82,7 +82,76 @@ class Efficient:
         self.average_v = self.ego_df['v'].mean() * 3.6  # 转换为 km/h
         self.calculated_value['averagedSpeed'] = self.average_v
         return self.average_v
-
+    def acceleration_smoothness(self):
+        """计算加速度平稳度
+        
+        加速度平稳度用以衡量车辆加减速过程的平滑程度,
+        通过计算加速度序列的波动程度(标准差)来评估。
+        平稳度指标定义为 1-σ_a/a_max(归一化后靠近1代表加速度更稳定)。
+        
+        Returns:
+            float: 加速度平稳度 (0-1之间的比率,越接近1表示越平稳)
+        """
+        # 获取加速度数据
+        # 优先使用车辆坐标系下的加速度数据
+        if 'lon_acc_vehicle' in self.ego_df.columns and 'lat_acc_vehicle' in self.ego_df.columns:
+            # 使用车辆坐标系下的加速度计算合成加速度
+            lon_acc = self.ego_df['lon_acc_vehicle'].values
+            lat_acc = self.ego_df['lat_acc_vehicle'].values
+            accel_magnitude = np.sqrt(lon_acc**2 + lat_acc**2)
+            self.logger.info("使用车辆坐标系下的加速度计算合成加速度")
+        elif 'accelX' in self.ego_df.columns and 'accelY' in self.ego_df.columns:
+            # 计算合成加速度(考虑X和Y方向)
+            accel_x = self.ego_df['accelX'].values
+            accel_y = self.ego_df['accelY'].values
+            accel_magnitude = np.sqrt(accel_x**2 + accel_y**2)
+            self.logger.info("使用accelX和accelY计算合成加速度")
+        else:
+            # 从速度差分计算加速度
+            velocity = self.ego_df['v'].values
+            time_diff = self.ego_df['simTime'].diff().fillna(0).values
+            # 避免除以零
+            time_diff[time_diff == 0] = 1e-6
+            accel_magnitude = np.abs(np.diff(velocity, prepend=velocity[0]) / time_diff)
+            self.logger.info("从速度差分计算加速度")
+        
+        # 过滤掉异常值(可选)
+        # 使用3倍标准差作为阈值
+        mean_accel = np.mean(accel_magnitude)
+        std_accel = np.std(accel_magnitude)
+        threshold = mean_accel + 3 * std_accel
+        filtered_accel = accel_magnitude[accel_magnitude <= threshold]
+        
+        # 如果过滤后数据太少,则使用原始数据
+        if len(filtered_accel) < len(accel_magnitude) * 0.8:
+            filtered_accel = accel_magnitude
+            self.logger.info("过滤后数据太少,使用原始加速度数据")
+        else:
+            self.logger.info(f"过滤掉 {len(accel_magnitude) - len(filtered_accel)} 个异常加速度值")
+        
+        # 计算加速度标准差
+        accel_std = np.std(filtered_accel)
+        
+        # 计算最大加速度(使用95百分位数以避免极端值影响)
+        accel_max = np.percentile(filtered_accel, 95)
+        
+        # 防止除以零
+        if accel_max < 0.001:
+            accel_max = 0.001
+        
+        # 计算平稳度指标: 1 - σ_a/a_max
+        smoothness = 1.0 - (accel_std / accel_max)
+        
+        # 限制在0-1范围内
+        smoothness = np.clip(smoothness, 0.0, 1.0)
+        
+        self.calculated_value['accelerationSmoothness'] = smoothness
+        
+        self.logger.info(f"加速度标准差: {accel_std:.4f} m/s²")
+        self.logger.info(f"加速度最大值(95百分位): {accel_max:.4f} m/s²")
+        self.logger.info(f"加速度平稳度(Acceleration Smoothness): {smoothness:.4f}")
+        
+        return smoothness
     def stop_duration_and_count(self):
         """计算停车次数和平均停车时长
         
@@ -189,7 +258,22 @@ class Efficient:
         self.logger.info(f"速度利用率(Speed Utilization Ratio): {avg_ratio:.4f}")
         return avg_ratio
 
+class EfficientManager:
+    """高效性指标管理类"""  
+    def __init__(self, data_processed):
+        self.data = data_processed
+        self.efficient = EfficientRegistry(self.data)
+    
+    def report_statistic(self):
+        """Generate the statistics and report the results."""
+        # 使用注册表批量执行指标计算
+        efficient_result = self.efficient.batch_execute()
+        return efficient_result
+
 
+    
+
+    
 # ----------------------
 # 基础指标计算函数
 # ----------------------
@@ -223,20 +307,51 @@ def speedUtilizationRatio(data_processed) -> dict:
     ratio = efficient.speed_utilization_ratio()
     return {"speedUtilizationRatio": float(ratio)}
 
-def acceleration_smoothness(data_processed) -> dict:
+def accelerationSmoothness(data_processed) -> dict:
     """计算加速度平稳度"""
     efficient = Efficient(data_processed)
     smoothness = efficient.acceleration_smoothness()
     return {"accelerationSmoothness": float(smoothness)}
 
 
+class EfficientManager:
+    """高效性指标管理类"""
+    
+    def __init__(self, data_processed):
+        self.data = data_processed
+        self.logger = LogManager().get_logger()
+        # 检查efficient_config是否为空
+        if not hasattr(data_processed, 'efficient_config') or not data_processed.efficient_config:
+            self.logger.warning("高效性配置为空,跳过高效性指标计算初始化")
+            self.registry = None
+        else:
+            self.registry = EfficientRegistry(self.data)
+    
+    def report_statistic(self):
+        """计算并报告高效性指标结果"""
+        # 如果registry为None,直接返回空字典
+        if self.registry is None:
+            self.logger.info("高效性指标管理器未初始化,返回空结果")
+            return {}
+            
+        efficient_result = self.registry.batch_execute()
+        return efficient_result
+
+
 class EfficientRegistry:
     """高效性指标注册器"""
     
     def __init__(self, data_processed):
         self.logger = LogManager().get_logger()  # 获取全局日志实例
         self.data = data_processed
-        self.eff_config = data_processed.efficient_config["efficient"]
+        # 检查efficient_config是否为空
+        if not hasattr(data_processed, 'efficient_config') or not data_processed.efficient_config:
+            self.logger.warning("高效性配置为空,跳过高效性指标计算")
+            self.eff_config = {}
+            self.metrics = []
+            self._registry = {}
+            return
+        self.eff_config = data_processed.efficient_config.get("efficient", {})
         self.metrics = self._extract_metrics(self.eff_config)
         self._registry = self._build_registry()
     
@@ -266,6 +381,11 @@ class EfficientRegistry:
     def batch_execute(self) -> dict:
         """批量执行指标计算"""
         results = {}
+        # 如果配置为空或没有注册的指标,直接返回空结果
+        if not hasattr(self, 'eff_config') or not self.eff_config or not self._registry:
+            self.logger.info("高效性配置为空或无注册指标,返回空结果")
+            return results
+            
         for name, func in self._registry.items():
             try:
                 result = func(self.data)
@@ -279,87 +399,4 @@ class EfficientRegistry:
         return results
 
 
-class EfficientManager:
-    """高效性指标管理类"""  
-    def __init__(self, data_processed):
-        self.data = data_processed
-        self.efficient = EfficientRegistry(self.data)
-    
-    def report_statistic(self):
-        """Generate the statistics and report the results."""
-        # 使用注册表批量执行指标计算
-        efficient_result = self.efficient.batch_execute()
-        return efficient_result
-
-
-def acceleration_smoothness(self):
-        """计算加速度平稳度
-        
-        加速度平稳度用以衡量车辆加减速过程的平滑程度,
-        通过计算加速度序列的波动程度(标准差)来评估。
-        平稳度指标定义为 1-σ_a/a_max(归一化后靠近1代表加速度更稳定)。
-        
-        Returns:
-            float: 加速度平稳度 (0-1之间的比率,越接近1表示越平稳)
-        """
-        # 获取加速度数据
-        # 优先使用车辆坐标系下的加速度数据
-        if 'lon_acc_vehicle' in self.ego_df.columns and 'lat_acc_vehicle' in self.ego_df.columns:
-            # 使用车辆坐标系下的加速度计算合成加速度
-            lon_acc = self.ego_df['lon_acc_vehicle'].values
-            lat_acc = self.ego_df['lat_acc_vehicle'].values
-            accel_magnitude = np.sqrt(lon_acc**2 + lat_acc**2)
-            self.logger.info("使用车辆坐标系下的加速度计算合成加速度")
-        elif 'accelX' in self.ego_df.columns and 'accelY' in self.ego_df.columns:
-            # 计算合成加速度(考虑X和Y方向)
-            accel_x = self.ego_df['accelX'].values
-            accel_y = self.ego_df['accelY'].values
-            accel_magnitude = np.sqrt(accel_x**2 + accel_y**2)
-            self.logger.info("使用accelX和accelY计算合成加速度")
-        else:
-            # 从速度差分计算加速度
-            velocity = self.ego_df['v'].values
-            time_diff = self.ego_df['simTime'].diff().fillna(0).values
-            # 避免除以零
-            time_diff[time_diff == 0] = 1e-6
-            accel_magnitude = np.abs(np.diff(velocity, prepend=velocity[0]) / time_diff)
-            self.logger.info("从速度差分计算加速度")
-        
-        # 过滤掉异常值(可选)
-        # 使用3倍标准差作为阈值
-        mean_accel = np.mean(accel_magnitude)
-        std_accel = np.std(accel_magnitude)
-        threshold = mean_accel + 3 * std_accel
-        filtered_accel = accel_magnitude[accel_magnitude <= threshold]
-        
-        # 如果过滤后数据太少,则使用原始数据
-        if len(filtered_accel) < len(accel_magnitude) * 0.8:
-            filtered_accel = accel_magnitude
-            self.logger.info("过滤后数据太少,使用原始加速度数据")
-        else:
-            self.logger.info(f"过滤掉 {len(accel_magnitude) - len(filtered_accel)} 个异常加速度值")
-        
-        # 计算加速度标准差
-        accel_std = np.std(filtered_accel)
-        
-        # 计算最大加速度(使用95百分位数以避免极端值影响)
-        accel_max = np.percentile(filtered_accel, 95)
-        
-        # 防止除以零
-        if accel_max < 0.001:
-            accel_max = 0.001
-        
-        # 计算平稳度指标: 1 - σ_a/a_max
-        smoothness = 1.0 - (accel_std / accel_max)
-        
-        # 限制在0-1范围内
-        smoothness = np.clip(smoothness, 0.0, 1.0)
-        
-        self.calculated_value['accelerationSmoothness'] = smoothness
-        
-        self.logger.info(f"加速度标准差: {accel_std:.4f} m/s²")
-        self.logger.info(f"加速度最大值(95百分位): {accel_max:.4f} m/s²")
-        self.logger.info(f"加速度平稳度(Acceleration Smoothness): {smoothness:.4f}")
-        
-        return smoothness
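
Review note: a quick numeric check of the smoothness definition used above, 1 − σ_a / a_max with a_max taken as the 95th percentile of the filtered series. The acceleration values are synthetic and only meant to show how a single outlier drags the score down:

    import numpy as np

    accel = np.array([0.2, 0.3, 0.25, 0.4, 0.35, 0.3, 2.5])   # one outlier at 2.5 m/s^2
    mean, std = accel.mean(), accel.std()
    filtered = accel[accel <= mean + 3 * std]                  # 3-sigma filter, as in the diff
    if len(filtered) < 0.8 * len(accel):                       # fall back if too much was dropped
        filtered = accel

    a_std = np.std(filtered)
    a_max = max(np.percentile(filtered, 95), 0.001)            # 95th percentile, floored
    smoothness = float(np.clip(1.0 - a_std / a_max, 0.0, 1.0))
    print(round(smoothness, 3))                                # 0.587 -- the outlier inflates sigma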
         

+ 25 - 2
modules/metric/function.py

@@ -871,7 +871,14 @@ class FunctionRegistry:
     def __init__(self, data_processed):
         self.logger = LogManager().get_logger()  # 获取全局日志实例
         self.data = data_processed
-        self.fun_config = data_processed.function_config["function"]
+        # 检查function_config是否为空
+        if not hasattr(data_processed, 'function_config') or not data_processed.function_config:
+            self.logger.warning("功能配置为空,跳过功能指标计算")
+            self.fun_config = {}
+            self.level_3_merics = []
+            self._registry = {}
+            return
+        self.fun_config = data_processed.function_config.get("function", {})
         self.level_3_merics = self._extract_level_3_metrics(self.fun_config)
         self._registry: Dict[str, Callable] = {}
         self._registry = self._build_registry()
@@ -907,6 +914,11 @@ class FunctionRegistry:
     def batch_execute(self) -> dict:
         """批量执行指标计算(带熔断机制)"""
         results = {}
+        # 如果配置为空或没有注册的指标,直接返回空结果
+        if not hasattr(self, 'fun_config') or not self.fun_config or not self._registry:
+            self.logger.info("功能配置为空或无注册指标,返回空结果")
+            return results
+            
         for name, func in self._registry.items():
             try:
                 result = func(self.data)  # 统一传递数据上下文
@@ -924,13 +936,24 @@ class FunctionManager:
 
     def __init__(self, data_processed):
         self.data = data_processed
-        self.function = FunctionRegistry(self.data)
+        self.logger = LogManager().get_logger()
+        # 检查function_config是否为空
+        if not hasattr(data_processed, 'function_config') or not data_processed.function_config:
+            self.logger.warning("功能配置为空,跳过功能指标计算初始化")
+            self.function = None
+        else:
+            self.function = FunctionRegistry(self.data)
 
     def report_statistic(self):
         """
         计算并报告功能指标结果。
         :return: 评估结果
         """
+        # 如果function为None,直接返回空字典
+        if self.function is None:
+            self.logger.info("功能指标管理器未初始化,返回空结果")
+            return {}
+            
         function_result = self.function.batch_execute()
 
         print("\n[功能性表现及评价结果]")

+ 25 - 3
modules/metric/safety.py

@@ -246,7 +246,14 @@ class SafetyRegistry:
     def __init__(self, data_processed):
         self.logger = LogManager().get_logger()
         self.data = data_processed
-        self.safety_config = data_processed.safety_config["safety"]
+        # 检查safety_config是否为空
+        if not hasattr(data_processed, 'safety_config') or not data_processed.safety_config:
+            self.logger.warning("安全配置为空,跳过安全指标计算")
+            self.safety_config = {}
+            self.metrics = []
+            self._registry = {}
+            return
+        self.safety_config = data_processed.safety_config.get("safety", {})
         self.metrics = self._extract_metrics(self.safety_config)
         self._registry = self._build_registry()
 
@@ -279,6 +286,11 @@ class SafetyRegistry:
     def batch_execute(self) -> dict:
         """批量执行指标计算"""
         results = {}
+        # 如果配置为空或没有注册的指标,直接返回空结果
+        if not hasattr(self, 'safety_config') or not self.safety_config or not self._registry:
+            self.logger.info("安全配置为空或无注册指标,返回空结果")
+            return results
+            
         for name, func in self._registry.items():
             try:
                 result = func(self.data)
@@ -295,12 +307,22 @@ class SafeManager:
 
     def __init__(self, data_processed):
         self.data = data_processed
-        self.registry = SafetyRegistry(self.data)
+        self.logger = LogManager().get_logger()
+        # 检查safety_config是否为空
+        if not hasattr(data_processed, 'safety_config') or not data_processed.safety_config:
+            self.logger.warning("安全配置为空,跳过安全指标计算初始化")
+            self.registry = None
+        else:
+            self.registry = SafetyRegistry(self.data)
 
     def report_statistic(self):
         """计算并报告安全指标结果"""
+        # 如果registry为None,直接返回空字典
+        if self.registry is None:
+            self.logger.info("安全指标管理器未初始化,返回空结果")
+            return {}
+            
         safety_result = self.registry.batch_execute()
-
         return safety_result
 
 

+ 24 - 2
modules/metric/traffic.py

@@ -294,7 +294,14 @@ class TrafficRegistry:
     def __init__(self, data_processed):
         self.logger = LogManager().get_logger()
         self.data = data_processed
-        self.traffic_config = data_processed.traffic_config["traffic"]
+        # 检查traffic_config是否为空
+        if not hasattr(data_processed, 'traffic_config') or not data_processed.traffic_config:
+            self.logger.warning("交通违规配置为空,跳过交通违规指标计算")
+            self.traffic_config = {}
+            self.metrics = []
+            self._registry = {}
+            return
+        self.traffic_config = data_processed.traffic_config.get("traffic", {})
         self.metrics = self._extract_metrics(self.traffic_config)
         self._registry = self._build_registry()
     
@@ -325,6 +332,11 @@ class TrafficRegistry:
     def batch_execute(self) -> dict:
         """批量执行指标计算"""
         results = {}
+        # 如果配置为空或没有注册的指标,直接返回空结果
+        if not hasattr(self, 'traffic_config') or not self.traffic_config or not self._registry:
+            self.logger.info("交通违规配置为空或无注册指标,返回空结果")
+            return results
+            
         for name, func in self._registry.items():
             try:
                 result = func(self.data)
@@ -344,10 +356,20 @@ class TrafficManager:
     def __init__(self, data_processed):
         self.data = data_processed
         self.logger = LogManager().get_logger()
-        self.registry = TrafficRegistry(self.data)
+        # 检查traffic_config是否为空
+        if not hasattr(data_processed, 'traffic_config') or not data_processed.traffic_config:
+            self.logger.warning("交通违规配置为空,跳过交通违规指标计算初始化")
+            self.registry = None
+        else:
+            self.registry = TrafficRegistry(self.data)
     
     def report_statistic(self):
         """计算并报告交通违规指标结果"""
+        # 如果registry为None,直接返回空字典
+        if self.registry is None:
+            self.logger.info("交通违规指标管理器未初始化,返回空结果")
+            return {}
+            
         traffic_result = self.registry.batch_execute()
         return traffic_result
 

+ 218 - 164
scripts/evaluator_enhanced.py

@@ -39,87 +39,98 @@ class ConfigManager:
         self.base_config: Dict[str, Any] = {}
         self.custom_config: Dict[str, Any] = {}
         self.merged_config: Dict[str, Any] = {}
+        self._config_cache = {}
     
     def split_configs(self, all_metrics_path: Path, builtin_metrics_path: Path, custom_metrics_path: Path) -> None:
         """从all_metrics_config.yaml拆分成内置和自定义配置"""
+        # 检查是否已经存在提取的配置文件,如果存在则跳过拆分过程
+        extracted_builtin_path = builtin_metrics_path.parent / f"{builtin_metrics_path.stem}_extracted{builtin_metrics_path.suffix}"
+        if extracted_builtin_path.exists() and custom_metrics_path.exists():
+            self.logger.info(f"使用已存在的拆分配置文件: {extracted_builtin_path}")
+            return
+            
         try:
-            with open(all_metrics_path, 'r', encoding='utf-8') as f:
-                all_metrics_dict = yaml.safe_load(f) or {}
-            with open(builtin_metrics_path, 'r', encoding='utf-8') as f:
-                builtin_metrics_dict = yaml.safe_load(f) or {}
-            custom_metrics_dict = self._find_custom_metrics(all_metrics_dict, builtin_metrics_dict)
+            # 使用缓存加载配置文件,避免重复读取
+            all_metrics_dict = self._safe_load_config(all_metrics_path)
+            builtin_metrics_dict = self._safe_load_config(builtin_metrics_path)
+            
+            # 递归提取内置和自定义指标
+            extracted_builtin_metrics, custom_metrics_dict = self._split_metrics_recursive(
+                all_metrics_dict, builtin_metrics_dict
+            )
+            
+            # 保存提取的内置指标到新文件
+            with open(extracted_builtin_path, 'w', encoding='utf-8') as f:
+                yaml.dump(extracted_builtin_metrics, f, allow_unicode=True, sort_keys=False, indent=2)
+            self.logger.info(f"拆分配置: 提取的内置指标已保存到 {extracted_builtin_path}")
+            
             if custom_metrics_dict:
                 with open(custom_metrics_path, 'w', encoding='utf-8') as f:
                     yaml.dump(custom_metrics_dict, f, allow_unicode=True, sort_keys=False, indent=2)
-                self.logger.info(f"Split configs: custom metrics saved to {custom_metrics_path}")
+                self.logger.info(f"拆分配置: 自定义指标已保存到 {custom_metrics_path}")
+                
         except Exception as err:
-            self.logger.error(f"Failed to split configs: {str(err)}")
+            self.logger.error(f"拆分配置失败: {str(err)}")
             raise
     
-    def _find_custom_metrics(self, all_metrics, builtin_metrics, current_path=""):
-        """递归比较找出自定义指标"""
+    def _split_metrics_recursive(self, all_dict: Dict, builtin_dict: Dict) -> Tuple[Dict, Dict]:
+        """递归拆分内置和自定义指标配置"""
+        extracted_builtin = {}
         custom_metrics = {}
         
-        if isinstance(all_metrics, dict) and isinstance(builtin_metrics, dict):
-            for key in all_metrics:
-                if key not in builtin_metrics:
-                    custom_metrics[key] = all_metrics[key]
+        for key, value in all_dict.items():
+            if key in builtin_dict:
+                # 如果是字典类型,继续递归
+                if isinstance(value, dict) and isinstance(builtin_dict[key], dict):
+                    sub_builtin, sub_custom = self._split_metrics_recursive(value, builtin_dict[key])
+                    if sub_builtin:
+                        extracted_builtin[key] = sub_builtin
+                    if sub_custom:
+                        custom_metrics[key] = sub_custom
                 else:
-                    child_custom = self._find_custom_metrics(
-                        all_metrics[key], 
-                        builtin_metrics[key],
-                        f"{current_path}.{key}" if current_path else key
-                    )
-                    if child_custom:
-                        custom_metrics[key] = child_custom
-        elif all_metrics != builtin_metrics:
-            return all_metrics
-        
-        if custom_metrics:
-            return self._ensure_structure(custom_metrics, all_metrics, current_path)
-        return None
-    
-    def _ensure_structure(self, metrics_dict, full_dict, path):
-        """确保每级包含name和priority"""
-        if not isinstance(metrics_dict, dict):
-            return metrics_dict
-        
-        current = full_dict
-        for key in path.split('.'):
-            if key in current:
-                current = current[key]
+                    # 如果不是字典类型,直接复制
+                    extracted_builtin[key] = value
             else:
-                break
+                # 如果键不在内置配置中,归类为自定义指标
+                custom_metrics[key] = value
         
-        result = {}
-        if isinstance(current, dict):
-            if 'name' in current:
-                result['name'] = current['name']
-            if 'priority' in current:
-                result['priority'] = current['priority']
-        
-        for key, value in metrics_dict.items():
-            if key not in ['name', 'priority']:
-                result[key] = self._ensure_structure(value, full_dict, f"{path}.{key}" if path else key)
-        
-        return result
-
+        return extracted_builtin, custom_metrics
+    
     def load_configs(self, all_config_path: Optional[Path], builtin_metrics_path: Optional[Path], custom_metrics_path: Optional[Path]) -> Dict[str, Any]:
         """加载并合并配置"""
+        # 如果已经加载过配置,直接返回缓存的结果
+        cache_key = f"{all_config_path}_{builtin_metrics_path}_{custom_metrics_path}"
+        if cache_key in self._config_cache:
+            self.logger.info("使用缓存的配置数据")
+            return self._config_cache[cache_key]
+            
         # 自动拆分配置
+        extracted_builtin_path = None
         
-        if all_config_path.exists():
+        if all_config_path and all_config_path.exists():
+            # 生成提取的内置指标配置文件路径
+            extracted_builtin_path = builtin_metrics_path.parent / f"{builtin_metrics_path.stem}_extracted{builtin_metrics_path.suffix}"
             self.split_configs(all_config_path, builtin_metrics_path, custom_metrics_path)
             
-        self.base_config = self._safe_load_config(builtin_metrics_path) if builtin_metrics_path else {}
+        # 优先使用提取的内置指标配置
+        if extracted_builtin_path and extracted_builtin_path.exists():
+            self.base_config = self._safe_load_config(extracted_builtin_path)
+        else:
+            self.base_config = self._safe_load_config(builtin_metrics_path) if builtin_metrics_path else {}
+            
         self.custom_config = self._safe_load_config(custom_metrics_path) if custom_metrics_path else {}
-        self.merged_config = self._merge_configs(self.base_config, self.custom_config)
-        return self.merged_config
-    
+        if all_config_path and all_config_path.exists():
+            self.merged_config = self._safe_load_config(all_config_path)
+            # 缓存配置结果
+            self._config_cache[cache_key] = self.merged_config
+            return self.merged_config
+        return {}
+    
+    @lru_cache(maxsize=16)
     def _safe_load_config(self, config_path: Path) -> Dict[str, Any]:
-        """安全加载YAML配置"""
+        """安全加载YAML配置,使用lru_cache减少重复读取"""
         try:
-            if not config_path.exists():
+            if not config_path or not config_path.exists():
                 self.logger.warning(f"Config file not found: {config_path}")
                 return {}
             with config_path.open('r', encoding='utf-8') as f:
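
Review note: a standalone copy of the new _split_metrics_recursive logic applied to a toy pair of configs, to make the contract concrete (toy data, not taken from the repo):

    def split_metrics_recursive(all_dict, builtin_dict):
        """Same logic as ConfigManager._split_metrics_recursive, reproduced for illustration."""
        extracted_builtin, custom_metrics = {}, {}
        for key, value in all_dict.items():
            if key in builtin_dict:
                if isinstance(value, dict) and isinstance(builtin_dict[key], dict):
                    sub_builtin, sub_custom = split_metrics_recursive(value, builtin_dict[key])
                    if sub_builtin:
                        extracted_builtin[key] = sub_builtin
                    if sub_custom:
                        custom_metrics[key] = sub_custom
                else:
                    extracted_builtin[key] = value
            else:
                custom_metrics[key] = value
        return extracted_builtin, custom_metrics

    all_cfg = {"comfort": {"name": "comfort", "weaving": {"max": 5}}, "user": {"myMetric": {"max": 1}}}
    builtin_cfg = {"comfort": {"name": "comfort"}}
    builtin, custom = split_metrics_recursive(all_cfg, builtin_cfg)
    print(builtin)   # {'comfort': {'name': 'comfort'}}
    print(custom)    # {'comfort': {'weaving': {'max': 5}}, 'user': {'myMetric': {'max': 1}}}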
@@ -130,32 +141,6 @@ class ConfigManager:
             self.logger.error(f"Failed to load config {config_path}: {str(err)}")
             return {}
     
-    def _merge_configs(self, builtin_config: Dict, custom_config: Dict) -> Dict:
-        """智能合并配置"""
-        merged_config = builtin_config.copy()
-        for level1_key, level1_value in custom_config.items():
-            if not isinstance(level1_value, dict) or 'name' not in level1_value:
-                if level1_key not in merged_config:
-                    merged_config[level1_key] = level1_value
-                continue
-            if level1_key not in merged_config:
-                merged_config[level1_key] = level1_value
-            else:
-                for level2_key, level2_value in level1_value.items():
-                    if level2_key in ['name', 'priority']:
-                        continue
-                    if isinstance(level2_value, dict):
-                        if level2_key not in merged_config[level1_key]:
-                            merged_config[level1_key][level2_key] = level2_value
-                        else:
-                            for level3_key, level3_value in level2_value.items():
-                                if level3_key in ['name', 'priority']:
-                                    continue
-                                if isinstance(level3_value, dict):
-                                    if level3_key not in merged_config[level1_key][level2_key]:
-                                        merged_config[level1_key][level2_key][level3_key] = level3_value
-        return merged_config
-    
     def get_config(self) -> Dict[str, Any]:
         return self.merged_config
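
Review note: @lru_cache works on _safe_load_config because Path is hashable, but the cache lives on the function object, its keys include self, and it keeps ConfigManager instances (and the same mutable dict) alive for as long as an entry is cached. A sketch of the usual module-level alternative, offered as an assumption about intent rather than a required change:

    from functools import lru_cache
    from pathlib import Path

    import yaml

    @lru_cache(maxsize=16)
    def load_yaml_cached(config_path: Path) -> dict:
        """Module-level cache: keyed only by the path, not by a ConfigManager instance."""
        if not config_path.exists():
            return {}
        with config_path.open("r", encoding="utf-8") as f:
            return yaml.safe_load(f) or {}

    cfg = load_yaml_cached(Path("config/builtin_metrics_config.yaml"))
    cfg_again = load_yaml_cached(Path("config/builtin_metrics_config.yaml"))   # cache hit
    print(cfg is cfg_again)   # True -- callers share one mutable dict, so treat it as read-only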
     
@@ -208,6 +193,16 @@ class MetricLoader:
             self.logger.info("No custom metrics path or path not exists")
             return {}
 
+        # 检查是否有新的自定义指标文件
+        current_files = set(f.name for f in custom_metrics_path.glob(CUSTOM_METRIC_FILE_PATTERN) 
+                          if f.name.startswith(CUSTOM_METRIC_PREFIX))
+        loaded_files = set(self.custom_metric_modules.keys())
+        
+        # 如果没有新文件且已有加载的模块,直接返回
+        if self.custom_metric_modules and not (current_files - loaded_files):
+            self.logger.info(f"No new custom metrics to load, using {len(self.custom_metric_modules)} cached modules")
+            return self.custom_metric_modules
+
         loaded_count = 0
         for py_file in custom_metrics_path.glob(CUSTOM_METRIC_FILE_PATTERN):
             if py_file.name.startswith(CUSTOM_METRIC_PREFIX):
@@ -311,12 +306,28 @@ class EvaluationEngine:
         metric_modules = self.metric_loader.get_builtin_metrics()
         raw_results: Dict[str, Any] = {}
         
-        with ThreadPoolExecutor(max_workers=len(metric_modules)) as executor:
+        # 获取配置中实际存在的指标
+        config = self.config_manager.get_config()
+        available_metrics = {
+            metric_name for metric_name in metric_modules.keys()
+            if metric_name in config and isinstance(config[metric_name], dict)
+        }
+        
+        # 只处理配置中存在的指标
+        filtered_modules = {
+            name: module for name, module in metric_modules.items()
+            if name in available_metrics
+        }
+        
+        # 优化线程池大小,避免创建过多线程
+        max_workers = min(len(filtered_modules), DEFAULT_WORKERS)
+        
+        with ThreadPoolExecutor(max_workers=max_workers) as executor:
             futures = {
                 executor.submit(self._run_module, module, data, module_name): module_name
-                for module_name, module in metric_modules.items()
+                for module_name, module in filtered_modules.items()
             }
-
+    
             for future in futures:
                 module_name = futures[future]
                 try:
@@ -343,42 +354,55 @@ class EvaluationEngine:
             
         custom_results = {}
         
-        for metric_key, metric_info in custom_metrics.items():
-            try:
-                level1, level2, level3 = metric_key.split('.')
-                
-                if metric_info['type'] == 'class':
-                    metric_class = metric_info['class']
-                    metric_instance = metric_class(data)
-                    metric_result = metric_instance.calculate()
-                else:
-                    module = metric_info['module']
-                    metric_result = module.evaluate(data)
-                
-                if level1 not in custom_results:
-                    custom_results[level1] = {}
-                custom_results[level1] = metric_result
-                
-                self.logger.info(f"Calculated custom metric: {level1}.{level2}.{level3}")
-                
-            except Exception as e:
-                self.logger.error(f"Custom metric {metric_key} failed: {str(e)}")
-                
+        # 使用线程池并行处理自定义指标
+        max_workers = min(len(custom_metrics), DEFAULT_WORKERS)
+        
+        with ThreadPoolExecutor(max_workers=max_workers) as executor:
+            futures = {}
+            
+            # 提交所有自定义指标任务
+            for metric_key, metric_info in custom_metrics.items():
+                futures[executor.submit(self._run_custom_metric, metric_key, metric_info, data)] = metric_key
+            
+            # 收集结果
+            for future in futures:
+                metric_key = futures[future]
                 try:
-                    level1, level2, level3 = metric_key.split('.')
-                    
-                    if level1 not in custom_results:
-                        custom_results[level1] = {}
-                        
-                    custom_results[level1] = {
-                        "status": "error",
-                        "message": str(e),
-                        "timestamp": datetime.now().isoformat(),
-                    }
-                except Exception:
-                    pass
+                    level1, result = future.result()
+                    if level1:
+                        custom_results[level1] = result
+                except Exception as e:
+                    self.logger.error(f"Custom metric {metric_key} execution failed: {str(e)}")
         
         return custom_results
+        
+    def _run_custom_metric(self, metric_key: str, metric_info: Dict, data: Any) -> Tuple[str, Dict]:
+        """执行单个自定义指标"""
+        try:
+            level1, level2, level3 = metric_key.split('.')
+            
+            if metric_info['type'] == 'class':
+                metric_class = metric_info['class']
+                metric_instance = metric_class(data)
+                metric_result = metric_instance.calculate()
+            else:
+                module = metric_info['module']
+                metric_result = module.evaluate(data)
+            
+            self.logger.info(f"Calculated custom metric: {level1}.{level2}.{level3}")
+            return level1, metric_result
+            
+        except Exception as e:
+            self.logger.error(f"Custom metric {metric_key} failed: {str(e)}")
+            try:
+                level1 = metric_key.split('.')[0]
+                return level1, {
+                    "status": "error",
+                    "message": str(e),
+                    "timestamp": datetime.now().isoformat(),
+                }
+            except Exception:
+                return "", {}
     
     def _process_merged_results(self, raw_results: Dict, custom_results: Dict) -> Dict:
         """处理合并后的评估结果"""
@@ -454,14 +478,25 @@ class DataProcessor:
         self.logger = logger
         self.data_path = data_path
         self.config_path = config_path
-        self.processor = self._load_processor()
         self.case_name = self.data_path.name
+        self._processor = None
+    
+    @property
+    def processor(self) -> Any:
+        """懒加载数据处理器,只在首次访问时创建"""
+        if self._processor is None:
+            self._processor = self._load_processor()
+        return self._processor
     
     def _load_processor(self) -> Any:
         """加载数据处理器"""
         try:
+            start_time = time.perf_counter()
             from modules.lib import data_process
-            return data_process.DataPreprocessing(self.data_path, self.config_path)
+            processor = data_process.DataPreprocessing(self.data_path, self.config_path)
+            elapsed_time = time.perf_counter() - start_time
+            self.logger.info(f"Data processor loaded in {elapsed_time:.2f}s")
+            return processor
         except ImportError as e:
             self.logger.error(f"Failed to load data processor: {str(e)}")
             raise RuntimeError(f"Failed to load data processor: {str(e)}") from e
@@ -505,15 +540,26 @@ class EvaluationPipeline:
     def execute(self) -> Dict[str, Any]:
         """执行评估流水线"""
         try:
+            # 只在首次运行时验证数据路径
             self.data_processor.validate()
             
             self.logger.info(f"Start evaluation: {self.data_path.name}")
             start_time = time.perf_counter()
+            
+            # 性能分析日志
+            config_start = time.perf_counter()
             results = self.evaluation_engine.evaluate(self.data_processor.processor)
-            elapsed_time = time.perf_counter() - start_time
-            self.logger.info(f"Evaluation completed, time: {elapsed_time:.2f}s")
+            eval_time = time.perf_counter() - config_start
             
+            # 生成报告
+            report_start = time.perf_counter()
             report = self._generate_report(self.data_processor.case_name, results)
+            report_time = time.perf_counter() - report_start
+            
+            # 总耗时
+            elapsed_time = time.perf_counter() - start_time
+            self.logger.info(f"Evaluation completed, time: {elapsed_time:.2f}s (评估: {eval_time:.2f}s, 报告: {report_time:.2f}s)")
+            
             return report
             
         except Exception as e:
@@ -532,32 +578,32 @@ class EvaluationPipeline:
         # 初始化计数器
         counters = {'p0': 0, 'p1': 0, 'p2': 0}
         
-        # 遍历报告中的所有键,包括内置和自定义一级指标
-        for category, category_data in report.items():
-            # 跳过非指标键(如metadata等)
-            if not isinstance(category_data, dict) or category == "metadata":
-                continue
-                
-            # 如果该维度的结果为False,根据其priority增加对应计数
-            if not category_data.get('result', True):
-                priority = category_data.get('priority')
-                if priority == 0:
-                    counters['p0'] += 1
-                elif priority == 1:
-                    counters['p1'] += 1
-                elif priority == 2:
-                    counters['p2'] += 1
+        # 优化:一次性收集所有失败的指标
+        failed_categories = [
+            (category, category_data.get('priority'))
+            for category, category_data in report.items()
+            if isinstance(category_data, dict) and category != "metadata" and not category_data.get('result', True)
+        ]
+        
+        # 计数
+        for _, priority in failed_categories:
+            if priority == 0:
+                counters['p0'] += 1
+            elif priority == 1:
+                counters['p1'] += 1
+            elif priority == 2:
+                counters['p2'] += 1
         
         # 阈值判断逻辑
-        thresholds_exceeded = (
-            counters['p0'] > thresholds['T0'],
-            counters['p1'] > thresholds['T1'],
+        overall_result = not (
+            counters['p0'] > thresholds['T0'] or
+            counters['p1'] > thresholds['T1'] or
             counters['p2'] > thresholds['T2']
         )
         
         # 生成处理后的报告
         processed_report = report.copy()
-        processed_report['overall_result'] = not any(thresholds_exceeded)
+        processed_report['overall_result'] = overall_result
         
         # 添加统计信息
         processed_report['threshold_checks'] = {
@@ -567,7 +613,7 @@ class EvaluationPipeline:
             'actual_counts': counters
         }
         
-        self.logger.info(f"Added overall result: {processed_report['overall_result']}")
+        self.logger.info(f"Added overall result: {overall_result}")
         return processed_report
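
Review note: a worked example of the refactored overall-result logic. The threshold values below are illustrative; in the pipeline they come from the T_threshold section of the config:

    report = {
        "metadata": {"case_name": "demo"},
        "safety":  {"result": True,  "priority": 0},
        "comfort": {"result": False, "priority": 1},
        "traffic": {"result": False, "priority": 0},
    }
    thresholds = {"T0": 0, "T1": 1, "T2": 2}   # illustrative thresholds

    counters = {"p0": 0, "p1": 0, "p2": 0}
    failed = [
        (category, data.get("priority"))
        for category, data in report.items()
        if isinstance(data, dict) and category != "metadata" and not data.get("result", True)
    ]
    for _, priority in failed:
        if priority in (0, 1, 2):
            counters[f"p{priority}"] += 1

    overall_result = not (counters["p0"] > thresholds["T0"]
                          or counters["p1"] > thresholds["T1"]
                          or counters["p2"] > thresholds["T2"])
    print(counters, overall_result)   # {'p0': 1, 'p1': 1, 'p2': 0} False -- one P0 failure exceeds T0=0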
         
     def _generate_report(self, case_name: str, results: Dict[str, Any]) -> Dict[str, Any]:
@@ -579,7 +625,7 @@ class EvaluationPipeline:
         results["metadata"] = {
             "case_name": case_name,
             "timestamp": datetime.now().isoformat(),
-            "version": "3.1.0",
+            "version": "1.0",
         }
         
         # 添加总体结果评估
@@ -598,50 +644,58 @@ def main():
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
     )
     
-    parser.add_argument(
-        "--logPath",
-        type=str,
-        default="test.log",
-        help="Log file path",
-    )
+    # 必要参数
     parser.add_argument(
         "--dataPath",
         type=str,
-        default=r"D:\Cicv\招远\AD_GBT41798-2022_TrafficSignalRecognitionAndResponse_LST_01",
+        default=r"D:\Kevin\zhaoyuan\data\V2V_CSAE53-2020_ForwardCollision_LST_01-02",
         help="Input data directory",
     )
     
-    parser.add_argument(
+    # 配置参数
+    config_group = parser.add_argument_group('Configuration')
+    config_group.add_argument(
         "--allConfigPath",
         type=str,
-        default=r"D:\Cicv\招远\zhaoyuan\config\all_metrics_config.yaml",
+        default=r"config/all_metrics_config.yaml",
         help="Full metrics config file path (built-in + custom)",
     )
-    
-    parser.add_argument(
+    config_group.add_argument(
         "--baseConfigPath",
         type=str,
-        default=r"D:\Cicv\招远\zhaoyuan\config\builtin_metrics_config.yaml",
+        default=r"config/builtin_metrics_config.yaml",
         help="Built-in metrics config file path",
     )
-    parser.add_argument(
+    config_group.add_argument(
+        "--customConfigPath",
+        type=str,
+        default=r"config/custom_metrics_config.yaml",
+        help="Custom metrics config path (optional)",
+    )
+    
+    # 输出参数
+    output_group = parser.add_argument_group('Output')
+    output_group.add_argument(
+        "--logPath",
+        type=str,
+        default="test.log",
+        help="Log file path",
+    )
+    output_group.add_argument(
         "--reportPath",
         type=str,
         default="reports",
         help="Output report directory",
     )
-    parser.add_argument(
+    
+    # 扩展参数
+    ext_group = parser.add_argument_group('Extensions')
+    ext_group.add_argument(
         "--customMetricsPath",
         type=str,
         default="custom_metrics",
         help="Custom metrics scripts directory (optional)",
     )
-    parser.add_argument(
-        "--customConfigPath",
-        type=str,
-        default=r"D:\Cicv\招远\zhaoyuan\config\custom_metrics_config.yaml",
-        help="Custom metrics config path (optional)",
-    )
     
     args = parser.parse_args()
 

Some files were not shown because too many files have changed in this diff