5 commits 563f309231 ... 08f3150662

Author SHA1 Message Date
  cicv 08f3150662 Add CSV export and plotting for metric chart data (CSV saved to the project-local data path); fix several bugs, including shake; add vehicle-body coordinate conversion to data_process 1 week ago
  cicv ee1f60fc35 Add CSV export and plotting for metric chart data (CSV saved to the project-local data path); fix several bugs, including shake; add vehicle-body coordinate conversion to data_process 1 week ago
  XGJ_zhaoyuan 563f309231 Rename the "warning-timing delay after the background vehicle's deceleration reaches -4 m/s2" metric; remove the delay metric 2 weeks ago
  XGJ_zhaoyuan fcb00a4f81 Add PSD and PET metrics 2 weeks ago
  XGJ_zhaoyuan 568536053a Add DTC metric 2 weeks ago
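The two newest commits also mention a vehicle-body coordinate conversion added to data_process; that change is not shown in the hunks below. A minimal sketch of what such a planar world-to-body transform typically looks like, assuming yaw is measured from the world x-axis (world_to_body is a hypothetical helper, not the actual data_process code):

    import numpy as np

    def world_to_body(dx: float, dy: float, yaw_rad: float):
        """Rotate a world-frame offset (dx, dy) into the ego body frame.
        x_body points forward, y_body to the left. Hypothetical helper."""
        cos_y, sin_y = np.cos(yaw_rad), np.sin(yaw_rad)
        x_body = cos_y * dx + sin_y * dy   # longitudinal offset
        y_body = -sin_y * dx + cos_y * dy  # lateral offset
        return x_body, y_body

    # An object 10 m north of an east-facing ego (yaw = 0) is 10 m to its left:
    print(world_to_body(0.0, 10.0, 0.0))  # -> (0.0, 10.0)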

+ 0 - 22
.vscode/launch.json

@@ -1,22 +0,0 @@
-{
-    // Use IntelliSense to learn about possible attributes.
-    // Hover to view descriptions of existing attributes.
-    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
-    "version": "0.2.0",
-    "configurations": [
-        {
-            "name": "Python Debugger: Current File",
-            "type": "debugpy",
-            "request": "launch",
-            "program": "${file}",
-            "console": "integratedTerminal"
-        },
-        {
-            "name": "Python Debugger: Current File",
-            "type": "debugpy",
-            "request": "launch",
-            "program": "${file}",
-            "console": "integratedTerminal"
-        }
-    ]
-}

+ 51 - 66
config/all_metrics_config.yaml

@@ -43,26 +43,21 @@ safety:
       priority: 0
       max: 2000.0
       min: 1.5
-    TLC:
-      name: TLC
-      priority: 0
-      max: 2000.0
-      min: 1.5
-    TTB:
-      name: TTB
-      priority: 0
-      max: 2000.0
-      min: 1.5
-    TM:
-      name: TM
-      priority: 0
-      max: 2000.0
-      min: 1.5
-    PET:
-      name: PET
-      priority: 0
-      max: 2000.0
-      min: 1.5
+    # TLC:
+    #   name: TLC
+    #   priority: 0
+    #   max: 2000.0
+    #   min: 1.5
+    # TTB:
+    #   name: TTB
+    #   priority: 0
+    #   max: 2000.0
+    #   min: 1.5
+    # TM:
+    #   name: TM
+    #   priority: 0
+    #   max: 2000.0
+    #   min: 1.5
   safeDistance:
     name: safeDistance
     priority: 0
@@ -76,11 +71,6 @@ safety:
       priority: 0
       max: 2000.0
       min: 2.0
-    DTC:
-      name: DTC
-      priority: 0
-      max: 2000.0
-      min: 1.5
   safeAcceleration:
     name: safeAcceleration
     priority: 0
@@ -102,11 +92,6 @@ safety:
       priority: 0
       max: 10.0
       min: 0.0
-    PSD:
-      name: PSD
-      priority: 0
-      max: 2000.0
-      min: 1.5
 
 user:
   name: user
@@ -115,7 +100,7 @@ user:
     name: safetime
     priority: 0
     CustomTTC:
-      name: CustomTTC
+      name: customTTC
       priority: 0
       max: 20.0
       min: 3.5
@@ -126,8 +111,8 @@ comfort:
   comfortLat:
     name: comfortLat
     priority: 0
-    Weaving:
-      name: Weaving
+    zigzag:
+      name: zigzag
       priority: 0
       max: 0
       min: 0
@@ -154,39 +139,39 @@ comfort:
       priority: 0
       max: 0
       min: 0
-  comforDynamic:
-    name: comforDynamic
-    priority: 0
-    rideQualityScore:
-      name: rideQualityScore
-      priority: 0
-      max: 0
-      min: 0
-    motionSickness:
-      name: motionSickness
-      priority: 0
-      max: 0
-      min: 0
-    motionComfortIndex:
-      name: motionComfortIndex
-      priority: 0
-      max: 0
-      min: 0
-    vdv:
-      name: vdv
-      priority: 0
-      max: 0
-      min: 0
-    ava_vav:
-      name: ava_vav
-      priority: 0
-      max: 0
-      min: 0
-    msdv:
-      name: msdv
-      priority: 0
-      max: 0
-      min: 0
+  # comforDynamic:
+  #   name: comforDynamic
+  #   priority: 0
+  #   rideQualityScore:
+  #     name: rideQualityScore
+  #     priority: 0
+  #     max: 100
+  #     min: 80
+  #   motionSickness:
+  #     name: motionSickness
+  #     priority: 0
+  #     max: 30.0
+  #     min: 0.0
+  #   motionComfortIndex:
+  #     name: motionComfortIndex
+  #     priority: 0
+  #     max: 10.0
+  #     min: 8.0
+  #   vdv:
+  #     name: vdv
+  #     priority: 0
+  #     max: 8.0
+  #     min: 0
+  #   ava_vav:
+  #     name: ava_vav
+  #     priority: 0
+  #     max: 0.63
+  #     min: 0.0
+  #   msdv:
+  #     name: msdv
+  #     priority: 0
+  #     max: 6.0
+  #     min: 0.0
 
 efficient:
   name: efficient
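Note that every metric entry in this file shares the same flat name / priority / max / min shape; the chart code added later in this diff (get_metric_thresholds in modules/lib/chart_generator.py) resolves thresholds by recursing through that structure. A minimal standalone sketch of the same lookup, assuming PyYAML is installed and the file sits at config/all_metrics_config.yaml:

    import yaml

    def find_thresholds(node, target: str):
        # Recursively search the nested config for an entry whose 'name'
        # matches and which carries 'min'/'max' bounds.
        if isinstance(node, dict):
            if str(node.get("name", "")).lower() == target.lower() and "min" in node and "max" in node:
                return node["min"], node["max"]
            for value in node.values():
                found = find_thresholds(value, target)
                if found is not None:
                    return found
        return None

    with open("config/all_metrics_config.yaml", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)

    print(find_thresholds(cfg.get("safety", {}), "TTC"))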

+ 1 - 1
config/metrics_config.yaml

@@ -125,7 +125,7 @@ function:
   priority: 0
   scenario:
     name: ForwardCollision
-    priority: 0
+    priority: 0 
     latestWarningDistance_TTC_LST:
       name: latestWarningDistance_TTC_LST
       priority: 0

+ 10 - 13
custom_metrics/metric_user_safeTime_CustomTTC.py

@@ -1,33 +1,31 @@
-"""
-自定义TTC指标评测脚本示例
+"""自定义TTC指标评测脚本示例
 
-此脚本实现了一个自定义的TTC(Time To Collision)指标评测逻辑
+此脚本实现了一个自定义的TTC(Time To Collision)指标评测逻辑,并包含图表数据生成功能
 """
 from typing import Dict, Any
 import math
 from modules.lib.score import Score
 import logging
-import inspect  # add the missing inspect module import
+import inspect
+
+from pathlib import Path
 
 def evaluate(data) -> Dict[str, Any]:
-    """
-    Evaluate the custom TTC metric
+    """Evaluate the custom TTC metric
     
     Args:
         data: evaluation data, including scenario and trajectory information
         config: metric configuration, including thresholds and other parameters
         
     Returns:
-        Evaluation result, including score, details, etc.
+        Evaluation result, including score, details, etc., plus chart data
     """
-
     try:
         # Compute the minimum TTC value
         min_ttc = calculate_min_ttc(data.ego_data)
         
-        # Evaluate the result with the Score class
-        # evaluator = Score(config)   
-        # result = evaluator.evaluate(min_ttc)
+        
+        
         return min_ttc
         
     except Exception as e:
@@ -41,8 +39,7 @@ def evaluate(data) -> Dict[str, Any]:
     
 
 def calculate_min_ttc(data):
-    """
-    Compute the minimum TTC value
+    """Compute the minimum TTC value
     
     Args:
         data: trajectory data list
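With the Score-based evaluation now removed (it survives only in the deleted comments above), evaluate() simply returns the minimum TTC computed from data.ego_data. A minimal caller sketch, assuming custom_metrics is importable as a package; DummyData and run_custom_ttc are illustrative names, not part of the framework:

    from custom_metrics.metric_user_safeTime_CustomTTC import evaluate

    class DummyData:
        """Stand-in for the framework's data object: only the attribute this
        script reads (ego_data) is provided. Illustrative only."""
        def __init__(self, ego_data):
            self.ego_data = ego_data

    def run_custom_ttc(ego_data) -> float:
        # evaluate() returns the raw minimum TTC; any scoring (e.g. via the
        # Score class shown in the removed comments) would wrap this value.
        return evaluate(DummyData(ego_data))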

+ 1899 - 0
modules/lib/chart_generator.py

@@ -0,0 +1,1899 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##################################################################
+#
+# Copyright (c) 2023 CICV, Inc. All Rights Reserved
+#
+##################################################################
+"""
+@Authors:           zhanghaiwen(zhanghaiwen@china-icv.cn)
+@Date:              2023/06/25
+@Last Modified:     2025/05/20
+@Summary:           Chart generation utilities for metrics visualization
+"""
+
+import os
+import numpy as np
+import pandas as pd
+import matplotlib
+matplotlib.use('Agg')  # use a non-interactive (non-GUI) backend
+import matplotlib.pyplot as plt
+from typing import Optional, Dict, List, Any, Union
+from pathlib import Path
+
+from modules.lib.log_manager import LogManager
+
+def generate_function_chart_data(function_calculator, metric_name: str, output_dir: Optional[str] = None) -> Optional[str]:
+    """
+    Generate chart data for function metrics
+    
+    Args:
+        function_calculator: FunctionCalculator instance
+        metric_name: Metric name
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Ensure the output directory exists
+        if output_dir:
+            os.makedirs(output_dir, exist_ok=True)
+        else:
+            output_dir = os.getcwd()
+            
+        # Dispatch to a chart generator based on the metric name
+        if metric_name.lower() == 'latestwarningdistance_ttc_lst':
+            return generate_warning_ttc_chart(function_calculator, output_dir)
+        else:
+            logger.warning(f"Chart generation not implemented for metric [{metric_name}]")
+            return None
+            
+    except Exception as e:
+        logger.error(f"Failed to generate chart data: {str(e)}", exc_info=True)
+        return None
+
+def generate_warning_ttc_chart(function_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate TTC warning chart with data visualization.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+
+    Args:
+        function_calculator: FunctionCalculator instance
+        output_dir: Output directory
+
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+
+    try:
+        # Fetch the data
+        ego_df = function_calculator.ego_data.copy()
+        scenario_name = function_calculator.data.function_config["function"]["scenario"]["name"]
+        correctwarning = scenario_sign_dict[scenario_name]
+
+        warning_dist = calculate_distance(ego_df, correctwarning)
+        warning_speed = calculate_relative_speed(ego_df, correctwarning)
+
+        if warning_dist.empty:
+            logger.warning("Cannot generate TTC warning chart: empty data")
+            return None
+
+        # Generate a timestamp
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+
+        # Save the CSV data
+        csv_filename = os.path.join(output_dir, f"warning_ttc_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': ego_df['simTime'],
+            'warning_distance': warning_dist,
+            'warning_speed': warning_speed,
+            'ttc': warning_dist / warning_speed
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"Warning TTC data saved to: {csv_filename}")
+
+        # Read the data back from the CSV
+        df = pd.read_csv(csv_filename)
+
+        # Create the chart
+        plt.figure(figsize=(12, 8), constrained_layout=True)
+
+        # Subplot 1: warning distance
+        ax1 = plt.subplot(3, 1, 1)
+        ax1.plot(df['simTime'], df['warning_distance'], 'b-', label='Warning Distance')
+        ax1.set_xlabel('Time (s)')
+        ax1.set_ylabel('Distance (m)')
+        ax1.set_title('Warning Distance Over Time')
+        ax1.grid(True)
+        ax1.legend()
+
+        # Subplot 2: relative speed
+        ax2 = plt.subplot(3, 1, 2)
+        ax2.plot(df['simTime'], df['warning_speed'], 'g-', label='Relative Speed')
+        ax2.set_xlabel('Time (s)')
+        ax2.set_ylabel('Speed (m/s)')
+        ax2.set_title('Relative Speed Over Time')
+        ax2.grid(True)
+        ax2.legend()
+
+        # Subplot 3: TTC
+        ax3 = plt.subplot(3, 1, 3)
+        ax3.plot(df['simTime'], df['ttc'], 'r-', label='TTC')
+        ax3.set_xlabel('Time (s)')
+        ax3.set_ylabel('TTC (s)')
+        ax3.set_title('Time To Collision (TTC) Over Time')
+        ax3.grid(True)
+        ax3.legend()
+
+        # Save the figure
+        chart_filename = os.path.join(output_dir, f"warning_ttc_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+
+        logger.info(f"Warning TTC chart saved to: {chart_filename}")
+        return chart_filename
+
+    except Exception as e:
+        logger.error(f"Failed to generate warning TTC chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_comfort_chart_data(comfort_calculator, metric_name: str, output_dir: Optional[str] = None) -> Optional[str]:
+    """
+    Generate chart data for comfort metrics
+    
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        metric_name: Metric name
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Ensure the output directory exists
+        if output_dir:
+            os.makedirs(output_dir, exist_ok=True)
+        else:
+            output_dir = os.getcwd()
+            
+        # Dispatch to a chart generator based on the metric name
+        if metric_name.lower() == 'shake':
+            return generate_shake_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'zigzag':
+            return generate_zigzag_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'cadence':
+            return generate_cadence_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'slambrake':
+            return generate_slam_brake_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'slamaccelerate':
+            return generate_slam_accelerate_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'vdv':
+            return generate_vdv_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'ava_vav':
+            return generate_ava_vav_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'msdv':
+            return generate_msdv_chart(comfort_calculator, output_dir)
+        elif metric_name.lower() == 'motionsickness':
+            return generate_motion_sickness_chart(comfort_calculator, output_dir)
+        else:
+            logger.warning(f"Chart generation not implemented for metric [{metric_name}]")
+            return None
+            
+    except Exception as e:
+        logger.error(f"Failed to generate chart data: {str(e)}", exc_info=True)
+        return None
+
+def generate_shake_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate shake metric chart with orange background for shake events.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+
+    try:
+        # Fetch the data
+        df = comfort_calculator.ego_df.copy()
+        shake_events = comfort_calculator.shake_events
+
+        if df.empty:
+            logger.warning("Cannot generate shake chart: empty data")
+            return None
+
+        # Generate a timestamp
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+
+        # Step 1: save the CSV data
+        csv_filename = os.path.join(output_dir, f"shake_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'lat_acc': df['lat_acc'],
+            'lat_acc_rate': df['lat_acc_rate'],
+            'speedH_std': df['speedH_std'],
+            'lat_acc_threshold': df.get('lat_acc_threshold', pd.Series([None]*len(df))),
+            'lat_acc_rate_threshold': 0.5,
+            'speedH_std_threshold': df.get('speedH_threshold', pd.Series([None]*len(df))),
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"Shake data saved to: {csv_filename}")
+
+        # Step 2: read back from the CSV (verifies the saved data)
+        df = pd.read_csv(csv_filename)
+
+        # Step 3: create the chart
+        import matplotlib.pyplot as plt
+        plt.figure(figsize=(12, 8), constrained_layout=True)
+
+        # Subplot 1: lateral acceleration
+        ax1 = plt.subplot(3, 1, 1)
+        ax1.plot(df['simTime'], df['lat_acc'], 'b-', label='Lateral Acceleration')
+        if 'lat_acc_threshold' in df.columns:
+            ax1.plot(df['simTime'], df['lat_acc_threshold'], 'r--', label='lat_acc_threshold')
+
+        for idx, event in enumerate(shake_events):
+            label = 'Shake Event' if idx == 0 else None
+            ax1.axvspan(event['start_time'], event['end_time'], alpha=0.3, color='orange', label=label)
+
+        ax1.set_xlabel('Time (s)')
+        ax1.set_ylabel('Lateral Acceleration (m/s²)')
+        ax1.set_title('Shake Event Detection - Lateral Acceleration')
+        ax1.grid(True)
+        ax1.legend()
+
+        # Subplot 2: lat_acc_rate
+        ax2 = plt.subplot(3, 1, 2)
+        ax2.plot(df['simTime'], df['lat_acc_rate'], 'g-', label='lat_acc_rate')
+        ax2.axhline(
+            y=0.5, color='orange', linestyle='--', linewidth=1.2, label='lat_acc_rate_threshold'
+        )
+
+        for idx, event in enumerate(shake_events):
+            label = 'Shake Event' if idx == 0 else None
+            ax2.axvspan(event['start_time'], event['end_time'], alpha=0.3, color='orange', label=label)
+
+        ax2.set_xlabel('Time (s)')
+        ax2.set_ylabel('Lateral Acceleration Rate (m/s³)')
+        ax2.set_title('Shake Event Detection - lat_acc_rate')
+        ax2.grid(True)
+        ax2.legend()
+
+        # Subplot 3: speedH_std
+        ax3 = plt.subplot(3, 1, 3)
+        ax3.plot(df['simTime'], df['speedH_std'], 'b-', label='speedH_std')
+        if 'speedH_std_threshold' in df.columns:
+            ax3.plot(df['simTime'], df['speedH_std_threshold'], 'r--', label='speedH_threshold')
+
+        for idx, event in enumerate(shake_events):
+            label = 'Shake Event' if idx == 0 else None
+            ax3.axvspan(event['start_time'], event['end_time'], alpha=0.3, color='orange', label=label)
+
+        ax3.set_xlabel('Time (s)')
+        ax3.set_ylabel('Angular Velocity (deg/s)')
+        ax3.set_title('Shake Event Detection - speedH_std')
+        ax3.grid(True)
+        ax3.legend()
+
+        # Save the figure
+        chart_filename = os.path.join(output_dir, f"shake_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+
+        logger.info(f"Shake chart saved to: {chart_filename}")
+        return chart_filename
+
+    except Exception as e:
+        logger.error(f"Failed to generate shake chart: {str(e)}", exc_info=True)
+        return None
+
+
+def generate_zigzag_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate zigzag metric chart with orange background for zigzag events.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+
+    try:
+        # Fetch the data
+        df = comfort_calculator.ego_df.copy()
+        zigzag_events = comfort_calculator.discomfort_df[
+            comfort_calculator.discomfort_df['type'] == 'zigzag'
+        ].copy()
+
+        if df.empty:
+            logger.warning("Cannot generate zigzag chart: empty data")
+            return None
+
+        # Generate a timestamp
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+
+        # Step 1: save the CSV data
+        csv_filename = os.path.join(output_dir, f"zigzag_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'speedH': df['speedH'],
+            'posH': df['posH'],
+            'min_speedH_threshold': -2.3,  # could be replaced with a dynamic threshold
+            'max_speedH_threshold': 2.3
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"Zigzag data saved to: {csv_filename}")
+
+        # Step 2: read back from the CSV (verifies the saved data)
+        df = pd.read_csv(csv_filename)
+
+        # Step 3: create the chart
+        import matplotlib.pyplot as plt
+        plt.figure(figsize=(12, 8), constrained_layout=True)
+
+        # ===== Subplot 1: Yaw Rate =====
+        ax1 = plt.subplot(2, 1, 1)
+        ax1.plot(df['simTime'], df['speedH'], 'g-', label='Yaw Rate')
+
+        # Add upper/lower speedH threshold lines
+        ax1.axhline(y=2.3, color='m', linestyle='--', linewidth=1.2, label='Max Threshold (+2.3)')
+        ax1.axhline(y=-2.3, color='r', linestyle='--', linewidth=1.2, label='Min Threshold (-2.3)')
+
+        # Add orange background spans for zigzag events
+        for idx, event in zigzag_events.iterrows():
+            label = 'Zigzag Event' if idx == 0 else None
+            ax1.axvspan(event['start_time'], event['end_time'], 
+                        alpha=0.3, color='orange', label=label)
+
+        ax1.set_xlabel('Time (s)')
+        ax1.set_ylabel('Angular Velocity (deg/s)')
+        ax1.set_title('Zigzag Event Detection - Yaw Rate')
+        ax1.grid(True)
+        ax1.legend(loc='upper left')
+
+        # ===== Subplot 2: Yaw Angle =====
+        ax2 = plt.subplot(2, 1, 2)
+        ax2.plot(df['simTime'], df['posH'], 'b-', label='Yaw')
+
+        # Add orange background spans for zigzag events
+        for idx, event in zigzag_events.iterrows():
+            label = 'Zigzag Event' if idx == 0 else None
+            ax2.axvspan(event['start_time'], event['end_time'], 
+                        alpha=0.3, color='orange', label=label)
+
+        ax2.set_xlabel('Time (s)')
+        ax2.set_ylabel('Yaw (deg)')
+        ax2.set_title('Zigzag Event Detection - Yaw Angle')
+        ax2.grid(True)
+        ax2.legend(loc='upper left')
+
+        # Save the figure
+        chart_filename = os.path.join(output_dir, f"zigzag_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+
+        logger.info(f"Zigzag chart saved to: {chart_filename}")
+        return chart_filename
+
+    except Exception as e:
+        logger.error(f"Failed to generate zigzag chart: {str(e)}", exc_info=True)
+        return None
+        
+def generate_cadence_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate cadence metric chart with orange background for cadence events.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+    
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        df = comfort_calculator.ego_df.copy()
+        cadence_events = comfort_calculator.discomfort_df[comfort_calculator.discomfort_df['type'] == 'cadence'].copy()
+        
+        if df.empty:
+            logger.warning("Cannot generate cadence chart: empty data")
+            return None
+            
+        # Generate a timestamp
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # Step 1: save the CSV data
+        csv_filename = os.path.join(output_dir, f"cadence_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'lon_acc': df['lon_acc'],
+            'v': df['v'],
+            'ip_acc': df.get('ip_acc', pd.Series([None]*len(df))),
+            'ip_dec': df.get('ip_dec', pd.Series([None]*len(df)))
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"Cadence data saved to: {csv_filename}")
+        
+        # Step 2: read back from the CSV (verifies the saved data)
+        df = pd.read_csv(csv_filename)
+        
+        # Step 3: create the chart
+        import matplotlib.pyplot as plt
+        plt.figure(figsize=(12, 8), constrained_layout=True)
+        
+        # Subplot 1: longitudinal acceleration
+        ax1 = plt.subplot(2, 1, 1)
+        ax1.plot(df['simTime'], df['lon_acc'], 'b-', label='Longitudinal Acceleration')
+        if 'ip_acc' in df.columns and 'ip_dec' in df.columns:
+            ax1.plot(df['simTime'], df['ip_acc'], 'r--', label='Acceleration Threshold')
+            ax1.plot(df['simTime'], df['ip_dec'], 'g--', label='Deceleration Threshold')
+        
+        # Highlight cadence events with an orange background
+        for idx, event in cadence_events.iterrows():
+            label = 'Cadence Event' if idx == 0 else None
+            ax1.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        ax1.set_xlabel('Time (s)')
+        ax1.set_ylabel('Longitudinal Acceleration (m/s²)')
+        ax1.set_title('Cadence Event Detection - Longitudinal Acceleration')
+        ax1.grid(True)
+        ax1.legend()
+        
+        # Subplot 2: velocity
+        ax2 = plt.subplot(2, 1, 2)
+        ax2.plot(df['simTime'], df['v'], 'g-', label='Velocity')
+        
+        # Highlight cadence events with an orange background
+        for idx, event in cadence_events.iterrows():
+            label = 'Cadence Event' if idx == 0 else None
+            ax2.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        ax2.set_xlabel('Time (s)')
+        ax2.set_ylabel('Velocity (m/s)')
+        ax2.set_title('Cadence Event Detection - Vehicle Speed')
+        ax2.grid(True)
+        ax2.legend()
+        
+        # Save the figure
+        chart_filename = os.path.join(output_dir, f"cadence_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        logger.info(f"Cadence chart saved to: {chart_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate cadence chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_slam_brake_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate slam brake metric chart with orange background for slam brake events.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+    
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        df = comfort_calculator.ego_df.copy()
+        slam_brake_events = comfort_calculator.discomfort_df[comfort_calculator.discomfort_df['type'] == 'slam_brake'].copy()
+        
+        if df.empty:
+            logger.warning("Cannot generate slam brake chart: empty data")
+            return None
+            
+        # Generate a timestamp
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # Step 1: save the CSV data
+        csv_filename = os.path.join(output_dir, f"slam_brake_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'lon_acc': df['lon_acc'],
+            'v': df['v'],
+            'min_threshold': df.get('ip_dec', pd.Series([None]*len(df))),
+            'max_threshold': 0.0
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"Slam brake data saved to: {csv_filename}")
+        
+        # Step 2: read back from the CSV (verifies the saved data)
+        df = pd.read_csv(csv_filename)
+        
+        # Step 3: create the chart
+        plt.figure(figsize=(12, 8), constrained_layout=True)
+        
+        # Subplot 1: longitudinal acceleration
+        ax1 = plt.subplot(2, 1, 1)
+        ax1.plot(df['simTime'], df['lon_acc'], 'b-', label='Longitudinal Acceleration')
+        if 'min_threshold' in df.columns:
+            ax1.plot(df['simTime'], df['min_threshold'], 'r--', label='Deceleration Threshold')
+        
+        # Highlight slam-brake events with an orange background
+        for idx, event in slam_brake_events.iterrows():
+            label = 'Slam Brake Event' if idx == 0 else None
+            ax1.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        ax1.set_xlabel('Time (s)')
+        ax1.set_ylabel('Longitudinal Acceleration (m/s²)')
+        ax1.set_title('Slam Brake Event Detection - Longitudinal Acceleration')
+        ax1.grid(True)
+        ax1.legend()
+        
+        # Subplot 2: velocity
+        ax2 = plt.subplot(2, 1, 2)
+        ax2.plot(df['simTime'], df['v'], 'g-', label='Velocity')
+        
+        # Highlight slam-brake events with an orange background
+        for idx, event in slam_brake_events.iterrows():
+            label = 'Slam Brake Event' if idx == 0 else None
+            ax2.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        ax2.set_xlabel('Time (s)')
+        ax2.set_ylabel('Velocity (m/s)')
+        ax2.set_title('Slam Brake Event Detection - Vehicle Speed')
+        ax2.grid(True)
+        ax2.legend()
+        
+        # Save the figure
+        chart_filename = os.path.join(output_dir, f"slam_brake_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        logger.info(f"Slam brake chart saved to: {chart_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate slam brake chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_slam_accelerate_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate slam accelerate metric chart with orange background for slam accelerate events.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+    
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        df = comfort_calculator.ego_df.copy()
+        slam_accel_events = comfort_calculator.discomfort_df[comfort_calculator.discomfort_df['type'] == 'slam_accel'].copy()
+        
+        if df.empty:
+            logger.warning("Cannot generate slam accelerate chart: empty data")
+            return None
+            
+        # Generate a timestamp
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # Step 1: save the CSV data
+        csv_filename = os.path.join(output_dir, f"slam_accel_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'lon_acc': df['lon_acc'],
+            'v': df['v'],
+            'min_threshold': 0.0,
+            'max_threshold': df.get('ip_acc', pd.Series([None]*len(df)))
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"Slam accelerate data saved to: {csv_filename}")
+        
+        # Step 2: read back from the CSV (verifies the saved data)
+        df = pd.read_csv(csv_filename)
+        
+        # Step 3: create the chart
+        plt.figure(figsize=(12, 8), constrained_layout=True)
+        
+        # Subplot 1: longitudinal acceleration
+        ax1 = plt.subplot(2, 1, 1)
+        ax1.plot(df['simTime'], df['lon_acc'], 'b-', label='Longitudinal Acceleration')
+        if 'max_threshold' in df.columns:
+            ax1.plot(df['simTime'], df['max_threshold'], 'r--', label='Acceleration Threshold')
+        
+        # Highlight slam-accelerate events with an orange background
+        for idx, event in slam_accel_events.iterrows():
+            label = 'Slam Accelerate Event' if idx == 0 else None
+            ax1.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        ax1.set_xlabel('Time (s)')
+        ax1.set_ylabel('Longitudinal Acceleration (m/s²)')
+        ax1.set_title('Slam Accelerate Event Detection - Longitudinal Acceleration')
+        ax1.grid(True)
+        ax1.legend()
+        
+        # Subplot 2: velocity
+        ax2 = plt.subplot(2, 1, 2)
+        ax2.plot(df['simTime'], df['v'], 'g-', label='Velocity')
+        
+        # Highlight slam-accelerate events with an orange background
+        for idx, event in slam_accel_events.iterrows():
+            label = 'Slam Accelerate Event' if idx == 0 else None
+            ax2.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        ax2.set_xlabel('Time (s)')
+        ax2.set_ylabel('Velocity (m/s)')
+        ax2.set_title('Slam Accelerate Event Detection - Vehicle Speed')
+        ax2.grid(True)
+        ax2.legend()
+        
+        # Save the figure
+        chart_filename = os.path.join(output_dir, f"slam_accel_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        logger.info(f"Slam accelerate chart saved to: {chart_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate slam accelerate chart: {str(e)}", exc_info=True)
+        return None
+
+def get_metric_thresholds(calculator, metric_name: str) -> dict:
+    """
+    Fetch a metric's thresholds from the configuration file
+    
+    Args:
+        calculator: Calculator instance (SafetyCalculator or ComfortCalculator)
+        metric_name: Metric name
+        
+    Returns:
+        dict: Dictionary with the min and max thresholds
+    """
+    logger = LogManager().get_logger()
+    thresholds = {"min": None, "max": None}
+    
+    try:
+        # Get the config based on the calculator type
+        if hasattr(calculator, 'data_processed'):
+            if hasattr(calculator.data_processed, 'safety_config') and 'safety' in calculator.data_processed.safety_config:
+                config = calculator.data_processed.safety_config['safety']
+                metric_type = 'safety'
+            elif hasattr(calculator.data_processed, 'comfort_config') and 'comfort' in calculator.data_processed.comfort_config:
+                config = calculator.data_processed.comfort_config['comfort']
+                metric_type = 'comfort'
+            else:
+                logger.warning(f"Could not find configuration for {metric_name}")
+                return thresholds
+        else:
+            logger.warning("Calculator has no data_processed attribute")
+            return thresholds
+            
+        # Recursively search for the metric config
+        def find_metric_config(node, target_name):
+            if isinstance(node, dict):
+                if 'name' in node and node['name'].lower() == target_name.lower() and 'min' in node and 'max' in node:
+                    return node
+                for key, value in node.items():
+                    result = find_metric_config(value, target_name)
+                    if result:
+                        return result
+            return None
+        
+        # Look up the metric config
+        metric_config = find_metric_config(config, metric_name)
+        if metric_config:
+            thresholds["min"] = metric_config.get("min")
+            thresholds["max"] = metric_config.get("max")
+            logger.info(f"Found thresholds for {metric_name}: min={thresholds['min']}, max={thresholds['max']}")
+        else:
+            logger.warning(f"No threshold info for {metric_name} found in the {metric_type} config")
+    
+    except Exception as e:
+        logger.error(f"Error while getting thresholds for {metric_name}: {str(e)}", exc_info=True)
+    
+    return thresholds
+
+def generate_safety_chart_data(safety_calculator, metric_name: str, output_dir: Optional[str] = None) -> Optional[str]:
+    """
+    Generate chart data for safety metrics
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        metric_name: Metric name
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Ensure the output directory exists
+        if output_dir:
+            os.makedirs(output_dir, exist_ok=True)
+        else:
+            output_dir = os.getcwd()
+            
+        # Dispatch to a chart generator based on the metric name
+        if metric_name.lower() == 'ttc':
+            return generate_ttc_chart(safety_calculator, output_dir)
+        elif metric_name.lower() == 'mttc':
+            return generate_mttc_chart(safety_calculator, output_dir)
+        elif metric_name.lower() == 'thw':
+            return generate_thw_chart(safety_calculator, output_dir)
+        elif metric_name.lower() == 'lonsd':
+            return generate_lonsd_chart(safety_calculator, output_dir)
+        elif metric_name.lower() == 'latsd':
+            return generate_latsd_chart(safety_calculator, output_dir)
+        elif metric_name.lower() == 'btn':
+            return generate_btn_chart(safety_calculator, output_dir)
+        elif metric_name.lower() == 'collisionrisk':
+            return generate_collision_risk_chart(safety_calculator, output_dir)
+        elif metric_name.lower() == 'collisionseverity':
+            return generate_collision_severity_chart(safety_calculator, output_dir)
+        else:
+            logger.warning(f"Chart generation not implemented for metric [{metric_name}]")
+            return None
+            
+    except Exception as e:
+        logger.error(f"Failed to generate chart data: {str(e)}", exc_info=True)
+        return None
+
+def generate_ttc_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate TTC metric chart with orange background for unsafe events.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        ttc_data = safety_calculator.ttc_data
+        
+        if not ttc_data:
+            logger.warning("Cannot generate TTC chart: empty data")
+            return None
+            
+        # Build a DataFrame
+        df = pd.DataFrame(ttc_data)
+        
+        # Fetch the thresholds
+        thresholds = get_metric_thresholds(safety_calculator, 'TTC')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # Generate a timestamp
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # Step 1: save the CSV data
+        csv_filename = os.path.join(output_dir, f"ttc_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'simFrame': df['simFrame'],
+            'TTC': df['TTC'],
+            'min_threshold': min_threshold,
+            'max_threshold': max_threshold
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"TTC data saved to: {csv_filename}")
+        
+        # Step 2: read back from the CSV (verifies the saved data)
+        df = pd.read_csv(csv_filename)
+        
+        # Detect threshold-violation events
+        unsafe_events = []
+        if min_threshold is not None:
+            # For TTC, values below the minimum threshold are unsafe
+            unsafe_condition = df['TTC'] < min_threshold
+            event_groups = (unsafe_condition != unsafe_condition.shift()).cumsum()
+            
+            for _, group in df[unsafe_condition].groupby(event_groups):
+                if len(group) >= 2:  # an event requires at least 2 frames
+                    start_time = group['simTime'].iloc[0]
+                    end_time = group['simTime'].iloc[-1]
+                    duration = end_time - start_time
+                    
+                    if duration >= 0.1:  # only record events lasting longer than 0.1 s
+                        unsafe_events.append({
+                            'start_time': start_time,
+                            'end_time': end_time,
+                            'start_frame': group['simFrame'].iloc[0],
+                            'end_frame': group['simFrame'].iloc[-1],
+                            'duration': duration,
+                            'min_ttc': group['TTC'].min()
+                        })
+        
+        # Step 3: create the chart
+        plt.figure(figsize=(12, 8))
+        plt.plot(df['simTime'], df['TTC'], 'b-', label='TTC')
+        
+        # Add threshold lines
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold}s)')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold})')
+        
+        # Highlight unsafe events with an orange background
+        for idx, event in enumerate(unsafe_events):
+            label = 'Unsafe TTC Event' if idx == 0 else None
+            plt.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('TTC (s)')
+        plt.title('Time To Collision (TTC) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # Save the figure
+        chart_filename = os.path.join(output_dir, f"ttc_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # Log unsafe event information
+        if unsafe_events:
+            logger.info(f"Detected {len(unsafe_events)} unsafe TTC events")
+            for i, event in enumerate(unsafe_events):
+                logger.info(f"Unsafe TTC event #{i+1}: start={event['start_time']:.2f}s, end={event['end_time']:.2f}s, duration={event['duration']:.2f}s, min TTC={event['min_ttc']:.2f}s")
+        
+        logger.info(f"TTC chart saved to: {chart_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate TTC chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_mttc_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate MTTC metric chart with orange background for unsafe events
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        mttc_data = safety_calculator.mttc_data
+        
+        if not mttc_data:
+            logger.warning("Cannot generate MTTC chart: empty data")
+            return None
+            
+        # Build a DataFrame
+        df = pd.DataFrame(mttc_data)
+        
+        # Fetch the thresholds
+        thresholds = get_metric_thresholds(safety_calculator, 'MTTC')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # Detect threshold-violation events
+        unsafe_events = []
+        if min_threshold is not None:
+            # For MTTC, values below the minimum threshold are unsafe
+            unsafe_condition = df['MTTC'] < min_threshold
+            event_groups = (unsafe_condition != unsafe_condition.shift()).cumsum()
+            
+            for _, group in df[unsafe_condition].groupby(event_groups):
+                if len(group) >= 2:  # an event requires at least 2 frames
+                    start_time = group['simTime'].iloc[0]
+                    end_time = group['simTime'].iloc[-1]
+                    duration = end_time - start_time
+                    
+                    if duration >= 0.1:  # only record events lasting longer than 0.1 s
+                        unsafe_events.append({
+                            'start_time': start_time,
+                            'end_time': end_time,
+                            'start_frame': group['simFrame'].iloc[0],
+                            'end_frame': group['simFrame'].iloc[-1],
+                            'duration': duration,
+                            'min_mttc': group['MTTC'].min()
+                        })
+        
+        # Create the chart
+        plt.figure(figsize=(12, 6))
+        plt.plot(df['simTime'], df['MTTC'], 'g-', label='MTTC')
+        
+        # Add threshold lines
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold}s)')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold})')
+        
+        # Highlight unsafe events with an orange background
+        for idx, event in enumerate(unsafe_events):
+            label = 'Unsafe MTTC Event' if idx == 0 else None
+            plt.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('MTTC (s)')
+        plt.title('Modified Time To Collision (MTTC) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # Save the chart
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        chart_filename = os.path.join(output_dir, f"mttc_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # Save the CSV data, including threshold info
+        csv_filename = os.path.join(output_dir, f"mttc_data_{timestamp}.csv")
+        df_csv = df.copy()
+        df_csv['min_threshold'] = min_threshold
+        df_csv['max_threshold'] = max_threshold
+        df_csv.to_csv(csv_filename, index=False)
+        
+        # Log unsafe event information
+        if unsafe_events:
+            logger.info(f"Detected {len(unsafe_events)} unsafe MTTC events")
+            for i, event in enumerate(unsafe_events):
+                logger.info(f"Unsafe MTTC event #{i+1}: start={event['start_time']:.2f}s, end={event['end_time']:.2f}s, duration={event['duration']:.2f}s, min MTTC={event['min_mttc']:.2f}s")
+        
+        logger.info(f"MTTC chart saved to: {chart_filename}")
+        logger.info(f"MTTC data saved to: {csv_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate MTTC chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_thw_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate THW metric chart with orange background for unsafe events
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        thw_data = safety_calculator.thw_data
+        
+        if not thw_data:
+            logger.warning("Cannot generate THW chart: empty data")
+            return None
+            
+        # Build a DataFrame
+        df = pd.DataFrame(thw_data)
+        
+        # Fetch the thresholds
+        thresholds = get_metric_thresholds(safety_calculator, 'THW')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # Detect threshold-violation events
+        unsafe_events = []
+        if min_threshold is not None:
+            # For THW, values below the minimum threshold are unsafe
+            unsafe_condition = df['THW'] < min_threshold
+            event_groups = (unsafe_condition != unsafe_condition.shift()).cumsum()
+            
+            for _, group in df[unsafe_condition].groupby(event_groups):
+                if len(group) >= 2:  # an event requires at least 2 frames
+                    start_time = group['simTime'].iloc[0]
+                    end_time = group['simTime'].iloc[-1]
+                    duration = end_time - start_time
+                    
+                    if duration >= 0.1:  # only record events lasting longer than 0.1 s
+                        unsafe_events.append({
+                            'start_time': start_time,
+                            'end_time': end_time,
+                            'start_frame': group['simFrame'].iloc[0],
+                            'end_frame': group['simFrame'].iloc[-1],
+                            'duration': duration,
+                            'min_thw': group['THW'].min()
+                        })
+        
+        # Create the chart
+        plt.figure(figsize=(12, 6))
+        plt.plot(df['simTime'], df['THW'], 'c-', label='THW')
+        
+        # Add threshold lines
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold}s)')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold})')
+        
+        # Highlight unsafe events with an orange background
+        for idx, event in enumerate(unsafe_events):
+            label = 'Unsafe THW Event' if idx == 0 else None
+            plt.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('THW (s)')
+        plt.title('Time Headway (THW) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # Save the chart
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        chart_filename = os.path.join(output_dir, f"thw_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # Save the CSV data, including threshold info
+        csv_filename = os.path.join(output_dir, f"thw_data_{timestamp}.csv")
+        df_csv = df.copy()
+        df_csv['min_threshold'] = min_threshold
+        df_csv['max_threshold'] = max_threshold
+        df_csv.to_csv(csv_filename, index=False)
+        
+        # Log unsafe event information
+        if unsafe_events:
+            logger.info(f"Detected {len(unsafe_events)} unsafe THW events")
+            for i, event in enumerate(unsafe_events):
+                logger.info(f"Unsafe THW event #{i+1}: start={event['start_time']:.2f}s, end={event['end_time']:.2f}s, duration={event['duration']:.2f}s, min THW={event['min_thw']:.2f}s")
+        
+        logger.info(f"THW chart saved to: {chart_filename}")
+        logger.info(f"THW data saved to: {csv_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate THW chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_lonsd_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate Longitudinal Safe Distance metric chart
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        lonsd_data = safety_calculator.lonsd_data
+        
+        if not lonsd_data:
+            logger.warning("Cannot generate Longitudinal Safe Distance chart: empty data")
+            return None
+            
+        # Build a DataFrame
+        df = pd.DataFrame(lonsd_data)
+        
+        # Fetch the thresholds
+        thresholds = get_metric_thresholds(safety_calculator, 'LonSD')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # Create the chart
+        plt.figure(figsize=(12, 6))
+        plt.plot(df['simTime'], df['LonSD'], 'm-', label='Longitudinal Safe Distance')
+        
+        # Add threshold lines
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold}m)')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold}m)')
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('Distance (m)')
+        plt.title('Longitudinal Safe Distance (LonSD) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # Save the chart
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        chart_filename = os.path.join(output_dir, f"lonsd_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # Save the CSV data, including threshold info
+        csv_filename = os.path.join(output_dir, f"lonsd_data_{timestamp}.csv")
+        df_csv = df.copy()
+        df_csv['min_threshold'] = min_threshold
+        df_csv['max_threshold'] = max_threshold
+        df_csv.to_csv(csv_filename, index=False)
+        
+        logger.info(f"Longitudinal Safe Distance chart saved to: {chart_filename}")
+        logger.info(f"Longitudinal Safe Distance data saved to: {csv_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate Longitudinal Safe Distance chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_latsd_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate Lateral Safe Distance metric chart with orange background for unsafe events
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        latsd_data = safety_calculator.latsd_data
+        
+        if not latsd_data:
+            logger.warning("Cannot generate Lateral Safe Distance chart: empty data")
+            return None
+            
+        # Build a DataFrame
+        df = pd.DataFrame(latsd_data)
+        
+        # Fetch the thresholds
+        thresholds = get_metric_thresholds(safety_calculator, 'LatSD')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # Detect threshold-violation events
+        unsafe_events = []
+        if min_threshold is not None:
+            # For LatSD, values below the minimum threshold are unsafe
+            unsafe_condition = df['LatSD'] < min_threshold
+            event_groups = (unsafe_condition != unsafe_condition.shift()).cumsum()
+            
+            for _, group in df[unsafe_condition].groupby(event_groups):
+                if len(group) >= 2:  # an event requires at least 2 frames
+                    start_time = group['simTime'].iloc[0]
+                    end_time = group['simTime'].iloc[-1]
+                    duration = end_time - start_time
+                    
+                    if duration >= 0.1:  # only record events lasting longer than 0.1 s
+                        unsafe_events.append({
+                            'start_time': start_time,
+                            'end_time': end_time,
+                            'start_frame': group['simFrame'].iloc[0],
+                            'end_frame': group['simFrame'].iloc[-1],
+                            'duration': duration,
+                            'min_latsd': group['LatSD'].min()
+                        })
+        
+        # Create the chart
+        plt.figure(figsize=(12, 6))
+        plt.plot(df['simTime'], df['LatSD'], 'y-', label='Lateral Safe Distance')
+        
+        # Add threshold lines
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold}m)')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold}m)')
+        
+        # Highlight unsafe events with an orange background
+        for idx, event in enumerate(unsafe_events):
+            label = 'Unsafe LatSD Event' if idx == 0 else None
+            plt.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('Distance (m)')
+        plt.title('Lateral Safe Distance (LatSD) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # Save the chart
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        chart_filename = os.path.join(output_dir, f"latsd_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # Save the CSV data, including threshold info
+        csv_filename = os.path.join(output_dir, f"latsd_data_{timestamp}.csv")
+        df_csv = df.copy()
+        df_csv['min_threshold'] = min_threshold
+        df_csv['max_threshold'] = max_threshold
+        df_csv.to_csv(csv_filename, index=False)
+        
+        # Log unsafe event information
+        if unsafe_events:
+            logger.info(f"Detected {len(unsafe_events)} unsafe LatSD events")
+            for i, event in enumerate(unsafe_events):
+                logger.info(f"Unsafe LatSD event #{i+1}: start={event['start_time']:.2f}s, end={event['end_time']:.2f}s, duration={event['duration']:.2f}s, min LatSD={event['min_latsd']:.2f}m")
+        
+        logger.info(f"Lateral Safe Distance chart saved to: {chart_filename}")
+        logger.info(f"Lateral Safe Distance data saved to: {csv_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate Lateral Safe Distance chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_btn_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate Brake Threat Number metric chart with orange background for unsafe events
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # Fetch the data
+        btn_data = safety_calculator.btn_data
+        
+        if not btn_data:
+            logger.warning("Cannot generate Brake Threat Number chart: empty data")
+            return None
+            
+        # Build a DataFrame
+        df = pd.DataFrame(btn_data)
+        
+        # Fetch the thresholds
+        thresholds = get_metric_thresholds(safety_calculator, 'BTN')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # Detect threshold-violation events
+        unsafe_events = []
+        if max_threshold is not None:
+            # For BTN, values above the maximum threshold are unsafe
+            unsafe_condition = df['BTN'] > max_threshold
+            event_groups = (unsafe_condition != unsafe_condition.shift()).cumsum()
+            
+            for _, group in df[unsafe_condition].groupby(event_groups):
+                if len(group) >= 2:  # an event requires at least 2 frames
+                    start_time = group['simTime'].iloc[0]
+                    end_time = group['simTime'].iloc[-1]
+                    duration = end_time - start_time
+                    
+                    if duration >= 0.1:  # only record events lasting longer than 0.1 s
+                        unsafe_events.append({
+                            'start_time': start_time,
+                            'end_time': end_time,
+                            'start_frame': group['simFrame'].iloc[0],
+                            'end_frame': group['simFrame'].iloc[-1],
+                            'duration': duration,
+                            'max_btn': group['BTN'].max()
+                        })
+        
+        # Create the chart
+        plt.figure(figsize=(12, 6))
+        plt.plot(df['simTime'], df['BTN'], 'r-', label='Brake Threat Number')
+        
+        # Add threshold lines
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold})')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold})')
+        
+        # Highlight unsafe events with an orange background
+        for idx, event in enumerate(unsafe_events):
+            label = 'Unsafe BTN Event' if idx == 0 else None
+            plt.axvspan(event['start_time'], event['end_time'], 
+                       alpha=0.3, color='orange', label=label)
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('BTN')
+        plt.title('Brake Threat Number (BTN) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # 保存图表
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        chart_filename = os.path.join(output_dir, f"btn_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # 保存CSV数据,包含阈值信息
+        csv_filename = os.path.join(output_dir, f"btn_data_{timestamp}.csv")
+        df_csv = df.copy()
+        df_csv['min_threshold'] = min_threshold
+        df_csv['max_threshold'] = max_threshold
+        df_csv.to_csv(csv_filename, index=False)
+        
+        # 记录不安全事件信息
+        if unsafe_events:
+            logger.info(f"检测到 {len(unsafe_events)} 个BTN不安全事件")
+            for i, event in enumerate(unsafe_events):
+                logger.info(f"BTN不安全事件 #{i+1}: 开始时间={event['start_time']:.2f}s, 结束时间={event['end_time']:.2f}s, 持续时间={event['duration']:.2f}s, 最大BTN={event['max_btn']:.2f}")
+        
+        logger.info(f"Brake Threat Number chart saved to: {chart_filename}")
+        logger.info(f"Brake Threat Number data saved to: {csv_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate Brake Threat Number chart: {str(e)}", exc_info=True)
+        return None
+
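The unsafe-event detection above (and in the LatSD generator before it) relies on a standard pandas run-length trick: comparing the boolean mask with its own shift and taking the cumulative sum assigns a distinct group id to each run of consecutive over-threshold frames. A self-contained sketch with made-up numbers:

```python
import pandas as pd

df = pd.DataFrame({'simTime': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5],
                   'BTN':     [0.2, 1.2, 1.5, 0.3, 1.1, 1.4]})
unsafe = df['BTN'] > 1.0                      # boolean mask of over-threshold frames
groups = (unsafe != unsafe.shift()).cumsum()  # new id whenever the mask flips

for _, run in df[unsafe].groupby(groups):
    print(run['simTime'].iloc[0], run['simTime'].iloc[-1], len(run))
# -> 0.1 0.2 2   and   0.4 0.5 2
```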
+def generate_collision_risk_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate Collision Risk metric chart
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # 获取数据
+        risk_data = safety_calculator.collision_risk_data
+        
+        if not risk_data:
+            logger.warning("Cannot generate Collision Risk chart: empty data")
+            return None
+            
+        # 创建DataFrame
+        df = pd.DataFrame(risk_data)
+        
+        # 获取阈值
+        thresholds = get_metric_thresholds(safety_calculator, 'collisionRisk')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # 创建图表
+        plt.figure(figsize=(12, 6))
+        plt.plot(df['simTime'], df['collisionRisk'], 'r-', label='Collision Risk')
+        
+        # 添加阈值线
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold}%)')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold}%)')
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('Risk Value (%)')
+        plt.title('Collision Risk (collisionRisk) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # 保存图表
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        chart_filename = os.path.join(output_dir, f"collision_risk_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # 保存CSV数据,包含阈值信息
+        csv_filename = os.path.join(output_dir, f"collisionrisk_data_{timestamp}.csv")
+        df_csv = df.copy()
+        df_csv['min_threshold'] = min_threshold
+        df_csv['max_threshold'] = max_threshold
+        df_csv.to_csv(csv_filename, index=False)
+        
+        logger.info(f"Collision Risk chart saved to: {chart_filename}")
+        logger.info(f"Collision Risk data saved to: {csv_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate Collision Risk chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_collision_severity_chart(safety_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate Collision Severity metric chart
+    
+    Args:
+        safety_calculator: SafetyCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # 获取数据
+        severity_data = safety_calculator.collision_severity_data
+        
+        if not severity_data:
+            logger.warning("Cannot generate Collision Severity chart: empty data")
+            return None
+            
+        # 创建DataFrame
+        df = pd.DataFrame(severity_data)
+        
+        # 获取阈值
+        thresholds = get_metric_thresholds(safety_calculator, 'collisionSeverity')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # 创建图表
+        plt.figure(figsize=(12, 6))
+        plt.plot(df['simTime'], df['collisionSeverity'], 'r-', label='Collision Severity')
+        
+        # 添加阈值线
+        if min_threshold is not None:
+            plt.axhline(y=min_threshold, color='r', linestyle='--', label=f'Min Threshold ({min_threshold}%)')
+        if max_threshold is not None:
+            plt.axhline(y=max_threshold, color='g', linestyle='--', label=f'Max Threshold ({max_threshold}%)')
+        
+        plt.xlabel('Time (s)')
+        plt.ylabel('Severity (%)')
+        plt.title('Collision Severity (collisionSeverity) Trend')
+        plt.grid(True)
+        plt.legend()
+        
+        # 保存图表
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        chart_filename = os.path.join(output_dir, f"collision_severity_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        # 保存CSV数据,包含阈值信息
+        csv_filename = os.path.join(output_dir, f"collisionseverity_data_{timestamp}.csv")
+        df_csv = df.copy()
+        df_csv['min_threshold'] = min_threshold
+        df_csv['max_threshold'] = max_threshold
+        df_csv.to_csv(csv_filename, index=False)
+        
+        logger.info(f"Collision Severity chart saved to: {chart_filename}")
+        logger.info(f"Collision Severity data saved to: {csv_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate Collision Severity chart: {str(e)}", exc_info=True)
+        return None
+
+def generate_vdv_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate VDV (Vibration Dose Value) metric chart with data saved to CSV first.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+    
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # 获取数据
+        df = comfort_calculator.ego_df.copy()
+        vdv_value = comfort_calculator.calculated_value.get('vdv', 0)
+        
+        if df.empty:
+            logger.warning("Cannot generate VDV chart: empty data")
+            return None
+            
+        # 确保有必要的列
+        if 'accelX' not in df.columns or 'accelY' not in df.columns:
+            logger.warning("Missing required columns for VDV chart")
+            return None
+        
+        # 获取阈值
+        thresholds = get_metric_thresholds(comfort_calculator, 'vdv')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # 将东北天坐标系下的加速度转换为车身坐标系下的加速度
+        if 'posH' not in df.columns:
+            logger.warning("Missing heading angle data for coordinate transformation")
+            return None
+            
+        # 车身坐标系:X轴指向车头,Y轴指向车辆左侧,Z轴指向车顶
+        df['posH_rad'] = np.radians(df['posH'])
+        
+        # 转换加速度到车身坐标系
+        df['a_x_body'] = df['accelX'] * np.sin(df['posH_rad']) + df['accelY'] * np.cos(df['posH_rad'])
+        df['a_y_body'] = df['accelX'] * np.cos(df['posH_rad']) - df['accelY'] * np.sin(df['posH_rad'])
+        df['a_z_body'] = df['accelZ'] if 'accelZ' in df.columns else pd.Series(np.zeros(len(df)), index=df.index)  # align on df's index to avoid NaNs
+        
+        # 生成时间戳
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # 保存 CSV 数据(第一步)
+        csv_filename = os.path.join(output_dir, f"vdv_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'a_x_body': df['a_x_body'],
+            'a_y_body': df['a_y_body'],
+            'a_z_body': df['a_z_body'],
+            'v': df['v'],
+            'min_threshold': min_threshold,
+            'max_threshold': max_threshold,
+            'vdv_value': vdv_value
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"VDV data saved to: {csv_filename}")
+        
+        # 第二步:从 CSV 读取(可验证保存数据无误)
+        df = pd.read_csv(csv_filename)
+        
+        # 创建图表(第三步)
+        plt.figure(figsize=(12, 8))
+        
+        # 绘制三轴加速度
+        plt.subplot(3, 1, 1)
+        plt.plot(df['simTime'], df['a_x_body'], 'r-', label='X-axis Acceleration')
+        
+        # 添加阈值线
+        if 'min_threshold' in df.columns and pd.notna(df['min_threshold'].iloc[0]):  # a None threshold comes back as NaN after the CSV round-trip
+            min_threshold = df['min_threshold'].iloc[0]
+            plt.axhline(y=min_threshold, color='r', linestyle=':', label=f'Min Threshold ({min_threshold})')
+        if 'max_threshold' in df.columns and pd.notna(df['max_threshold'].iloc[0]):
+            max_threshold = df['max_threshold'].iloc[0]
+            plt.axhline(y=max_threshold, color='g', linestyle=':', label=f'Max Threshold ({max_threshold})')
+            
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        plt.title('Body X-axis Acceleration (Longitudinal)')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 1, 2)
+        plt.plot(df['simTime'], df['a_y_body'], 'g-', label='Y-axis Acceleration')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        plt.title('Body Y-axis Acceleration (Lateral)')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 1, 3)
+        plt.plot(df['simTime'], df['a_z_body'], 'b-', label='Z-axis Acceleration')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        vdv_value = df['vdv_value'].iloc[0] if 'vdv_value' in df.columns else 0
+        plt.title(f'Body Z-axis Acceleration (Vertical) - VDV value: {vdv_value:.4f}')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.tight_layout()
+        
+        # 保存图像
+        chart_filename = os.path.join(output_dir, f"vdv_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        logger.info(f"VDV chart saved to: {chart_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate VDV chart: {str(e)}", exc_info=True)
+        return None
+
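For reference, the Vibration Dose Value of ISO 2631-1 is the fourth root of the time integral of the fourth power of the frequency-weighted acceleration, VDV = (∫ a_w(t)⁴ dt)^¼. The value shown in the chart title comes from `comfort_calculator.calculated_value['vdv']`; the sketch below only restates the textbook formula for uniformly sampled, already-weighted data (an assumption for illustration):

```python
import numpy as np

def vdv_from_samples(a_w: np.ndarray, dt: float) -> float:
    """Fourth root of the integral of a_w(t)**4, with a_w pre-weighted and dt fixed."""
    return float(np.power(np.sum(np.power(a_w, 4)) * dt, 0.25))

# 10 s of a constant 1 m/s^2 weighted acceleration at 100 Hz -> VDV = 10**0.25 ≈ 1.78
print(vdv_from_samples(np.ones(1000), 0.01))
```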
+def generate_ava_vav_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate AVA_VAV (Average Vibration Acceleration Value) metric chart with data saved to CSV first.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+    
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # 获取数据
+        df = comfort_calculator.ego_df.copy()
+        ava_vav_value = comfort_calculator.calculated_value.get('ava_vav', 0)
+        
+        if df.empty:
+            logger.warning("Cannot generate AVA_VAV chart: empty data")
+            return None
+            
+        # 确保有必要的列
+        if 'accelX' not in df.columns or 'accelY' not in df.columns:
+            logger.warning("Missing required columns for AVA_VAV chart")
+            return None
+        
+        # 获取阈值
+        thresholds = get_metric_thresholds(comfort_calculator, 'ava_vav')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # 将东北天坐标系下的加速度转换为车身坐标系下的加速度
+        if 'posH' not in df.columns:
+            logger.warning("Missing heading angle data for coordinate transformation")
+            return None
+            
+        # 车身坐标系:X轴指向车头,Y轴指向车辆左侧,Z轴指向车顶
+        df['posH_rad'] = np.radians(df['posH'])
+        
+        # 转换加速度到车身坐标系
+        df['a_x_body'] = df['accelX'] * np.sin(df['posH_rad']) + df['accelY'] * np.cos(df['posH_rad'])
+        df['a_y_body'] = df['accelX'] * np.cos(df['posH_rad']) - df['accelY'] * np.sin(df['posH_rad'])
+        df['a_z_body'] = df['accelZ'] if 'accelZ' in df.columns else pd.Series(np.zeros(len(df)), index=df.index)  # align on df's index to avoid NaNs
+        
+        # 角速度数据
+        df['omega_roll'] = df['rollRate'] if 'rollRate' in df.columns else pd.Series(np.zeros(len(df)), index=df.index)
+        df['omega_pitch'] = df['pitchRate'] if 'pitchRate' in df.columns else pd.Series(np.zeros(len(df)), index=df.index)
+        df['omega_yaw'] = df['speedH']  # 使用航向角速度作为偏航角速度
+        
+        # 生成时间戳
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # 保存 CSV 数据(第一步)
+        csv_filename = os.path.join(output_dir, f"ava_vav_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'a_x_body': df['a_x_body'],
+            'a_y_body': df['a_y_body'],
+            'a_z_body': df['a_z_body'],
+            'omega_roll': df['omega_roll'],
+            'omega_pitch': df['omega_pitch'],
+            'omega_yaw': df['omega_yaw'],
+            'min_threshold': min_threshold,
+            'max_threshold': max_threshold,
+            'ava_vav_value': ava_vav_value
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"AVA_VAV data saved to: {csv_filename}")
+        
+        # 第二步:从 CSV 读取(可验证保存数据无误)
+        df = pd.read_csv(csv_filename)
+        
+        # 创建图表(第三步)
+        plt.figure(figsize=(12, 10))
+        
+        # 绘制三轴加速度
+        plt.subplot(3, 2, 1)
+        plt.plot(df['simTime'], df['a_x_body'], 'r-', label='X-axis Acceleration')
+        
+        # 添加阈值线
+        if 'min_threshold' in df.columns and pd.notna(df['min_threshold'].iloc[0]):  # a None threshold comes back as NaN after the CSV round-trip
+            min_threshold = df['min_threshold'].iloc[0]
+            plt.axhline(y=min_threshold, color='r', linestyle=':', label=f'Min Threshold ({min_threshold})')
+        if 'max_threshold' in df.columns and pd.notna(df['max_threshold'].iloc[0]):
+            max_threshold = df['max_threshold'].iloc[0]
+            plt.axhline(y=max_threshold, color='g', linestyle=':', label=f'Max Threshold ({max_threshold})')
+            
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        plt.title('Body X-axis Acceleration (Longitudinal)')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 2, 3)
+        plt.plot(df['simTime'], df['a_y_body'], 'g-', label='Y-axis Acceleration')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        plt.title('Body Y-axis Acceleration (Lateral)')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 2, 5)
+        plt.plot(df['simTime'], df['a_z_body'], 'b-', label='Z-axis Acceleration')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        plt.title('Body Z-axis Acceleration (Vertical)')
+        plt.grid(True)
+        plt.legend()
+        
+        # 绘制三轴角速度
+        plt.subplot(3, 2, 2)
+        plt.plot(df['simTime'], df['omega_roll'], 'r-', label='Roll Rate')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Angular Velocity (deg/s)')
+        plt.title('Roll Rate')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 2, 4)
+        plt.plot(df['simTime'], df['omega_pitch'], 'g-', label='Pitch Rate')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Angular Velocity (deg/s)')
+        plt.title('Pitch Rate')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 2, 6)
+        plt.plot(df['simTime'], df['omega_yaw'], 'b-', label='Yaw Rate')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Angular Velocity (deg/s)')
+        ava_vav_value = df['ava_vav_value'].iloc[0] if 'ava_vav_value' in df.columns else 0
+        plt.title(f'Yaw Rate - AVA_VAV value: {ava_vav_value:.4f}')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.tight_layout()
+        
+        # 保存图像
+        chart_filename = os.path.join(output_dir, f"ava_vav_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        logger.info(f"AVA_VAV chart saved to: {chart_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate AVA_VAV chart: {str(e)}", exc_info=True)
+        return None
+
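The AVA_VAV value in the chart title is computed in comfort.py; per the table added to modules/metric/README.md below, the final combination step is a weighted root-sum-of-squares of the per-channel r.m.s. values with kx=ky=kz=1, kroll=0.63, kpitch=0.8, kyaw=0.5. A minimal sketch of just that combination step (the ISO frequency weighting of each channel is omitted here):

```python
import numpy as np

WEIGHTS = {'ax': 1.0, 'ay': 1.0, 'az': 1.0, 'roll': 0.63, 'pitch': 0.8, 'yaw': 0.5}

def combine_ava_vav(channels: dict) -> float:
    """channels maps the six keys above to arrays of (weighted) samples."""
    rms = {k: float(np.sqrt(np.mean(np.square(v)))) for k, v in channels.items()}
    return float(np.sqrt(sum(WEIGHTS[k] * rms[k] ** 2 for k in WEIGHTS)))
```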
+def generate_msdv_chart(comfort_calculator, output_dir: str) -> Optional[str]:
+    """
+    Generate MSDV (Motion Sickness Dose Value) metric chart with data saved to CSV first.
+    This version first saves data to CSV, then uses the CSV to generate the chart.
+    
+    Args:
+        comfort_calculator: ComfortCalculator instance
+        output_dir: Output directory
+        
+    Returns:
+        str: Chart file path, or None if generation fails
+    """
+    logger = LogManager().get_logger()
+    
+    try:
+        # 获取数据
+        df = comfort_calculator.ego_df.copy()
+        msdv_value = comfort_calculator.calculated_value.get('msdv', 0)
+        motion_sickness_prob = comfort_calculator.calculated_value.get('motionSickness', 0)
+        
+        if df.empty:
+            logger.warning("Cannot generate MSDV chart: empty data")
+            return None
+            
+        # 确保有必要的列
+        if 'accelX' not in df.columns or 'accelY' not in df.columns:
+            logger.warning("Missing required columns for MSDV chart")
+            return None
+        
+        # 获取阈值
+        thresholds = get_metric_thresholds(comfort_calculator, 'msdv')
+        min_threshold = thresholds.get('min')
+        max_threshold = thresholds.get('max')
+        
+        # 将东北天坐标系下的加速度转换为车身坐标系下的加速度
+        if 'posH' not in df.columns:
+            logger.warning("Missing heading angle data for coordinate transformation")
+            return None
+            
+        # 车身坐标系:X轴指向车头,Y轴指向车辆左侧,Z轴指向车顶
+        df['posH_rad'] = np.radians(df['posH'])
+        
+        # 转换加速度到车身坐标系
+        df['a_x_body'] = df['accelX'] * np.sin(df['posH_rad']) + df['accelY'] * np.cos(df['posH_rad'])
+        df['a_y_body'] = df['accelX'] * np.cos(df['posH_rad']) - df['accelY'] * np.sin(df['posH_rad'])
+        df['a_z_body'] = df['accelZ'] if 'accelZ' in df.columns else pd.Series(np.zeros(len(df)), index=df.index)  # align on df's index to avoid NaNs
+        
+        # 生成时间戳
+        import datetime
+        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
+        
+        # 保存 CSV 数据(第一步)
+        csv_filename = os.path.join(output_dir, f"msdv_data_{timestamp}.csv")
+        df_csv = pd.DataFrame({
+            'simTime': df['simTime'],
+            'a_x_body': df['a_x_body'],
+            'a_y_body': df['a_y_body'],
+            'a_z_body': df['a_z_body'],
+            'v': df['v'],
+            'min_threshold': min_threshold,
+            'max_threshold': max_threshold,
+            'msdv_value': msdv_value,
+            'motion_sickness_prob': motion_sickness_prob
+        })
+        df_csv.to_csv(csv_filename, index=False)
+        logger.info(f"MSDV data saved to: {csv_filename}")
+        
+        # 第二步:从 CSV 读取(可验证保存数据无误)
+        df = pd.read_csv(csv_filename)
+        
+        # 创建图表(第三步)
+        plt.figure(figsize=(12, 8))
+        
+        # 绘制三轴加速度
+        plt.subplot(3, 1, 1)
+        plt.plot(df['simTime'], df['a_x_body'], 'r-', label='X-axis Acceleration')
+        
+        # 添加阈值线
+        if 'min_threshold' in df.columns and pd.notna(df['min_threshold'].iloc[0]):  # a None threshold comes back as NaN after the CSV round-trip
+            min_threshold = df['min_threshold'].iloc[0]
+            plt.axhline(y=min_threshold, color='r', linestyle=':', label=f'Min Threshold ({min_threshold})')
+        if 'max_threshold' in df.columns and pd.notna(df['max_threshold'].iloc[0]):
+            max_threshold = df['max_threshold'].iloc[0]
+            plt.axhline(y=max_threshold, color='g', linestyle=':', label=f'Max Threshold ({max_threshold})')
+            
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        plt.title('Body X-axis Acceleration (Longitudinal)')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 1, 2)
+        plt.plot(df['simTime'], df['a_y_body'], 'g-', label='Y-axis Acceleration')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        plt.title('Body Y-axis Acceleration (Lateral)')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.subplot(3, 1, 3)
+        plt.plot(df['simTime'], df['a_z_body'], 'b-', label='Z-axis Acceleration')
+        plt.xlabel('Time (s)')
+        plt.ylabel('Acceleration (m/s²)')
+        msdv_value = df['msdv_value'].iloc[0] if 'msdv_value' in df.columns else 0
+        motion_sickness_prob = df['motion_sickness_prob'].iloc[0] if 'motion_sickness_prob' in df.columns else 0
+        plt.title(f'Body Z-axis Acceleration (Vertical) - MSDV: {msdv_value:.4f}, Motion Sickness Probability: {motion_sickness_prob:.2f}%')
+        plt.grid(True)
+        plt.legend()
+        
+        plt.tight_layout()
+        
+        # 保存图像
+        chart_filename = os.path.join(output_dir, f"msdv_chart_{timestamp}.png")
+        plt.savefig(chart_filename, dpi=300)
+        plt.close()
+        
+        logger.info(f"MSDV chart saved to: {chart_filename}")
+        return chart_filename
+        
+    except Exception as e:
+        logger.error(f"Failed to generate MSDV chart: {str(e)}", exc_info=True)
+        return None
+
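For reference, MSDV is the square root of the time integral of the squared frequency-weighted acceleration, MSDV = (∫ a_w(t)² dt)^½; in the removed `comfort copy.py` below, the per-axis values were combined as √(MSDV_x² + MSDV_y² + (1.4·MSDV_z)²). A minimal per-axis sketch on already-weighted samples:

```python
import numpy as np

def msdv_axis(a_w: np.ndarray, dt: np.ndarray) -> float:
    """sqrt of the integral of a_w(t)**2; dt holds per-sample time steps in seconds."""
    return float(np.sqrt(np.sum(np.square(a_w) * dt)))
```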
+def generate_traffic_chart_data(traffic_calculator, metric_name: str, output_dir: Optional[str] = None) -> Optional[str]:
+    """Generate chart data for traffic metrics"""
+    # 待实现
+    return None
+
+def generate_function_chart_data(function_calculator, metric_name: str, output_dir: Optional[str] = None) -> Optional[str]:
+    """Generate chart data for function metrics"""
+    # 待实现
+    return None

+ 12 - 2
modules/lib/data_process.py

@@ -271,7 +271,7 @@ class DataPreprocessing:
         # 创建副本避免修改原始数据
         ego_data = ego_data.copy()
         
-        # 添加坐标系转换:将东北天坐标系下的加速度转换为车辆坐标系下的加速度
+        # 添加坐标系转换:将东北天坐标系下的加速度和速度转换为车辆坐标系下的值
         # 使用车辆航向角进行转换
         # 注意:与safety.py保持一致,使用(90 - heading)作为与x轴的夹角
         ego_data['heading_rad'] = np.deg2rad(90 - ego_data['posH'])  # 转换为与x轴的夹角
@@ -281,9 +281,19 @@ class DataPreprocessing:
                                      ego_data['accelY'] * np.sin(ego_data['heading_rad'])
         ego_data['lat_acc_vehicle'] = -ego_data['accelX'] * np.sin(ego_data['heading_rad']) + \
                                      ego_data['accelY'] * np.cos(ego_data['heading_rad'])
+         
+        # 使用向量化操作计算车辆坐标系下的纵向和横向速度
+        ego_data['lon_v_vehicle'] = ego_data['speedX'] * np.cos(ego_data['heading_rad']) + \
+                                   ego_data['speedY'] * np.sin(ego_data['heading_rad'])
+        ego_data['lat_v_vehicle'] = -ego_data['speedX'] * np.sin(ego_data['heading_rad']) + \
+                                   ego_data['speedY'] * np.cos(ego_data['heading_rad'])
         
-        # 将原始的东北天坐标系加速度保留,但在comfort.py中使用车辆坐标系加速度
+        # 将原始的东北天坐标系加速度和速度保留,但在其他模块中可以直接使用车辆坐标系的值
         ego_data['lon_acc'] = ego_data['lon_acc_vehicle']
         ego_data['lat_acc'] = ego_data['lat_acc_vehicle']
         
+        # 记录日志
+        logger = LogManager().get_logger()
+        logger.info("已将加速度和速度转换为车辆坐标系")
+        
         return ego_data
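A quick numeric check of the transform added here, assuming posH is the heading in degrees measured clockwise from north (so 90 − posH is the angle from the east/x axis): a vehicle heading due east while accelerating purely eastwards should see all of the motion on its longitudinal axis.

```python
import numpy as np

posH = 90.0                     # heading: due east
accelX, accelY = 2.0, 0.0       # ENU acceleration: 2 m/s^2 eastwards
speedX, speedY = 10.0, 0.0      # ENU velocity: 10 m/s eastwards

heading_rad = np.deg2rad(90 - posH)
lon_acc = accelX * np.cos(heading_rad) + accelY * np.sin(heading_rad)
lat_acc = -accelX * np.sin(heading_rad) + accelY * np.cos(heading_rad)
lon_v = speedX * np.cos(heading_rad) + speedY * np.sin(heading_rad)

print(lon_acc, lat_acc, lon_v)  # -> 2.0 0.0 10.0 (wholly longitudinal, as expected)
```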

+ 13 - 0
modules/metric/README.md

@@ -0,0 +1,13 @@
+| Item | `ava_vav` | `rideQualityScore` |
+| ----------- | ---------------------------------------------------- | -------------------------------------- |
+| Full name | Combined metric of frequency-weighted linear acceleration + weighted angular velocity | Ride comfort score (usually vendor-defined) |
+| Standard metric? | ❌ Custom extension (based on ISO weighting) | ❌ Not standardized (usually reported as a score) |
+| Uses ISO weighting? | ✅ Uses ISO Wd/Wk plus custom angular-velocity weights | Possibly; may only be estimated from lateral/longitudinal RMS values |
+| Has weights? | ✅ Yes, typically: kx=1, ky=1, kz=1, kroll=0.63, kpitch=0.8, kyaw=0.5 | Possibly, vendor-defined |
+| Computation | √(Σ per-direction (rms² × weight)) | Not fixed; may be a piecewise mapping or an exponential decay |
+| Meaning of output | Acceleration in m/s²; larger means less comfortable | Usually a score (e.g. 0~100); larger means more comfortable |
+| Threshold source | 📌 Usually the acceleration comfort levels of ISO 2631-1:1997 | 📌 No standard threshold; most vendors reuse the ISO acceleration levels or fit thresholds from subjective ratings |
+| Key difference | ✔ Multi-dimensional: acceleration plus angular velocity | ❗ Some implementations are just lateral/longitudinal RMS acceleration |
+Although rideQualityScore and ava_vav may be computed differently, both describe ride comfort and both involve the RMS of frequency-weighted acceleration. So, in the absence of an industry-standard threshold definition for rideQualityScore:
+
+✅ Developers commonly borrow the acceleration comfort levels from ISO 2631-1 (i.e. 0.315, 0.63, 1.0, 1.6) as thresholds for both metrics.
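A minimal sketch of how those borrowed ISO 2631-1 levels (0.315, 0.63, 1.0, 1.6 m/s²) could be mapped to a comfort label; the bands are simplified to be non-overlapping, whereas the standard's guidance ranges overlap:

```python
def iso2631_comfort_label(a_w_rms: float) -> str:
    """Map a frequency-weighted r.m.s. acceleration (m/s^2) to a simplified comfort label."""
    if a_w_rms < 0.315:
        return "not uncomfortable"
    elif a_w_rms < 0.63:
        return "a little uncomfortable"
    elif a_w_rms < 1.0:
        return "fairly uncomfortable"
    elif a_w_rms < 1.6:
        return "uncomfortable"
    else:
        return "very uncomfortable"
```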

+ 0 - 886
modules/metric/comfort copy.py

@@ -1,886 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-##################################################################
-#
-# Copyright (c) 2023 CICV, Inc. All Rights Reserved
-#
-##################################################################
-"""
-@Authors:           zhanghaiwen(zhanghaiwen@china-icv.cn), yangzihao(yangzihao@china-icv.cn)
-@Data:              2023/06/25
-@Last Modified:     2025/04/25
-@Summary:           Comfort metrics
-"""
-
-import sys
-import math
-import scipy.signal
-import pandas as pd
-import numpy as np
-from pathlib import Path 
-from typing import Dict, List, Any, Optional, Callable, Union, Tuple
-
-from modules.lib.score import Score
-from modules.lib.common import get_interpolation, get_frame_with_time
-from modules.lib import data_process
-
-from modules.lib.log_manager import LogManager
-
-COMFORT_INFO = [
-    "simTime",
-    "simFrame",
-    "speedX",
-    "speedY",
-    "accelX",
-    "accelY",
-    "curvHor",
-    "lightMask",
-    "v",
-    "lat_acc",
-    "lon_acc",
-    "time_diff",
-    "lon_acc_diff",
-    "lon_acc_roc",
-    "speedH",
-    "accelH",
-    "posH",  # 确保包含航向角
-]
-# ----------------------
-# 独立指标计算函数
-# ----------------------
-# 添加 Motion Sickness Dose Value (MSDV) 指标
-#用于量化乘员因持续振动而产生的晕动风险。以下是需要添加的代码:
-
-## 1. 在独立指标计算函数部分添加新函数
-def calculate_ava_vav(data_processed) -> dict:
-    """计算多维度综合加权加速度"""
-    comfort = ComfortCalculator(data_processed)
-    ava_vav_value = comfort.calculate_ava_vav()
-    return {"ava_vav": float(ava_vav_value)}
-
-def calculate_msdv(data_processed) -> dict:
-    """计算晕动剂量值(MSDV)指标"""
-    comfort = ComfortCalculator(data_processed)
-    msdv_value = comfort.calculate_msdv()
-    return {"msdv": float(msdv_value)}
-    
-def calculate_weaving(data_processed) -> dict:
-    """计算蛇行指标"""
-    comfort = ComfortCalculator(data_processed)
-    zigzag_count = comfort.calculate_zigzag_count()
-    return {"weaving": float(zigzag_count)}
-
-def calculate_shake(data_processed) -> dict:
-    """计算晃动指标"""
-    comfort = ComfortCalculator(data_processed)
-    shake_count = comfort.calculate_shake_count()
-    return {"shake": float(shake_count)}
-
-def calculate_cadence(data_processed) -> dict:
-    """计算顿挫指标"""
-    comfort = ComfortCalculator(data_processed)
-    cadence_count = comfort.calculate_cadence_count()
-    return {"cadence": float(cadence_count)}
-
-def calculate_slambrake(data_processed) -> dict:
-    """计算急刹车指标"""
-    comfort = ComfortCalculator(data_processed)
-    slam_brake_count = comfort.calculate_slam_brake_count()
-    return {"slamBrake": float(slam_brake_count)}
-
-def calculate_slamaccelerate(data_processed) -> dict:
-    """计算急加速指标"""
-    comfort = ComfortCalculator(data_processed)
-    slam_accel_count = comfort.calculate_slam_accel_count()
-    return {"slamAccelerate": float(slam_accel_count)}
-
-# 装饰器保持不变
-def peak_valley_decorator(method):
-    def wrapper(self, *args, **kwargs):
-        peak_valley = self._peak_valley_determination(self.df)
-        pv_list = self.df.loc[peak_valley, ['simTime', 'speedH']].values.tolist()
-        if len(pv_list) != 0:
-            flag = True
-            p_last = pv_list[0]
-
-            for i in range(1, len(pv_list)):
-                p_curr = pv_list[i]
-
-                if self._peak_valley_judgment(p_last, p_curr):
-                    # method(self, p_curr, p_last)
-                    method(self, p_curr, p_last, flag, *args, **kwargs)
-                else:
-                    p_last = p_curr
-
-            return method
-        else:
-            flag = False
-            p_curr = [0, 0]
-            p_last = [0, 0]
-            method(self, p_curr, p_last, flag, *args, **kwargs)
-            return method
-
-    return wrapper
-
-
-class ComfortRegistry:
-    """舒适性指标注册器"""
-    
-    def __init__(self, data_processed):
-        self.logger = LogManager().get_logger()  # 获取全局日志实例
-        self.data = data_processed
-        self.comfort_config = data_processed.comfort_config["comfort"]
-        self.metrics = self._extract_metrics(self.comfort_config)
-        self._registry = self._build_registry()
-    
-    def _extract_metrics(self, config_node: dict) -> list:
-        """DFS遍历提取指标"""
-        metrics = []
-        def _recurse(node):
-            if isinstance(node, dict):
-                if 'name' in node and not any(isinstance(v, dict) for v in node.values()):
-                    metrics.append(node['name'])
-                for v in node.values():
-                    _recurse(v)
-        _recurse(config_node)
-        self.logger.info(f'评比的舒适性指标列表:{metrics}')
-        return metrics
-    
-    def _build_registry(self) -> dict:
-        """自动注册指标函数"""
-        registry = {}
-        for metric_name in self.metrics:
-            func_name = f"calculate_{metric_name.lower()}"
-            try:
-                registry[metric_name] = globals()[func_name]
-            except KeyError:
-                self.logger.error(f"未实现指标函数: {func_name}")
-        return registry
-    
-    def batch_execute(self) -> dict:
-        """批量执行指标计算"""
-        results = {}
-        for name, func in self._registry.items():
-            try:
-                result = func(self.data)
-                results.update(result)
-                # 新增:将每个指标的结果写入日志
-                self.logger.info(f'舒适性指标[{name}]计算结果: {result}')
-            except Exception as e:
-                self.logger.error(f"{name} 执行失败: {str(e)}", exc_info=True)
-                results[name] = None
-        self.logger.info(f'舒适性指标计算结果:{results}')
-        return results
-
-
-class ComfortCalculator:
-    """舒适性指标计算类 - 提供核心计算功能"""
-    
-    def __init__(self, data_processed):
-        self.data_processed = data_processed
-        self.logger = LogManager().get_logger()
-        
-        self.data = data_processed.ego_data
-        self.ego_df = pd.DataFrame()
-        self.discomfort_df = pd.DataFrame(columns=['start_time', 'end_time', 'start_frame', 'end_frame', 'type'])
-        
-        # 统计指标
-        self.calculated_value = {
-            'weaving': 0, 
-            'shake': 0, 
-            'cadence': 0,
-            'slamBrake': 0, 
-            'slamAccelerate': 0,
-            'ava_vav': 0,  # 添加新指标的默认值
-            'msdv': 0      # 添加MSDV指标的默认值
-        }
-        
-        self.time_list = self.data['simTime'].values.tolist()
-        self.frame_list = self.data['simFrame'].values.tolist()
-        
-        self.zigzag_count = 0
-        self.shake_count = 0
-        self.cadence_count = 0
-        self.slam_brake_count = 0
-        self.slam_accel_count = 0
-        
-        self.zigzag_time_list = []
-        self.zigzag_stre_list = []
-        
-        self._initialize_data()
-    
-    def _initialize_data(self):
-        """初始化数据"""
-        self.ego_df = self.data[COMFORT_INFO].copy()
-        self.df = self.ego_df.reset_index(drop=True)
-        self._prepare_comfort_parameters()
-    
-    def _prepare_comfort_parameters(self):
-        """准备舒适性计算所需参数"""
-        # 计算加减速阈值
-        self.ego_df['ip_acc'] = self.ego_df['v'].apply(get_interpolation, point1=[18, 4], point2=[72, 2])
-        self.ego_df['ip_dec'] = self.ego_df['v'].apply(get_interpolation, point1=[18, -5], point2=[72, -3.5])
-        self.ego_df['slam_brake'] = (self.ego_df['lon_acc'] - self.ego_df['ip_dec']).apply(
-            lambda x: 1 if x < 0 else 0)
-        self.ego_df['slam_accel'] = (self.ego_df['lon_acc'] - self.ego_df['ip_acc']).apply(
-            lambda x: 1 if x > 0 else 0)
-        self.ego_df['cadence'] = self.ego_df.apply(
-            lambda row: self._cadence_process_new(row['lon_acc'], row['ip_acc'], row['ip_dec']), axis=1)
-
-    
-    def _cal_cur_ego_path(self, row):
-        """计算车辆轨迹曲率"""
-        try:
-            divide = (row['speedX'] ** 2 + row['speedY'] ** 2) ** (3 / 2)
-            if not divide:
-                res = None
-            else:
-                res = (row['speedX'] * row['accelY'] - row['speedY'] * row['accelX']) / divide
-        except:
-            res = None
-        return res
-    
-    def _peak_valley_determination(self, df):
-        """确定角速度的峰谷"""
-        peaks, _ = scipy.signal.find_peaks(
-            df['speedH'], height=2.3, distance=3, 
-            prominence=2.3, width=1)
-        valleys, _ = scipy.signal.find_peaks(
-            -df['speedH'], height=2.3, distance=3, 
-            prominence=2.3, width=1)
-        return sorted(list(peaks) + list(valleys))
-    
-    def _peak_valley_judgment(self, p_last, p_curr, tw=100, avg=4.6):
-        """判断峰谷是否满足蛇行条件"""
-        t_diff = p_curr[0] - p_last[0]
-        v_diff = abs(p_curr[1] - p_last[1])
-        s = p_curr[1] * p_last[1]
-
-        if t_diff < tw and v_diff > avg and s < 0:
-            if [p_last[0], p_curr[0]] not in self.zigzag_time_list:
-                self.zigzag_time_list.append([p_last[0], p_curr[0]])
-            return True
-        return False
-    
-    def _cadence_process_new(self, lon_acc, ip_acc, ip_dec):
-        """处理顿挫数据"""
-        if abs(lon_acc) < 1 or lon_acc > ip_acc or lon_acc < ip_dec:
-            return np.nan
-        elif abs(lon_acc) == 0:
-            return 0
-        elif lon_acc > 0 and lon_acc < ip_acc:
-            return 1
-        elif lon_acc < 0 and lon_acc > ip_dec:
-            return -1
-        else:
-            return 0
-    
-    @peak_valley_decorator
-    def _zigzag_count_func(self, p_curr, p_last, flag=True):
-        """计算蛇行次数"""
-        if flag:
-            self.zigzag_count += 1
-        else:
-            self.zigzag_count += 0
-    
-    @peak_valley_decorator
-    def _cal_zigzag_strength(self, p_curr, p_last, flag=True):
-        """计算蛇行强度"""
-        if flag:
-            v_diff = abs(p_curr[1] - p_last[1])
-            t_diff = p_curr[0] - p_last[0]
-            if t_diff > 0:
-                self.zigzag_stre_list.append(v_diff / t_diff)  # 平均角加速度
-        else:
-            self.zigzag_stre_list = []
-    
-    def _get_zigzag_times(self):
-        """获取所有蛇行时间点"""
-        all_times = []
-        for time_range in self.zigzag_time_list:
-            start, end = time_range
-            # 获取这个时间范围内的所有时间点
-            times_in_range = self.ego_df[(self.ego_df['simTime'] >= start) & 
-                                         (self.ego_df['simTime'] <= end)]['simTime'].tolist()
-            all_times.extend(times_in_range)
-        return all_times
-    
-    def calculate_zigzag_count(self):
-        """计算蛇行指标"""
-        self._zigzag_count_func()
-        return self.zigzag_count
-    
-    def calculate_shake_count(self):
-        """计算晃动指标"""
-        self._shake_detector()
-        return self.shake_count
-    
-    def calculate_cadence_count(self):
-        """计算顿挫指标"""
-        self._cadence_detector()
-        return self.cadence_count
-    
-    def calculate_slam_brake_count(self):
-        """计算急刹车指标"""
-        self._slam_brake_detector()
-        return self.slam_brake_count
-    
-    def calculate_slam_accel_count(self):
-        """计算急加速指标"""
-        self._slam_accel_detector()
-        return self.slam_accel_count
-    
-    def _shake_detector(self, T_diff=0.5):
-        """检测晃动事件 - 改进版本(不使用车辆轨迹曲率)"""
-        # lat_acc已经是车辆坐标系下的横向加速度,由data_process.py计算
-        time_list = []
-        frame_list = []
-
-        # 复制数据以避免修改原始数据
-        df = self.ego_df.copy()
-        
-        # 1. 计算横向加速度变化率
-        df['lat_acc_rate'] = df['lat_acc'].diff() / df['simTime'].diff()
-        
-        # 2. 计算横摆角速度变化率
-        df['speedH_rate'] = df['speedH'].diff() / df['simTime'].diff()
-        
-        # 3. 计算横摆角速度的短期变化特性
-        window_size = 5  # 5帧窗口
-        df['speedH_std'] = df['speedH'].rolling(window=window_size, min_periods=2).std()
-        
-        # 4. 基于车速的动态阈值
-        v0 = 20 * 5/18        # ≈5.56 m/s
-        # 递减系数
-        k  = 0.008 * 3.6      # =0.0288 per m/s
-        df['lat_acc_threshold'] = df['v'].apply(
-            lambda speed: max(
-                1.0,                                   # 下限 1.0 m/s²
-                min(
-                    1.8,                               # 上限 1.8 m/s²
-                    1.8 - k * (speed - v0)             # 线性递减
-                )
-            )
-        )
-        
-        df['speedH_threshold'] = df['v'].apply(
-            lambda speed: max(1.5, min(3.0, 2.0 * (1 + (speed - 20) / 60)))
-        )
-        # 将计算好的阈值和中间变量保存到self.ego_df中,供其他函数使用
-        self.ego_df['lat_acc_threshold'] = df['lat_acc_threshold']
-        self.ego_df['speedH_threshold'] = df['speedH_threshold']
-        self.ego_df['lat_acc_rate'] = df['lat_acc_rate']
-        self.ego_df['speedH_rate'] = df['speedH_rate'] 
-        self.ego_df['speedH_std'] = df['speedH_std']
-        
-        # 5. 综合判断晃动条件
-        # 条件A: 横向加速度超过阈值
-        condition_A = df['lat_acc'].abs() > df['lat_acc_threshold']
-        
-        # 条件B: 横向加速度变化率超过阈值
-        lat_acc_rate_threshold = 0.5  # 横向加速度变化率阈值 (m/s³)
-        condition_B = df['lat_acc_rate'].abs() > lat_acc_rate_threshold
-        
-        # 条件C: 横摆角速度有明显变化但不呈现周期性
-        condition_C = (df['speedH_std'] > df['speedH_threshold']) & (~df['simTime'].isin(self._get_zigzag_times()))
-        
-        # 综合条件: 满足条件A,且满足条件B或条件C
-        shake_condition = condition_A & (condition_B | condition_C)
-        
-        # 筛选满足条件的数据
-        shake_df = df[shake_condition].copy()
-        
-        # 按照连续帧号分组,确保只有连续帧超过阈值的才被认为是晃动
-        if not shake_df.empty:
-            shake_df['frame_diff'] = shake_df['simFrame'].diff().fillna(0)
-            shake_df['group'] = (shake_df['frame_diff'] > T_diff).cumsum()
-            
-            # 分组统计
-            shake_groups = shake_df.groupby('group')
-            
-            for _, group in shake_groups:
-                if len(group) >= 2:  # 至少2帧才算一次晃动
-                    time_list.extend(group['simTime'].values)
-                    frame_list.extend(group['simFrame'].values)
-                    self.shake_count += 1
-        
-        # 分组处理
-        TIME_RANGE = 1
-        t_list = time_list
-        f_list = frame_list
-        group_time = []
-        group_frame = []
-        sub_group_time = []
-        sub_group_frame = []
-        
-        if len(f_list) > 0:
-            for i in range(len(f_list)):
-                if not sub_group_time or t_list[i] - t_list[i - 1] <= TIME_RANGE:
-                    sub_group_time.append(t_list[i])
-                    sub_group_frame.append(f_list[i])
-                else:
-                    group_time.append(sub_group_time)
-                    group_frame.append(sub_group_frame)
-                    sub_group_time = [t_list[i]]
-                    sub_group_frame = [f_list[i]]
-
-            group_time.append(sub_group_time)
-            group_frame.append(sub_group_frame)
-
-        # 输出图表值
-        shake_time = [[g[0], g[-1]] for g in group_time]
-        shake_frame = [[g[0], g[-1]] for g in group_frame]
-        self.shake_count = len(shake_time)
-
-        if shake_time:
-            time_df = pd.DataFrame(shake_time, columns=['start_time', 'end_time'])
-            frame_df = pd.DataFrame(shake_frame, columns=['start_frame', 'end_frame'])
-            discomfort_df = pd.concat([time_df, frame_df], axis=1)
-            discomfort_df['type'] = 'shake'
-            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
-
-        return time_list
-    
-    def _cadence_detector(self):
-        """顿挫检测器"""
-        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'cadence']].copy()
-        time_list = data['simTime'].values.tolist()
-
-        data = data[data['cadence'] != np.nan]
-        data['cadence_diff'] = data['cadence'].diff()
-        data.dropna(subset='cadence_diff', inplace=True)
-        data = data[data['cadence_diff'] != 0]
-
-        t_list = data['simTime'].values.tolist()
-        f_list = data['simFrame'].values.tolist()
-
-        TIME_RANGE = 1
-        group_time = []
-        group_frame = []
-        sub_group_time = []
-        sub_group_frame = []
-        for i in range(len(f_list)):
-            if not sub_group_time or t_list[i] - t_list[i - 1] <= TIME_RANGE:  # 特征点相邻一秒内的,算作同一组顿挫
-                sub_group_time.append(t_list[i])
-                sub_group_frame.append(f_list[i])
-            else:
-                group_time.append(sub_group_time)
-                group_frame.append(sub_group_frame)
-                sub_group_time = [t_list[i]]
-                sub_group_frame = [f_list[i]]
-
-        group_time.append(sub_group_time)
-        group_frame.append(sub_group_frame)
-        group_time = [g for g in group_time if len(g) >= 1]  # 有一次特征点则算作一次顿挫
-        group_frame = [g for g in group_frame if len(g) >= 1]
-
-        # 输出图表值
-        cadence_time = [[g[0], g[-1]] for g in group_time]
-        cadence_frame = [[g[0], g[-1]] for g in group_frame]
-
-        if cadence_time:
-            time_df = pd.DataFrame(cadence_time, columns=['start_time', 'end_time'])
-            frame_df = pd.DataFrame(cadence_frame, columns=['start_frame', 'end_frame'])
-            discomfort_df = pd.concat([time_df, frame_df], axis=1)
-            discomfort_df['type'] = 'cadence'
-            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
-
-        # 将顿挫组的起始时间为组重新统计时间
-        cadence_time_list = [time for pair in cadence_time for time in time_list if pair[0] <= time <= pair[1]]
-
-        stre_list = []
-        freq_list = []
-        for g in group_time:
-            # calculate strength
-            g_df = data[data['simTime'].isin(g)]
-            strength = g_df['lon_acc'].abs().mean()
-            stre_list.append(strength)
-
-            # calculate frequency
-            cnt = len(g)
-            t_start = g_df['simTime'].iloc[0]
-            t_end = g_df['simTime'].iloc[-1]
-            t_delta = t_end - t_start
-            frequency = cnt / t_delta
-            freq_list.append(frequency)
-
-        self.cadence_count = len(freq_list)
-        cadence_stre = sum(stre_list) / len(stre_list) if stre_list else 0
-
-        return cadence_time_list
-    
-    def _slam_brake_detector(self):
-        """急刹车检测器"""
-        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'ip_dec', 'slam_brake']].copy()
-        res_df = data[data['slam_brake'] == 1]
-        t_list = res_df['simTime'].values
-        f_list = res_df['simFrame'].values.tolist()
-
-        TIME_RANGE = 1
-        group_time = []
-        group_frame = []
-        sub_group_time = []
-        sub_group_frame = []
-        for i in range(len(f_list)):
-            if not sub_group_time or f_list[i] - f_list[i - 1] <= TIME_RANGE:  # 连续帧的算作同一组急刹
-                sub_group_time.append(t_list[i])
-                sub_group_frame.append(f_list[i])
-            else:
-                group_time.append(sub_group_time)
-                group_frame.append(sub_group_frame)
-                sub_group_time = [t_list[i]]
-                sub_group_frame = [f_list[i]]
-
-        group_time.append(sub_group_time)
-        group_frame.append(sub_group_frame)
-        group_time = [g for g in group_time if len(g) >= 2]  # 达到两帧算作一次急刹
-        group_frame = [g for g in group_frame if len(g) >= 2]
-
-        # 输出图表值
-        slam_brake_time = [[g[0], g[-1]] for g in group_time]
-        slam_brake_frame = [[g[0], g[-1]] for g in group_frame]
-
-        if slam_brake_time:
-            time_df = pd.DataFrame(slam_brake_time, columns=['start_time', 'end_time'])
-            frame_df = pd.DataFrame(slam_brake_frame, columns=['start_frame', 'end_frame'])
-            discomfort_df = pd.concat([time_df, frame_df], axis=1)
-            discomfort_df['type'] = 'slam_brake'
-            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
-
-        time_list = [element for sublist in group_time for element in sublist]
-        self.slam_brake_count = len(group_time)
-        return time_list
-    
-    def _slam_accel_detector(self):
-        """急加速检测器"""
-        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'ip_acc', 'slam_accel']].copy()
-        res_df = data.loc[data['slam_accel'] == 1]
-        t_list = res_df['simTime'].values
-        f_list = res_df['simFrame'].values.tolist()
-
-        group_time = []
-        group_frame = []
-        sub_group_time = []
-        sub_group_frame = []
-        for i in range(len(f_list)):
-            if not group_time or f_list[i] - f_list[i - 1] <= 1:  # 连续帧的算作同一组急加速
-                sub_group_time.append(t_list[i])
-                sub_group_frame.append(f_list[i])
-            else:
-                group_time.append(sub_group_time)
-                group_frame.append(sub_group_frame)
-                sub_group_time = [t_list[i]]
-                sub_group_frame = [f_list[i]]
-
-        group_time.append(sub_group_time)
-        group_frame.append(sub_group_frame)
-        group_time = [g for g in group_time if len(g) >= 2]
-        group_frame = [g for g in group_frame if len(g) >= 2]
-
-        # 输出图表值
-        slam_accel_time = [[g[0], g[-1]] for g in group_time]
-        slam_accel_frame = [[g[0], g[-1]] for g in group_frame]
-
-        if slam_accel_time:
-            time_df = pd.DataFrame(slam_accel_time, columns=['start_time', 'end_time'])
-            frame_df = pd.DataFrame(slam_accel_frame, columns=['start_frame', 'end_frame'])
-            discomfort_df = pd.concat([time_df, frame_df], axis=1)
-            discomfort_df['type'] = 'slam_accel'
-            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
-
-        time_list = [element for sublist in group_time for element in sublist]
-        self.slam_accel_count = len(group_time)
-        return time_list
-
-
-class ComfortManager:
-    """舒适性指标计算主类"""
-    
-    def __init__(self, data_processed):
-        self.data = data_processed
-        self.logger = LogManager().get_logger()
-        self.registry = ComfortRegistry(self.data)
-
-    def report_statistic(self):
-        """生成舒适性评分报告"""
-        comfort_result = self.registry.batch_execute()
-        
-        return comfort_result
-
-
-if __name__ == '__main__':
-    case_name = 'ICA'
-    mode_label = 'PGVIL'
-    
-    data = data_process.DataPreprocessing(case_name, mode_label)
-    comfort_instance = ComfortManager(data)
-    
-    try:  
-        comfort_result = comfort_instance.report_statistic() 
-        result = {'comfort': comfort_result}
-        print(result) 
-    except Exception as e:  
-        print(f"An error occurred in Comfort.report_statistic: {e}")
-
-# 将之前定义在类外部的方法移动到ComfortCalculator类内部
-def _apply_frequency_weighting(self, acceleration_data, weighting_type='Wk', fs=100):
-    """应用ISO 2631-1:1997标准的频率加权滤波
-    
-    参数:
-        acceleration_data: 加速度时间序列数据
-        weighting_type: 加权类型,可选值包括:
-            - 'Wk': 垂直方向(Z轴)加权
-            - 'Wd': 水平方向(X和Y轴)加权
-            - 'Wf': 运动病相关加权
-        fs: 采样频率(Hz)
-        
-    返回:
-        加权后的加速度数据
-    """
-    # 检查数据有效性
-    if acceleration_data.empty or acceleration_data.isna().all():
-        return acceleration_data
-        
-    # 根据ISO 2631-1:1997标准设计滤波器
-    # 这些参数来自标准文档,用于构建数字滤波器
-    if weighting_type == 'Wk':  # 垂直方向(Z轴)
-        # Wk滤波器参数
-        f1 = 0.4
-        f2 = 100.0
-        f3 = 12.5
-        f4 = 12.5
-        Q1 = 0.63
-        Q2 = 0.5
-        Q3 = 0.63
-        Q4 = 0.63
-        K = 0.4
-    elif weighting_type == 'Wd':  # 水平方向(X和Y轴)
-        # Wd滤波器参数
-        f1 = 0.4
-        f2 = 100.0
-        f3 = 2.0
-        f4 = 2.0
-        Q1 = 0.63
-        Q2 = 0.5
-        Q3 = 0.63
-        Q4 = 0.63
-        K = 0.4
-    elif weighting_type == 'Wf':  # 运动病相关
-        # Wf滤波器参数
-        f1 = 0.08
-        f2 = 0.63
-        f3 = 0.25
-        f4 = 0.8
-        Q1 = 0.63
-        Q2 = 0.86
-        Q3 = 0.8
-        Q4 = 0.8
-        K = 1.0
-    else:
-        self.logger.warning(f"未知的加权类型: {weighting_type},使用原始数据")
-        return acceleration_data
-    
-    # 将频率转换为角频率
-    w1 = 2 * np.pi * f1
-    w2 = 2 * np.pi * f2
-    w3 = 2 * np.pi * f3
-    w4 = 2 * np.pi * f4
-    
-    # 设计高通滤波器(s域)
-    b1 = [K * w1**2, 0]
-    a1 = [1, w1/Q1, w1**2]
-    
-    # 设计低通滤波器(s域)
-    b2 = [K, 0, 0]
-    a2 = [1, w2/Q2, w2**2]
-    
-    # 设计加速度-速度转换滤波器(s域)
-    b3 = [K, 0]
-    a3 = [1, w3/Q3, w3**2]
-    
-    # 设计上升滤波器(s域)
-    b4 = [K, 0, 0]
-    a4 = [1, w4/Q4, w4**2]
-    
-    # 使用双线性变换将s域滤波器转换为z域
-    b1_z, a1_z = scipy.signal.bilinear(b1, a1, fs)
-    b2_z, a2_z = scipy.signal.bilinear(b2, a2, fs)
-    b3_z, a3_z = scipy.signal.bilinear(b3, a3, fs)
-    b4_z, a4_z = scipy.signal.bilinear(b4, a4, fs)
-    
-    # 应用滤波器链
-    data_np = acceleration_data.to_numpy()
-    filtered_data = scipy.signal.lfilter(b1_z, a1_z, data_np)
-    filtered_data = scipy.signal.lfilter(b2_z, a2_z, filtered_data)
-    filtered_data = scipy.signal.lfilter(b3_z, a3_z, filtered_data)
-    filtered_data = scipy.signal.lfilter(b4_z, a4_z, filtered_data)
-    
-    return pd.Series(filtered_data, index=acceleration_data.index)
-
-def calculate_ava_vav(self):
-    """计算多维度综合加权加速度
-    
-    基于ISO 2631-1:1997标准,综合考虑车辆在三个平移方向和三个旋转方向的加速度或角速度
-    
-    Returns:
-        float: 多维度综合加权加速度值
-    """
-    # 定义各方向的权重系数
-    k_x = 1.0  # X方向加速度权重
-    k_y = 1.0  # Y方向加速度权重
-    k_z = 1.0  # Z方向加速度权重
-    k_roll = 0.63  # 横滚角速度权重
-    k_pitch = 0.8  # 俯仰角速度权重
-    k_yaw = 0.5  # 偏航角速度权重
-    
-    # 获取数据
-    df = self.ego_df.copy()
-    
-    # 确保有必要的列
-    if 'accelX' not in df.columns or 'accelY' not in df.columns:
-        self.logger.warning("缺少计算多维度综合加权加速度所需的数据列")
-        return self.calculated_value['ava_vav']
-    
-    # 将东北天坐标系下的加速度转换为车身坐标系下的加速度
-    # 车身坐标系:X轴指向车头,Y轴指向车辆左侧,Z轴指向车顶
-    if 'posH' not in df.columns:
-        self.logger.warning("缺少航向角数据,无法进行坐标转换")
-        return self.calculated_value['ava_vav']
-        
-    df['posH_rad'] = np.radians(df['posH'])
-    
-    # 转换加速度到车身坐标系
-    # 注意:posH是航向角,北向为0度,顺时针为正
-    # 车身X轴 = 东向*sin(posH) + 北向*cos(posH)
-    # 车身Y轴 = 东向*cos(posH) - 北向*sin(posH)
-    df['a_x_body'] = df['accelX'] * np.sin(df['posH_rad']) + df['accelY'] * np.cos(df['posH_rad'])
-    df['a_y_body'] = df['accelX'] * np.cos(df['posH_rad']) - df['accelY'] * np.sin(df['posH_rad'])
-    
-    # Z方向加速度,如果没有则假设为0
-    df['a_z_body'] = df['accelZ'] if 'accelZ' in df.columns else pd.Series(np.zeros(len(df)))
-    
-    # 角速度数据,如果没有则使用角速度变化率代替
-    # 注意:speedH是航向角速度,需要转换为车身坐标系下的偏航角速度
-    omega_roll = df['rollRate'] if 'rollRate' in df.columns else pd.Series(np.zeros(len(df)))
-    omega_pitch = df['pitchRate'] if 'pitchRate' in df.columns else pd.Series(np.zeros(len(df)))
-    omega_yaw = df['speedH']  # 使用航向角速度作为偏航角速度
-    
-    # 应用ISO 2631-1:1997标准的频率加权滤波
-    # 估计采样频率 - 假设数据是均匀采样的
-    if len(df) > 1:
-        time_diff = df['simTime'].diff().median()
-        fs = 1.0 / time_diff if time_diff > 0 else 100  # 默认100Hz
-    else:
-        fs = 100  # 默认采样频率
-    
-    # 对各方向加速度应用适当的频率加权
-    a_x_weighted = self._apply_frequency_weighting(df['a_x_body'], 'Wd', fs)
-    a_y_weighted = self._apply_frequency_weighting(df['a_y_body'], 'Wd', fs)
-    a_z_weighted = self._apply_frequency_weighting(df['a_z_body'], 'Wk', fs)
-    
-    # 对角速度也应用适当的频率加权
-    # 注意:ISO标准没有直接指定角速度的加权,这里使用简化处理
-    omega_roll_weighted = omega_roll  # 可以根据需要应用适当的滤波
-    omega_pitch_weighted = omega_pitch
-    omega_yaw_weighted = omega_yaw
-    
-    # 计算加权均方根值 (r.m.s.)
-    # 对每个方向的加速度/角速度平方后求平均,再开平方根
-    a_x_rms = np.sqrt(np.mean(a_x_weighted**2))
-    a_y_rms = np.sqrt(np.mean(a_y_weighted**2))
-    a_z_rms = np.sqrt(np.mean(a_z_weighted**2))
-    omega_roll_rms = np.sqrt(np.mean(omega_roll_weighted**2))
-    omega_pitch_rms = np.sqrt(np.mean(omega_pitch_weighted**2))
-    omega_yaw_rms = np.sqrt(np.mean(omega_yaw_weighted**2))
-    
-    # 计算综合加权加速度
-    ava_vav = np.sqrt(
-        k_x * a_x_rms**2 + 
-        k_y * a_y_rms**2 + 
-        k_z * a_z_rms**2 + 
-        k_roll * omega_roll_rms**2 + 
-        k_pitch * omega_pitch_rms**2 + 
-        k_yaw * omega_yaw_rms**2
-    )
-    
-    # 记录计算结果
-    self.calculated_value['ava_vav'] = ava_vav
-    self.logger.info(f"多维度综合加权加速度(ava_vav)计算结果: {ava_vav}")
-    
-    return ava_vav
-
-
-def calculate_msdv(self):
-        """计算晕动剂量值(Motion Sickness Dose Value, MSDV)
-        
-        MSDV用于量化乘员因持续振动而产生的晕动风险,其物理意义是
-        "频率加权后的加速度有效值的平方对时间的累积",
-        能够反映乘员在一定时间内受到振动刺激的总量。
-        
-        计算公式: MSDV = ∫[0,T] |a_ω(t)|² dt
-        
-        Returns:
-            float: 晕动剂量值
-        """
-        # 获取数据
-        df = self.ego_df.copy()
-        
-        # 确保有必要的列
-        if 'accelX' not in df.columns or 'accelY' not in df.columns:
-            self.logger.warning("缺少计算晕动剂量值所需的数据列")
-            return self.calculated_value['msdv']
-        
-        # 将东北天坐标系下的加速度转换为车身坐标系下的加速度
-        if 'posH' not in df.columns:
-            self.logger.warning("缺少航向角数据,无法进行坐标转换")
-            return self.calculated_value['msdv']
-            
-        # 车身坐标系:X轴指向车头,Y轴指向车辆左侧,Z轴指向车顶
-        df['posH_rad'] = np.radians(df['posH'])
-        
-        # 转换加速度到车身坐标系
-        # 注意:posH是航向角,北向为0度,顺时针为正
-        # 车身X轴 = 东向*sin(posH) + 北向*cos(posH)
-        # 车身Y轴 = 东向*cos(posH) - 北向*sin(posH)
-        df['a_x_body'] = df['accelX'] * np.sin(df['posH_rad']) + df['accelY'] * np.cos(df['posH_rad'])
-        df['a_y_body'] = df['accelX'] * np.cos(df['posH_rad']) - df['accelY'] * np.sin(df['posH_rad'])
-        
-        # Z方向加速度,如果没有则假设为0
-        df['a_z_body'] = df['accelZ'] if 'accelZ' in df.columns else pd.Series(np.zeros(len(df)))
-        
-        # 计算时间差
-        df['time_diff'] = df['simTime'].diff().fillna(0)
-        
-        # 应用ISO 2631-1:1997标准的频率加权滤波
-        # 估计采样频率 - 假设数据是均匀采样的
-        if len(df) > 1:
-            time_diff = df['simTime'].diff().median()
-            fs = 1.0 / time_diff if time_diff > 0 else 100  # 默认100Hz
-        else:
-            fs = 100  # 默认采样频率
-        
-        # 对各方向加速度应用适当的频率加权
-        # 对于晕动评估,使用Wf加权滤波器
-        a_x_weighted = self._apply_frequency_weighting(df['a_x_body'], 'Wf', fs)
-        a_y_weighted = self._apply_frequency_weighting(df['a_y_body'], 'Wf', fs)
-        a_z_weighted = self._apply_frequency_weighting(df['a_z_body'], 'Wf', fs)
-        
-        # 计算MSDV - 对加速度平方进行时间积分
-        # 对于X方向(前后方向)- 主要影响晕动感
-        msdv_x = np.sqrt(np.sum(a_x_weighted**2 * df['time_diff']))
-        
-        # 对于Y方向(左右方向)
-        msdv_y = np.sqrt(np.sum(a_y_weighted**2 * df['time_diff']))
-        
-        # 对于Z方向(上下方向)- 也对晕动有显著影响
-        msdv_z = np.sqrt(np.sum(a_z_weighted**2 * df['time_diff']))
-        
-        # 综合MSDV - 可以使用向量和或加权和
-        # 根据ISO 2631标准,垂直方向(Z)的权重通常更高
-        msdv = np.sqrt(msdv_x**2 + msdv_y**2 + (1.4 * msdv_z)**2)
-        
-        # 记录计算结果
-        self.calculated_value['msdv'] = msdv
-        self.logger.info(f"晕动剂量值(MSDV)计算结果: {msdv}")
-        
-        return msdv

+ 419 - 170
modules/metric/comfort.py

@@ -17,16 +17,17 @@ import math
 import scipy.signal
 import pandas as pd
 import numpy as np
+import os
 from pathlib import Path 
 from typing import Dict, List, Any, Optional, Callable, Union, Tuple
 
 from modules.lib.score import Score
 from modules.lib.common import get_interpolation, get_frame_with_time
 from modules.lib import data_process
-
 from modules.lib.log_manager import LogManager
+from modules.lib.chart_generator import generate_comfort_chart_data
 
-# 更新COMFORT_INFO列表,添加posH字段
+# 更新COMFORT_INFO列表,添加车辆坐标系下的速度和加速度字段
 COMFORT_INFO = [
     "simTime",
     "simFrame",
@@ -44,7 +45,11 @@ COMFORT_INFO = [
     "lon_acc_roc",
     "speedH",
     "accelH",
-    "posH",  # 添加航向角字段
+    "posH",           # 航向角字段
+    "lon_v_vehicle",   # 车辆坐标系下的纵向速度
+    "lat_v_vehicle",   # 车辆坐标系下的横向速度
+    "lon_acc_vehicle", # 车辆坐标系下的纵向加速度
+    "lat_acc_vehicle"  # 车辆坐标系下的横向加速度
 ]
 # ----------------------
 # 独立指标计算函数
@@ -85,11 +90,11 @@ def calculate_msdv(data_processed) -> dict:
     msdv_value = comfort.calculate_msdv()
     return {"msdv": float(msdv_value)}
     
-def calculate_weaving(data_processed) -> dict:
+def calculate_zigzag(data_processed) -> dict:
     """计算蛇行指标"""
     comfort = ComfortCalculator(data_processed)
     zigzag_count = comfort.calculate_zigzag_count()
-    return {"weaving": float(zigzag_count)}
+    return {"zigzag": float(zigzag_count)}
 
 def calculate_shake(data_processed) -> dict:
     """计算晃动指标"""
@@ -153,6 +158,7 @@ class ComfortRegistry:
         self.comfort_config = data_processed.comfort_config["comfort"]
         self.metrics = self._extract_metrics(self.comfort_config)
         self._registry = self._build_registry()
+        self.output_dir = None  # 图表数据输出目录
     
     def _extract_metrics(self, config_node: dict) -> list:
         """DFS遍历提取指标"""
@@ -197,6 +203,23 @@ class ComfortRegistry:
 class ComfortCalculator:
     """舒适性指标计算类 - 提供核心计算功能"""
     
+    def generate_metric_chart(self, metric_name: str) -> None:
+        """
+        生成指标图表
+        
+        Args:
+            metric_name: 指标名称
+        """
+        # 设置输出目录
+        if not hasattr(self, 'output_dir') or not self.output_dir:
+            self.output_dir = os.path.join(os.getcwd(), 'data')
+            os.makedirs(self.output_dir, exist_ok=True)
+        
+        # 调用chart_generator中的函数生成图表
+        chart_path = generate_comfort_chart_data(self, metric_name, self.output_dir)
+        if chart_path:
+            self.logger.info(f"{metric_name}图表已生成: {chart_path}")
+    
     def __init__(self, data_processed):
         self.data_processed = data_processed
         self.logger = LogManager().get_logger()
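modules/lib/chart_generator itself is not part of this diff; the sketch below is only a guess at the contract the wrapper above relies on, namely generate_comfort_chart_data(calculator, metric_name, output_dir) returning the written chart path or None. Column names, event-type labels and the CSV/PNG layout are assumptions taken from the surrounding code, not the real implementation.

    import os
    import matplotlib.pyplot as plt

    def generate_comfort_chart_data(calculator, metric_name, output_dir):
        """Hypothetical sketch: dump one metric's series to CSV, plot it, shade its events."""
        df = calculator.ego_df
        signal = 'lat_acc' if metric_name in ('zigzag', 'shake') else 'lon_acc'
        if signal not in df.columns:
            return None

        # 1) persist the plotted series as CSV next to the chart
        csv_path = os.path.join(output_dir, f"comfort_{metric_name}.csv")
        df[['simTime', 'simFrame', signal]].to_csv(csv_path, index=False)

        # 2) plot the series and shade the recorded discomfort windows of this type
        fig, ax = plt.subplots()
        ax.plot(df['simTime'], df[signal], label=signal)
        events = calculator.discomfort_df
        for _, ev in events[events['type'] == metric_name].iterrows():
            ax.axvspan(ev['start_time'], ev['end_time'], alpha=0.3)
        ax.set_xlabel('simTime')
        ax.legend()
        png_path = os.path.join(output_dir, f"comfort_{metric_name}.png")
        fig.savefig(png_path)
        plt.close(fig)
        return png_path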
@@ -207,7 +230,7 @@ class ComfortCalculator:
         
         # 统计指标
         self.calculated_value = {
-            'weaving': 0, 
+            'zigzag': 0, 
             'shake': 0, 
             'cadence': 0,
             'slamBrake': 0, 
@@ -231,6 +254,7 @@ class ComfortCalculator:
         
         self.zigzag_time_list = []
         self.zigzag_stre_list = []
+        self.shake_events = []  # 用于存储晃动事件数据
         
         self._initialize_data()
     
@@ -242,15 +266,51 @@ class ComfortCalculator:
     
     def _prepare_comfort_parameters(self):
         """准备舒适性计算所需参数"""
-        # 计算加减速阈值
-        self.ego_df['ip_acc'] = self.ego_df['v'].apply(get_interpolation, point1=[18, 4], point2=[72, 2])
-        self.ego_df['ip_dec'] = self.ego_df['v'].apply(get_interpolation, point1=[18, -5], point2=[72, -3.5])
-        self.ego_df['slam_brake'] = (self.ego_df['lon_acc'] - self.ego_df['ip_dec']).apply(
+        # 检查是否已经有车身坐标系下的加速度和速度数据
+        # 这些数据应该在data_process.py的process_ego_data方法中已经计算好
+        if 'lon_acc_vehicle' in self.ego_df.columns and 'lat_acc_vehicle' in self.ego_df.columns:
+            # 直接使用已转换的数据
+            self.logger.info("使用data_process中预先计算的车身坐标系加速度和速度数据")
+        elif 'posH' in self.ego_df.columns:
+            # 如果没有预先计算的数据,但有航向角,则进行计算
+            # 车身坐标系:X轴指向车头,Y轴指向车辆左侧,Z轴指向车顶
+            self.logger.warning("未找到预先计算的车身坐标系数据,进行实时计算")
+            self.ego_df['heading_rad'] = np.deg2rad(90 - self.ego_df['posH'])  # 转换为与x轴的夹角
+            
+            # 转换加速度到车身坐标系 - 使用与data_process.py相同的转换方法
+            self.ego_df['lon_acc_vehicle'] = self.ego_df['accelX'] * np.cos(self.ego_df['heading_rad']) + \
+                                          self.ego_df['accelY'] * np.sin(self.ego_df['heading_rad'])
+            self.ego_df['lat_acc_vehicle'] = -self.ego_df['accelX'] * np.sin(self.ego_df['heading_rad']) + \
+                                          self.ego_df['accelY'] * np.cos(self.ego_df['heading_rad'])
+            
+            # 转换速度到车身坐标系
+            self.ego_df['lon_v_vehicle'] = self.ego_df['speedX'] * np.cos(self.ego_df['heading_rad']) + \
+                                        self.ego_df['speedY'] * np.sin(self.ego_df['heading_rad'])
+            self.ego_df['lat_v_vehicle'] = -self.ego_df['speedX'] * np.sin(self.ego_df['heading_rad']) + \
+                                        self.ego_df['speedY'] * np.cos(self.ego_df['heading_rad'])
+        else:
+            self.logger.warning("缺少航向角数据,无法将数据转换为车身坐标系")
+        
+        # 计算加减速阈值 - 使用车辆坐标系下的纵向速度代替合速度
+        speed_field = 'lon_v_vehicle' if 'lon_v_vehicle' in self.ego_df.columns else 'v'
+        self.logger.info(f"加减速阈值计算使用的速度字段: {speed_field}")
+        
+        self.ego_df['ip_acc'] = self.ego_df[speed_field].apply(get_interpolation, point1=[18, 4], point2=[72, 2])
+        self.ego_df['ip_dec'] = self.ego_df[speed_field].apply(get_interpolation, point1=[18, -5], point2=[72, -3.5])
+        
+        # 使用车辆坐标系下的纵向加速度计算急刹车和急加速
+        acc_field = 'lon_acc_vehicle' if 'lon_acc_vehicle' in self.ego_df.columns else 'lon_acc'
+        self.logger.info(f"急刹车和急加速检测使用的加速度字段: {acc_field}")
+        
+        # 使用车辆坐标系下的纵向加速度与阈值比较,判断急刹车和急加速
+        self.ego_df['slam_brake'] = (self.ego_df[acc_field] - self.ego_df['ip_dec']).apply(
             lambda x: 1 if x < 0 else 0)
-        self.ego_df['slam_accel'] = (self.ego_df['lon_acc'] - self.ego_df['ip_acc']).apply(
+        self.ego_df['slam_accel'] = (self.ego_df[acc_field] - self.ego_df['ip_acc']).apply(
             lambda x: 1 if x > 0 else 0)
+        
+        # 确保cadence列使用车辆坐标系下的纵向加速度计算
         self.ego_df['cadence'] = self.ego_df.apply(
-            lambda row: self._cadence_process_new(row['lon_acc'], row['ip_acc'], row['ip_dec']), axis=1)
+            lambda row: self._cadence_process_new(row[acc_field], row['ip_acc'], row['ip_dec']), axis=1)
 
     def _apply_frequency_weighting(self, acceleration_data, weighting_type='Wk', fs=100):
         """应用ISO 2631-1:1997标准的频率加权滤波
@@ -356,6 +416,11 @@ class ComfortCalculator:
         
         相较于MSDV的二次累积,VDV的四次累积使其对高幅值短时冲击更为敏感,
         能够更准确地反映剧烈颠簸对乘员舒适度的不利影响。
+        在自动驾驶体验分析中,两者的典型分工:
+
+        MSDV → 标记低频摇摆造成的晕车风险
+
+        VDV → 标记座椅/底盘振动强度(如悬挂问题、路面冲击)
         
         Returns:
             float: 振动剂量值
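A side-by-side sketch of the second-power (MSDV) and fourth-power (VDV) accumulations described above, on synthetic signals; the ISO frequency weighting is assumed to have been applied already:

    import numpy as np

    def vdv(t, a_w):
        """VDV = (sum(a_w**4 * dt)) ** 0.25 -- fourth-power accumulation."""
        dt = np.diff(t, prepend=t[0])
        return np.sum(a_w**4 * dt) ** 0.25

    def msdv(t, a_w):
        """MSDV = sqrt(sum(a_w**2 * dt)) -- second-power accumulation."""
        dt = np.diff(t, prepend=t[0])
        return np.sqrt(np.sum(a_w**2 * dt))

    t = np.arange(0.0, 60.0, 0.01)
    smooth = 0.5 * np.sin(2 * np.pi * 0.3 * t)   # steady low-level vibration
    spiky = smooth.copy()
    spiky[3000:3010] += 4.0                      # one short, sharp jolt
    print(vdv(t, smooth) < vdv(t, spiky))        # True: VDV reacts strongly to the jolt
    print(msdv(t, smooth), msdv(t, spiky))       # MSDV changes comparatively little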
@@ -448,6 +513,12 @@ class ComfortCalculator:
         
         Returns:
             float: 晕车概率(0-100%)
+
+        | 晕车概率 (%) | 晕车程度分级           | 建议动作                                 |
+        | ----------- | ---------------------- | ---------------------------------------- |
+        | 0 - 10%     | 无不适感(Comfortable)  | 无需干预                                  |
+        | 10% - 30%   | 轻度不适(Slight)       | 可接受                                    |
+        | 30% - 50%   | 中度不适(Moderate)     | 需关注驾驶方式                             |
+        | > 50%       | 明显不适(Discomfort)   | 应考虑优化轨迹、减速、减小加加速度变化率      |
         """
         # 获取数据
         df = self.ego_df.copy()
@@ -534,6 +605,15 @@ class ComfortCalculator:
         
         Returns:
             float: 运动舒适度指数(0-10)
+
+        | 舒适指数 `comfort_index` | 等级     | 描述                     |
+        | ----------------------- | -------- | ------------------------ |
+        | 8 - 10                  | 非常舒适 | 几乎无晃动               |
+        | 6 - 8                   | 舒适     | 偶有轻微晃动             |
+        | 4 - 6                   | 一般     | 有一定晃动感             |
+        | 2 - 4                   | 不适     | 明显晃动、影响乘坐感受   |
+        | 0 - 2                   | 极度不适 | 强烈晃动,需优化控制系统 |
         """
         # 获取数据
         df = self.ego_df.copy()
@@ -905,7 +985,18 @@ class ComfortCalculator:
         return False
     
     def _cadence_process_new(self, lon_acc, ip_acc, ip_dec):
-        """处理顿挫数据"""
+        """处理顿挫数据
+        
+        使用车辆坐标系下的纵向加速度判断顿挫
+        
+        Args:
+            lon_acc: 纵向加速度(车辆坐标系)
+            ip_acc: 加速阈值
+            ip_dec: 减速阈值
+            
+        Returns:
+            int/float: nan表示不符合顿挫条件,1表示加速顿挫,-1表示减速顿挫,0表示正常
+        """
         if abs(lon_acc) < 1 or lon_acc > ip_acc or lon_acc < ip_dec:
             return np.nan
         elif abs(lon_acc) == 0:
@@ -918,10 +1009,43 @@ class ComfortCalculator:
             return 0
     
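A standalone copy of the branching above for quick inspection; the ip_acc/ip_dec values in the demo are roughly what the [18, 4] to [72, 2] and [18, -5] to [72, -3.5] interpolations give at mid-range speeds:

    import numpy as np

    def cadence_label(lon_acc, ip_acc, ip_dec):
        # same branching as _cadence_process_new
        if abs(lon_acc) < 1 or lon_acc > ip_acc or lon_acc < ip_dec:
            return np.nan            # too small, or already covered by slam accel/brake
        elif abs(lon_acc) == 0:
            return 0
        elif lon_acc > 0 and lon_acc < ip_acc:
            return 1                 # noticeable acceleration surge
        elif lon_acc < 0 and lon_acc > ip_dec:
            return -1                # noticeable deceleration surge
        else:
            return 0

    for a in (0.5, 1.8, -2.5, 3.5, -4.5):
        print(a, cadence_label(a, ip_acc=3.0, ip_dec=-4.0))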
     @peak_valley_decorator
-    def _zigzag_count_func(self, p_curr, p_last, flag=True):
-        """计算蛇行次数"""
+    def _zigzag_detector(self, p_curr, p_last, flag=True):
+        """检测蛇行事件"""
         if flag:
-            self.zigzag_count += 1
+            # 记录蛇行事件的起止时间和帧号
+            start_time = p_last[0]
+            end_time = p_curr[0]
+            start_frame = get_frame_with_time(self.time_list, self.frame_list, start_time)
+            end_frame = get_frame_with_time(self.time_list, self.frame_list, end_time)
+            
+            # 计算事件持续时间
+            duration = end_time - start_time
+            
+            # 设置最小持续时间阈值
+            min_duration = 0.5  # 秒
+            
+            if duration >= min_duration:
+                # 更新蛇行计数
+                self.zigzag_count += 1
+                
+                # 添加到不舒适事件表
+                new_row = pd.DataFrame([{
+                    'start_time': start_time,
+                    'end_time': end_time,
+                    'start_frame': start_frame,
+                    'end_frame': end_frame,
+                    'type': 'zigzag'
+                }])
+                self.discomfort_df = pd.concat([self.discomfort_df, new_row], ignore_index=True)
+                
+                # 记录事件信息到zigzag_time_list
+                self.zigzag_time_list.append({
+                    'start_time': start_time,
+                    'end_time': end_time,
+                    'start_frame': start_frame,
+                    'end_frame': end_frame,
+                    'duration': duration
+                })
         else:
             self.zigzag_count += 0
     
@@ -949,200 +1073,296 @@ class ComfortCalculator:
     
     def calculate_zigzag_count(self):
         """计算蛇行指标"""
-        self._zigzag_count_func()
+        self._zigzag_detector()
+        
+        # 生成蛇行指标图表
+        self.generate_metric_chart('zigzag')
+        
         return self.zigzag_count
     
     def calculate_shake_count(self):
         """计算晃动指标"""
         self._shake_detector()
+        
+        # 生成晃动指标图表
+        self.generate_metric_chart('shake')
+        
         return self.shake_count
     
     def calculate_cadence_count(self):
         """计算顿挫指标"""
-        self._cadence_detector()
+        # 调用顿挫检测器
+        cadence_time_ranges = self._cadence_detector()
+        
+        # 记录检测结果
+        self.calculated_value['cadence'] = self.cadence_count
+        
+        # 生成顿挫指标图表
+        self.generate_metric_chart('cadence')
+        
         return self.cadence_count
     
     def calculate_slam_brake_count(self):
         """计算急刹车指标"""
         self._slam_brake_detector()
+        
+        # 生成急刹车指标图表
+        self.generate_metric_chart('slamBrake')
+        
         return self.slam_brake_count
+        
     
     def calculate_slam_accel_count(self):
         """计算急加速指标"""
         self._slam_accel_detector()
+        
+        # 生成急加速指标图表
+        self.generate_metric_chart('slamAccelerate')
+        
         return self.slam_accel_count
+        
+
+    def calculate_vdv(self):
+        """计算振动剂量值(Vibration Dose Value, VDV)指标"""
+        vdv_value = super().calculate_vdv()
+        
+        # 生成VDV指标图表
+        self.generate_metric_chart('vdv')
+        
+        return vdv_value
+
+    def calculate_ava_vav(self):
+        """计算多维度综合加权加速度"""
+        ava_vav_value = super().calculate_ava_vav()
+        
+        # 生成AVA_VAV指标图表
+        self.generate_metric_chart('ava_vav')
+        
+        return ava_vav_value
+
+    def calculate_msdv(self):
+        """计算晕动剂量值(Motion Sickness Dose Value, MSDV)"""
+        msdv_value = super().calculate_msdv()
+        
+        # 生成MSDV指标图表
+        self.generate_metric_chart('msdv')
+        
+        return msdv_value
     
-    def _shake_detector(self):
-        """检测晃动事件"""
+    def _shake_detector(self, T_diff=0.5):
+        """检测晃动事件 - 改进版本(使用向量化操作)
+        
+        该函数通过以下步骤检测车辆晃动事件:
+        1. 计算横向加速度变化率和横摆角速度变化率
+        2. 分析横摆角速度的短期变化特性
+        3. 设置基于车速的动态阈值
+        4. 综合多个条件判断晃动事件
+        5. 对连续帧进行分组处理
+        6. 保存晃动事件数据,供后续图表生成使用(CSV 导出与晃动时间段高亮由 chart_generator 处理)
+        """
         # 获取数据
         df = self.ego_df.copy()
         
         # 检查是否有必要的列
-        if 'lat_acc' not in df.columns:
+        if 'lat_acc' not in df.columns or 'posH' not in df.columns:
             self.logger.warning("缺少计算晃动指标所需的数据列")
             return
             
-        # 设置晃动检测阈值
-        shake_threshold = 1.5  # 横向加速度阈值 m/s²
-        min_duration = 0.5     # 最小持续时间 秒
+        # 将东北天坐标系下的数据转换为车身坐标系
+        # 车身坐标系:X轴指向车头,Y轴指向车辆左侧,Z轴指向车顶
+        df['posH_rad'] = np.radians(df['posH'])
+        
+        # 转换横向加速度到车身坐标系
+        df['lat_acc_body'] = df['lat_acc'] * np.cos(df['posH_rad']) - df['lon_acc'] * np.sin(df['posH_rad'])
+        
+        # 转换横摆角速度到车身坐标系
+        # speedH已经是车身坐标系下的横摆角速度,不需要转换
+        df['speedH_body'] = df['speedH']
+        
+        # 1. 计算横向加速度变化率(使用车身坐标系下的横向加速度)
+        df['lat_acc_rate'] = df['lat_acc_body'].diff() / df['simTime'].diff()
+        
+        # 2. 计算横摆角速度变化率(使用车身坐标系下的横摆角速度)
+        df['speedH_rate'] = df['speedH_body'].diff() / df['simTime'].diff()
+        
+        # 3. 计算横摆角速度的短期变化特性
+        window_size = 10  # 10帧窗口
+        df['speedH_std'] = df['speedH'].rolling(window=window_size, min_periods=2).std()
+        
+        # 4. 基于车速的动态阈值
+        v0 = 20 * 5/18        # ≈5.56 m/s
+        k  = 0.008 * 3.6      # =0.0288 per m/s
+        df['lat_acc_threshold'] = df['v'].apply(
+            lambda speed: max(
+                1.0,                                   # 下限 1.0 m/s²
+                min(
+                    1.8,                               # 上限 1.8 m/s²
+                    1.8 - k * (speed - v0)             # 线性递减
+                )
+            )
+        )
+        
+        df['speedH_threshold'] = df['v'].apply(
+            lambda speed: max(1.5, min(3.0, 2.0 * (1 + (speed - 20) / 60)))
+        )
+        
+        # 5. 综合判断晃动条件
+        # 条件A: 横向加速度超过阈值
+        condition_A = df['lat_acc'].abs() > df['lat_acc_threshold']
+        
+        # 条件B: 横向加速度变化率超过阈值
+        lat_acc_rate_threshold = 0.5  # 横向加速度变化率阈值 (m/s³)
+        condition_B = df['lat_acc_rate'].abs() > lat_acc_rate_threshold
+        
+        # 条件C: 横摆角速度有明显变化但不呈现周期性
+        condition_C = (df['speedH_std'] > df['speedH_threshold'])
         
-        # 标记超过阈值的点
-        df['shake_flag'] = (abs(df['lat_acc']) > shake_threshold).astype(int)
+        # 综合条件: 满足条件A,且满足条件B或条件C
+        shake_condition = condition_A & (condition_B | condition_C)
         
-        # 检测连续的晃动事件
+        # 6. 使用向量化操作检测连续事件
+        event_groups = (shake_condition != shake_condition.shift()).cumsum()
         shake_events = []
-        in_event = False
-        start_idx = 0
         
-        for i, row in df.iterrows():
-            if row['shake_flag'] == 1 and not in_event:
-                # 开始新的晃动事件
-                in_event = True
-                start_idx = i
-            elif row['shake_flag'] == 0 and in_event:
-                # 结束当前晃动事件
-                in_event = False
-                end_idx = i - 1
-                
-                # 计算事件持续时间
-                start_time = df.loc[start_idx, 'simTime']
-                end_time = df.loc[end_idx, 'simTime']
+        for _, group in df[shake_condition].groupby(event_groups):
+            if len(group) >= 2:  # 至少2帧才算一次晃动
+                start_time = group['simTime'].iloc[0]
+                end_time = group['simTime'].iloc[-1]
                 duration = end_time - start_time
                 
-                # 如果持续时间超过阈值,记录为有效晃动事件
-                if duration >= min_duration:
+                if duration >= T_diff:  # 只记录持续时间超过阈值的事件
                     shake_events.append({
                         'start_time': start_time,
                         'end_time': end_time,
-                        'start_frame': df.loc[start_idx, 'simFrame'],
-                        'end_frame': df.loc[end_idx, 'simFrame'],
+                        'start_frame': group['simFrame'].iloc[0],
+                        'end_frame': group['simFrame'].iloc[-1],
                         'duration': duration,
-                        'max_lat_acc': df.loc[start_idx:end_idx, 'lat_acc'].abs().max()
+                        'max_lat_acc': group['lat_acc'].abs().max()
                     })
                     
                     # 添加到不舒适事件表
-                    self.discomfort_df = self.discomfort_df.append({
+                    new_row = pd.DataFrame([{
                         'start_time': start_time,
                         'end_time': end_time,
-                        'start_frame': df.loc[start_idx, 'simFrame'],
-                        'end_frame': df.loc[end_idx, 'simFrame'],
+                        'start_frame': group['simFrame'].iloc[0],
+                        'end_frame': group['simFrame'].iloc[-1],
                         'type': 'shake'
-                    }, ignore_index=True)
-        
-        # 如果最后一个事件没有结束,检查它
-        if in_event:
-            end_idx = len(df) - 1
-            start_time = df.loc[start_idx, 'simTime']
-            end_time = df.loc[end_idx, 'simTime']
-            duration = end_time - start_time
-            
-            if duration >= min_duration:
-                shake_events.append({
-                    'start_time': start_time,
-                    'end_time': end_time,
-                    'start_frame': df.loc[start_idx, 'simFrame'],
-                    'end_frame': df.loc[end_idx, 'simFrame'],
-                    'duration': duration,
-                    'max_lat_acc': df.loc[start_idx:end_idx, 'lat_acc'].abs().max()
-                })
-                
-                # 添加到不舒适事件表
-                self.discomfort_df = self.discomfort_df.append({
-                    'start_time': start_time,
-                    'end_time': end_time,
-                    'start_frame': df.loc[start_idx, 'simFrame'],
-                    'end_frame': df.loc[end_idx, 'simFrame'],
-                    'type': 'shake'
-                }, ignore_index=True)
+                    }])
+                    self.discomfort_df = pd.concat([self.discomfort_df, new_row], ignore_index=True)
         
         # 更新晃动计数
         self.shake_count = len(shake_events)
         self.logger.info(f"检测到 {self.shake_count} 次晃动事件")
-    
-    def _cadence_detector(self):
-        """检测顿挫事件"""
-        # 获取数据
-        df = self.ego_df.copy()
-        
-        # 检查是否有必要的列
-        if 'cadence' not in df.columns:
-            self.logger.warning("缺少计算顿挫指标所需的数据列")
-            return
-            
-        # 设置顿挫检测参数
-        min_duration = 0.3  # 最小持续时间 秒
         
-        # 检测连续的顿挫事件
-        cadence_events = []
-        in_event = False
-        start_idx = 0
+        # 更新ego_df中的相关列
+        self.ego_df = df.copy()
         
-        for i, row in df.iterrows():
-            if not pd.isna(row['cadence']) and not in_event:
-                # 开始新的顿挫事件
-                in_event = True
-                start_idx = i
-                current_direction = np.sign(row['cadence'])
-            elif (pd.isna(row['cadence']) or np.sign(row['cadence']) != current_direction) and in_event:
-                # 结束当前顿挫事件
-                in_event = False
-                end_idx = i - 1
-                
-                # 计算事件持续时间
-                start_time = df.loc[start_idx, 'simTime']
-                end_time = df.loc[end_idx, 'simTime']
-                duration = end_time - start_time
-                
-                # 如果持续时间超过阈值,记录为有效顿挫事件
-                if duration >= min_duration:
-                    cadence_events.append({
-                        'start_time': start_time,
-                        'end_time': end_time,
-                        'start_frame': df.loc[start_idx, 'simFrame'],
-                        'end_frame': df.loc[end_idx, 'simFrame'],
-                        'duration': duration,
-                        'direction': 'acceleration' if current_direction > 0 else 'deceleration'
-                    })
-                    
-                    # 添加到不舒适事件表
-                    self.discomfort_df = self.discomfort_df.append({
-                        'start_time': start_time,
-                        'end_time': end_time,
-                        'start_frame': df.loc[start_idx, 'simFrame'],
-                        'end_frame': df.loc[end_idx, 'simFrame'],
-                        'type': 'cadence'
-                    }, ignore_index=True)
+        # 保存晃动事件数据到CSV文件,用于后续图表生成
+        if shake_events:
+            try:
+                # 保存用于绘图的数据
+                self.shake_events = shake_events
+                self.logger.info(f"检测到 {len(shake_events)} 个晃动事件,已保存用于图表生成")
+            except Exception as e:
+                self.logger.error(f"保存晃动事件数据失败: {str(e)}", exc_info=True)
         
-        # 如果最后一个事件没有结束,检查它
-        if in_event:
-            end_idx = len(df) - 1
-            start_time = df.loc[start_idx, 'simTime']
-            end_time = df.loc[end_idx, 'simTime']
-            duration = end_time - start_time
+        return shake_events
+    
+    
+    def _cadence_detector(self):
+        """
+        顿挫检测:短时间内出现多次加速度突变(cadence),例如频繁加减速。
+        - 特征点定义为 cadence 变化值非 0 的时刻;
+        - 将相邻时间差不超过 1s 的特征点聚为一组;
+        - 统计各组的强度与频率,并记录为不舒适事件。
+        """
+        # 检查必要字段是否存在
+        required_fields = ['simTime', 'simFrame', 'cadence']
+        acc_field = 'lon_acc_vehicle' if 'lon_acc_vehicle' in self.ego_df.columns else 'lon_acc'
+        
+        if not all(field in self.ego_df.columns for field in required_fields + [acc_field]):
+            missing_fields = [field for field in required_fields + [acc_field] if field not in self.ego_df.columns]
+            self.logger.warning(f"顿挫检测缺少必要字段: {missing_fields},无法进行检测")
+            self.cadence_count = 0
+            return []
             
-            if duration >= min_duration:
-                cadence_events.append({
-                    'start_time': start_time,
-                    'end_time': end_time,
-                    'start_frame': df.loc[start_idx, 'simFrame'],
-                    'end_frame': df.loc[end_idx, 'simFrame'],
-                    'duration': duration,
-                    'direction': 'acceleration' if current_direction > 0 else 'deceleration'
-                })
-                
-                # 添加到不舒适事件表
-                self.discomfort_df = self.discomfort_df.append({
-                    'start_time': start_time,
-                    'end_time': end_time,
-                    'start_frame': df.loc[start_idx, 'simFrame'],
-                    'end_frame': df.loc[end_idx, 'simFrame'],
-                    'type': 'cadence'
-                }, ignore_index=True)
+        # 提取必要字段
+        df = self.ego_df[['simTime', 'simFrame', acc_field, 'cadence']].copy()
+        # 重命名列以保持代码一致性
+        df.rename(columns={acc_field: 'acc_used'}, inplace=True)
+        
+        # 滤除无效cadence值
+        df = df[df['cadence'].notna()].copy()
+        df['cadence_diff'] = df['cadence'].diff()
+        df.dropna(subset=['cadence_diff'], inplace=True)
+        df = df[df['cadence_diff'] != 0]
+
+        if df.empty:
+            self.logger.info("未检测到明显cadence变化,未触发顿挫事件")
+            self.cadence_count = 0
+            return []
+
+        # 提取突变点信息
+        time_list = df['simTime'].tolist()
+        frame_list = df['simFrame'].tolist()
+
+        # 聚类突变点:按时间差小于 TIME_RANGE 分组
+        TIME_RANGE = 1.0  # 秒
+        grouped_times, grouped_frames = [], []
+        temp_times, temp_frames = [], []
+
+        for i in range(len(time_list)):
+            if not temp_times or (time_list[i] - temp_times[-1] <= TIME_RANGE):
+                temp_times.append(time_list[i])
+                temp_frames.append(frame_list[i])
+            else:
+                if len(temp_times) >= 1:
+                    grouped_times.append(temp_times)
+                    grouped_frames.append(temp_frames)
+                temp_times, temp_frames = [time_list[i]], [frame_list[i]]
+
+        if len(temp_times) >= 1:
+            grouped_times.append(temp_times)
+            grouped_frames.append(temp_frames)
+
+        # 只保留有效顿挫组
+        cadence_time_ranges = [[g[0], g[-1]] for g in grouped_times]
+        cadence_frame_ranges = [[g[0], g[-1]] for g in grouped_frames]
+
+        # 输出结果到 discomfort_df
+        if cadence_time_ranges:
+            discomfort_df = pd.DataFrame({
+                'start_time': [g[0] for g in cadence_time_ranges],
+                'end_time': [g[1] for g in cadence_time_ranges],
+                'start_frame': [g[0] for g in cadence_frame_ranges],
+                'end_frame': [g[1] for g in cadence_frame_ranges],
+                'type': 'cadence'
+            })
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        # 计算强度与频率(用于量化)
+        stre_list, freq_list = [], []
+        for group_times in grouped_times:
+            g_df = df[df['simTime'].isin(group_times)]
+            strength = g_df['acc_used'].abs().mean()
+            stre_list.append(strength)
+
+            if len(group_times) > 1:
+                t_delta = group_times[-1] - group_times[0]
+                freq = len(group_times) / t_delta if t_delta > 0 else 0
+                freq_list.append(freq)
+
+        # 存储检测统计
+        self.cadence_count = len(cadence_time_ranges)
+        cadence_strength = np.mean(stre_list) if stre_list else 0
+        self.logger.info(f"检测到 {self.cadence_count} 次顿挫事件,平均强度:{cadence_strength:.2f}")
         
-        # 更新顿挫计数
-        self.cadence_count = len(cadence_events)
-        self.logger.info(f"检测到 {self.cadence_count} 次顿挫事件")
-    
+        # 记录使用的加速度字段
+        self.logger.info(f"顿挫检测使用的加速度字段: {acc_field}")
+
+        return cadence_time_ranges
+
     def _slam_brake_detector(self):
         """检测急刹车事件"""
         # 获取数据
@@ -1178,23 +1398,27 @@ class ComfortCalculator:
                 
                 # 如果持续时间超过阈值,记录为有效急刹车事件
                 if duration >= min_duration:
+                    # 确定使用的加速度字段
+                    acc_field = 'lon_acc_vehicle' if 'lon_acc_vehicle' in df.columns else 'lon_acc'
+                    
                     slam_brake_events.append({
                         'start_time': start_time,
                         'end_time': end_time,
                         'start_frame': df.loc[start_idx, 'simFrame'],
                         'end_frame': df.loc[end_idx, 'simFrame'],
                         'duration': duration,
-                        'min_lon_acc': df.loc[start_idx:end_idx, 'lon_acc'].min()
+                        'min_lon_acc': df.loc[start_idx:end_idx, acc_field].min()
                     })
                     
                     # 添加到不舒适事件表
-                    self.discomfort_df = self.discomfort_df.append({
+                    new_row = pd.DataFrame([{
                         'start_time': start_time,
                         'end_time': end_time,
                         'start_frame': df.loc[start_idx, 'simFrame'],
                         'end_frame': df.loc[end_idx, 'simFrame'],
                         'type': 'slam_brake'
-                    }, ignore_index=True)
+                    }])
+                    self.discomfort_df = pd.concat([self.discomfort_df, new_row], ignore_index=True)
         
         # 如果最后一个事件没有结束,检查它
         if in_event:
@@ -1204,23 +1428,27 @@ class ComfortCalculator:
             duration = end_time - start_time
             
             if duration >= min_duration:
+                # 确定使用的加速度字段
+                acc_field = 'lon_acc_vehicle' if 'lon_acc_vehicle' in df.columns else 'lon_acc'
+                
                 slam_brake_events.append({
                     'start_time': start_time,
                     'end_time': end_time,
                     'start_frame': df.loc[start_idx, 'simFrame'],
                     'end_frame': df.loc[end_idx, 'simFrame'],
                     'duration': duration,
-                    'min_lon_acc': df.loc[start_idx:end_idx, 'lon_acc'].min()
+                    'min_lon_acc': df.loc[start_idx:end_idx, acc_field].min()
                 })
                 
                 # 添加到不舒适事件表
-                self.discomfort_df = self.discomfort_df.append({
+                new_row = pd.DataFrame([{
                     'start_time': start_time,
                     'end_time': end_time,
                     'start_frame': df.loc[start_idx, 'simFrame'],
                     'end_frame': df.loc[end_idx, 'simFrame'],
                     'type': 'slam_brake'
-                }, ignore_index=True)
+                }])
+                self.discomfort_df = pd.concat([self.discomfort_df, new_row], ignore_index=True)
         
         # 更新急刹车计数
         self.slam_brake_count = len(slam_brake_events)
@@ -1261,23 +1489,27 @@ class ComfortCalculator:
                 
                 # 如果持续时间超过阈值,记录为有效急加速事件
                 if duration >= min_duration:
+                    # 确定使用的加速度字段
+                    acc_field = 'lon_acc_vehicle' if 'lon_acc_vehicle' in df.columns else 'lon_acc'
+                    
                     slam_accel_events.append({
                         'start_time': start_time,
                         'end_time': end_time,
                         'start_frame': df.loc[start_idx, 'simFrame'],
                         'end_frame': df.loc[end_idx, 'simFrame'],
                         'duration': duration,
-                        'max_lon_acc': df.loc[start_idx:end_idx, 'lon_acc'].max()
+                        'max_lon_acc': df.loc[start_idx:end_idx, acc_field].max()
                     })
                     
                     # 添加到不舒适事件表
-                    self.discomfort_df = self.discomfort_df.append({
+                    new_row = pd.DataFrame([{
                         'start_time': start_time,
                         'end_time': end_time,
                         'start_frame': df.loc[start_idx, 'simFrame'],
                         'end_frame': df.loc[end_idx, 'simFrame'],
                         'type': 'slam_accel'
-                    }, ignore_index=True)
+                    }])
+                    self.discomfort_df = pd.concat([self.discomfort_df, new_row], ignore_index=True)
         
         # 如果最后一个事件没有结束,检查它
         if in_event:
@@ -1287,24 +1519,41 @@ class ComfortCalculator:
             duration = end_time - start_time
             
             if duration >= min_duration:
+                # 确定使用的加速度字段
+                acc_field = 'lon_acc_vehicle' if 'lon_acc_vehicle' in df.columns else 'lon_acc'
+                
                 slam_accel_events.append({
                     'start_time': start_time,
                     'end_time': end_time,
                     'start_frame': df.loc[start_idx, 'simFrame'],
                     'end_frame': df.loc[end_idx, 'simFrame'],
                     'duration': duration,
-                    'max_lon_acc': df.loc[start_idx:end_idx, 'lon_acc'].max()
+                    'max_lon_acc': df.loc[start_idx:end_idx, acc_field].max()
                 })
                 
                 # 添加到不舒适事件表
-                self.discomfort_df = self.discomfort_df.append({
+                new_row = pd.DataFrame([{
                     'start_time': start_time,
                     'end_time': end_time,
                     'start_frame': df.loc[start_idx, 'simFrame'],
                     'end_frame': df.loc[end_idx, 'simFrame'],
                     'type': 'slam_accel'
-                }, ignore_index=True)
+                }])
+                self.discomfort_df = pd.concat([self.discomfort_df, new_row], ignore_index=True)
         
         # 更新急加速计数
         self.slam_accel_count = len(slam_accel_events)
-        self.logger.info(f"检测到 {self.slam_accel_count} 次急加速事件")
+        self.logger.info(f"检测到 {self.slam_accel_count} 次急加速事件")
+
+
+class ComfortManager:
+    """舒适性指标计算主类"""
+    
+    def __init__(self, data_processed):
+        self.data = data_processed
+        self.logger = LogManager().get_logger()
+        self.registry = ComfortRegistry(self.data)
+
+    def report_statistic(self):
+        """生成舒适性评分报告"""
+        comfort_result = self.registry.batch_execute()
+        
+        return comfort_result

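The rewritten _shake_detector above replaces the old row-by-row loop with a vectorized run-grouping idiom, (condition != condition.shift()).cumsum() followed by groupby; isolated here on a toy frame:

    import pandas as pd

    df = pd.DataFrame({
        'simTime': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
        'flag':    [False, True, True, False, True, True, True],
    })

    # consecutive rows get a new group id every time the boolean value changes
    groups = (df['flag'] != df['flag'].shift()).cumsum()

    events = []
    for _, run in df[df['flag']].groupby(groups):
        events.append((float(run['simTime'].iloc[0]), float(run['simTime'].iloc[-1]), len(run)))

    print(events)   # [(0.1, 0.2, 2), (0.4, 0.6, 3)]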
+ 8 - 2
modules/metric/efficient.py

@@ -303,8 +303,14 @@ def acceleration_smoothness(self):
             float: 加速度平稳度 (0-1之间的比率,越接近1表示越平稳)
         """
         # 获取加速度数据
-        # 如果有直接的加速度数据,则使用;否则从速度计算
-        if 'accelX' in self.ego_df.columns and 'accelY' in self.ego_df.columns:
+        # 优先使用车辆坐标系下的加速度数据
+        if 'lon_acc_vehicle' in self.ego_df.columns and 'lat_acc_vehicle' in self.ego_df.columns:
+            # 使用车辆坐标系下的加速度计算合成加速度
+            lon_acc = self.ego_df['lon_acc_vehicle'].values
+            lat_acc = self.ego_df['lat_acc_vehicle'].values
+            accel_magnitude = np.sqrt(lon_acc**2 + lat_acc**2)
+            self.logger.info("使用车辆坐标系下的加速度计算合成加速度")
+        elif 'accelX' in self.ego_df.columns and 'accelY' in self.ego_df.columns:
             # 计算合成加速度(考虑X和Y方向)
             accel_x = self.ego_df['accelX'].values
             accel_y = self.ego_df['accelY'].values

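The hunk above makes acceleration_smoothness prefer the vehicle-frame components; the column fallback and the resultant magnitude, isolated as a minimal sketch with the column names from the diff:

    import numpy as np
    import pandas as pd

    def accel_magnitude(ego_df: pd.DataFrame) -> np.ndarray:
        """Resultant acceleration, preferring vehicle-frame components when present."""
        if {'lon_acc_vehicle', 'lat_acc_vehicle'}.issubset(ego_df.columns):
            ax, ay = ego_df['lon_acc_vehicle'], ego_df['lat_acc_vehicle']
        else:
            ax, ay = ego_df['accelX'], ego_df['accelY']
        return np.sqrt(ax.to_numpy() ** 2 + ay.to_numpy() ** 2)

    print(accel_magnitude(pd.DataFrame({'accelX': [3.0], 'accelY': [4.0]})))  # [5.]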
+ 19 - 15
modules/metric/function.py

@@ -133,6 +133,10 @@ def latestWarningDistance_TTC_LST(data) -> dict:
     # 处理无效的TTC值
     for i in range(len(ttc)):
         ttc[i] = float(value) if (not ttc[i] or ttc[i] < 0) else ttc[i]
+    
+    # 生成图表数据
+    from modules.lib.chart_generator import generate_function_chart_data
+    generate_function_chart_data(data, 'latestWarningDistance_TTC_LST')
         
     return {"latestWarningDistance_TTC_LST": float(ttc[-1]) if len(ttc) > 0 else value}
 
@@ -159,21 +163,21 @@ def earliestWarningDistance_TTC_LST(data) -> dict:
     return {"earliestWarningDistance_TTC_LST": float(ttc[0]) if len(ttc) > 0 else value}
 
 
-# def warningDelayTime_LST(data):
-#     scenario_name = data.function_config["function"]["scenario"]["name"]
-#     correctwarning = scenario_sign_dict[scenario_name]
-#     ego_df = data.ego_data
-#     HMI_warning_rows = ego_df[(ego_df['ifwarning'] == correctwarning)]['simTime'].tolist()
-#     simTime_HMI = HMI_warning_rows[0] if len(HMI_warning_rows) > 0 else None
-#     rosbag_warning_rows = ego_df[(ego_df['event_Type'].notna()) & ((ego_df['event_Type'] != np.nan))][
-#         'simTime'].tolist()
-#     simTime_rosbag = rosbag_warning_rows[0] if len(rosbag_warning_rows) > 0 else None
-#     if (simTime_HMI is None) or (simTime_rosbag is None):
-#         print("预警出错!")
-#         delay_time = 100.0
-#     else:
-#         delay_time = abs(simTime_HMI - simTime_rosbag)
-#     return {"warningDelayTime_LST": delay_time}
+def warningDelayTime_LST(data):
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    HMI_warning_rows = ego_df[(ego_df['ifwarning'] == correctwarning)]['simTime'].tolist()
+    simTime_HMI = HMI_warning_rows[0] if len(HMI_warning_rows) > 0 else None
+    rosbag_warning_rows = ego_df[ego_df['event_Type'].notna()]['simTime'].tolist()
+    simTime_rosbag = rosbag_warning_rows[0] if len(rosbag_warning_rows) > 0 else None
+    if (simTime_HMI is None) or (simTime_rosbag is None):
+        print("预警出错!")
+        delay_time = 100.0
+    else:
+        delay_time = abs(simTime_HMI - simTime_rosbag)
+    return {"warningDelayTime_LST": delay_time}
 
 
 def warningDelayTimeofReachDecel_LST(data):

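A standalone restatement of the warningDelayTime_LST logic restored above, on a toy frame; the ifwarning/event_Type semantics and the 100.0 fallback are taken from the code shown in the diff:

    import pandas as pd

    def warning_delay(ego_df: pd.DataFrame, correctwarning) -> float:
        """Delay between the first HMI warning and the first rosbag event."""
        hmi_times = ego_df.loc[ego_df['ifwarning'] == correctwarning, 'simTime']
        rosbag_times = ego_df.loc[ego_df['event_Type'].notna(), 'simTime']
        if hmi_times.empty or rosbag_times.empty:
            return 100.0        # same fallback as the diff when either side is missing
        return abs(float(hmi_times.iloc[0]) - float(rosbag_times.iloc[0]))

    demo = pd.DataFrame({'simTime': [10.0, 10.2, 10.5],
                         'ifwarning': [None, 1, 1],
                         'event_Type': [None, None, 'warning']})
    print(round(warning_delay(demo, correctwarning=1), 3))   # 0.3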
+ 278 - 133
modules/metric/safety.py

@@ -4,15 +4,19 @@
 安全指标计算模块
 """
 
+import os
 import numpy as np
 import pandas as pd
 import math
+import matplotlib.pyplot as plt
+import scipy.integrate as spi
 from collections import defaultdict
 from typing import Dict, Any, List, Optional
 import ast
+from pathlib import Path
 
 from modules.lib.score import Score
 from modules.lib.log_manager import LogManager
+from modules.lib.chart_generator import generate_safety_chart_data
 
 # 安全指标相关常量
 SAFETY_INFO = [
@@ -27,12 +31,7 @@ SAFETY_INFO = [
     "accelX",
     "accelY",
     "v",
-    "type",
-    "lane_width",
-    "lane_type",
-    "road_type",
-    "lane_coords",
-    "link_coords"
+    "type"
 ]
 
 # ----------------------
@@ -45,6 +44,9 @@ def calculate_ttc(data_processed) -> dict:
     try:
         safety = SafetyCalculator(data_processed)
         ttc_value = safety.get_ttc_value()
+        # 只生成图表,数据导出由chart_generator处理
+        if safety.ttc_data:
+            safety.generate_metric_chart('TTC')
         LogManager().get_logger().info(f"安全指标[TTC]计算结果: {ttc_value}")
         return {"TTC": ttc_value}
     except Exception as e:
@@ -58,6 +60,8 @@ def calculate_mttc(data_processed) -> dict:
     try:
         safety = SafetyCalculator(data_processed)
         mttc_value = safety.get_mttc_value()
+        if safety.mttc_data:
+            safety.generate_metric_chart('MTTC')
         LogManager().get_logger().info(f"安全指标[MTTC]计算结果: {mttc_value}")
         return {"MTTC": mttc_value}
     except Exception as e:
@@ -71,6 +75,8 @@ def calculate_thw(data_processed) -> dict:
     try:
         safety = SafetyCalculator(data_processed)
         thw_value = safety.get_thw_value()
+        if safety.thw_data:
+            safety.generate_metric_chart('THW')
         LogManager().get_logger().info(f"安全指标[THW]计算结果: {thw_value}")
         return {"THW": thw_value}
     except Exception as e:
@@ -84,6 +90,8 @@ def calculate_tlc(data_processed) -> dict:
     try:
         safety = SafetyCalculator(data_processed)
         tlc_value = safety.get_tlc_value()
+        if safety.tlc_data:
+            safety.generate_metric_chart('TLC')
         LogManager().get_logger().info(f"安全指标[TLC]计算结果: {tlc_value}")
         return {"TLC": tlc_value}
     except Exception as e:
@@ -97,6 +105,8 @@ def calculate_ttb(data_processed) -> dict:
     try:
         safety = SafetyCalculator(data_processed)
         ttb_value = safety.get_ttb_value()
+        if safety.ttb_data:
+            safety.generate_metric_chart('TTB')
         LogManager().get_logger().info(f"安全指标[TTB]计算结果: {ttb_value}")
         return {"TTB": ttb_value}
     except Exception as e:
@@ -110,25 +120,14 @@ def calculate_tm(data_processed) -> dict:
     try:
         safety = SafetyCalculator(data_processed)
         tm_value = safety.get_tm_value()
+        if safety.tm_data:
+            safety.generate_metric_chart('TM')
         LogManager().get_logger().info(f"安全指标[TM]计算结果: {tm_value}")
         return {"TM": tm_value}
     except Exception as e:
         LogManager().get_logger().error(f"TM计算异常: {str(e)}", exc_info=True)
         return {"TM": None}
 
-# def calculate_MPrTTC(data_processed) -> dict:
-#     """计算MPrTTC (Model Predictive Time-to-Collision)"""
-#     if data_processed is None or not hasattr(data_processed, 'object_df'):
-#         return {"MPrTTC": None}
-#     try:
-#         safety = SafetyCalculator(data_processed)
-#         mprttc_value = safety.get_mprttc_value()
-#         LogManager().get_logger().info(f"安全指标[MPrTTC]计算结果: {mprttc_value}")
-#         return {"MPrTTC": mprttc_value}
-#     except Exception as e:
-#         LogManager().get_logger().error(f"MPrTTC计算异常: {str(e)}", exc_info=True)
-#         return {"MPrTTC": None}
-
 def calculate_dtc(data_processed) -> dict:
     """计算DTC (Distance to Collision)"""
     if data_processed is None or not hasattr(data_processed, 'object_df'):
@@ -168,40 +167,79 @@ def calculate_psd(data_processed) -> dict:
         LogManager().get_logger().error(f"PSD计算异常: {str(e)}", exc_info=True)
         return {"PSD": None}
 
+
+
 def calculate_collisionrisk(data_processed) -> dict:
     """计算碰撞风险"""
-    safety = SafetyCalculator(data_processed)
-    collision_risk_value = safety.get_collision_risk_value()
-    LogManager().get_logger().info(f"安全指标[collisionRisk]计算结果: {collision_risk_value}")
-    return {"collisionRisk": collision_risk_value}
+    if data_processed is None or not hasattr(data_processed, 'object_df'):
+        return {"collisionRisk": None}
+    try:
+        safety = SafetyCalculator(data_processed)
+        collision_risk_value = safety.get_collision_risk_value()
+        if safety.collision_risk_data:
+            safety.generate_metric_chart('collisionRisk')
+        LogManager().get_logger().info(f"安全指标[collisionRisk]计算结果: {collision_risk_value}")
+        return {"collisionRisk": collision_risk_value}
+    except Exception as e:
+        LogManager().get_logger().error(f"collisionRisk计算异常: {str(e)}", exc_info=True)
+        return {"collisionRisk": None}
 
 def calculate_lonsd(data_processed) -> dict:
     """计算纵向安全距离"""
     safety = SafetyCalculator(data_processed)
     lonsd_value = safety.get_lonsd_value()
+    if safety.lonsd_data:
+        safety.generate_metric_chart('LonSD')
     LogManager().get_logger().info(f"安全指标[LonSD]计算结果: {lonsd_value}")
     return {"LonSD": lonsd_value}
 
 def calculate_latsd(data_processed) -> dict:
     """计算横向安全距离"""
-    safety = SafetyCalculator(data_processed)
-    latsd_value = safety.get_latsd_value()
-    LogManager().get_logger().info(f"安全指标[LatSD]计算结果: {latsd_value}")
-    return {"LatSD": latsd_value}
+    if data_processed is None or not hasattr(data_processed, 'object_df'):
+        return {"LatSD": None}
+    try:
+        safety = SafetyCalculator(data_processed)
+        latsd_value = safety.get_latsd_value()
+        if safety.latsd_data:
+            # 只生成图表,数据导出由chart_generator处理
+            safety.generate_metric_chart('LatSD')
+        LogManager().get_logger().info(f"安全指标[LatSD]计算结果: {latsd_value}")
+        return {"LatSD": latsd_value}
+    except Exception as e:
+        LogManager().get_logger().error(f"LatSD计算异常: {str(e)}", exc_info=True)
+        return {"LatSD": None}
 
 def calculate_btn(data_processed) -> dict:
     """计算制动威胁数"""
-    safety = SafetyCalculator(data_processed)
-    btn_value = safety.get_btn_value()
-    LogManager().get_logger().info(f"安全指标[BTN]计算结果: {btn_value}")
-    return {"BTN": btn_value}
+    if data_processed is None or not hasattr(data_processed, 'object_df'):
+        return {"BTN": None}
+    try:
+        safety = SafetyCalculator(data_processed)
+        btn_value = safety.get_btn_value()
+        if safety.btn_data:
+            # 只生成图表,数据导出由chart_generator处理
+            safety.generate_metric_chart('BTN')
+        LogManager().get_logger().info(f"安全指标[BTN]计算结果: {btn_value}")
+        return {"BTN": btn_value}
+    except Exception as e:
+        LogManager().get_logger().error(f"BTN计算异常: {str(e)}", exc_info=True)
+        return {"BTN": None}
 
 def calculate_collisionseverity(data_processed) -> dict:
     """计算碰撞严重性"""
-    safety = SafetyCalculator(data_processed)
-    collision_severity_value = safety.get_collision_severity_value()
-    LogManager().get_logger().info(f"安全指标[collisionSeverity]计算结果: {collision_severity_value}")
-    return {"collisionSeverity": collision_severity_value}
+    if data_processed is None or not hasattr(data_processed, 'object_df'):
+        return {"collisionSeverity": None}
+    try:
+        safety = SafetyCalculator(data_processed)
+        collision_severity_value = safety.get_collision_severity_value()
+        if safety.collision_severity_data:
+            # 只生成图表,数据导出由chart_generator处理
+            safety.generate_metric_chart('collisionSeverity')
+        LogManager().get_logger().info(f"安全指标[collisionSeverity]计算结果: {collision_severity_value}")
+        return {"collisionSeverity": collision_severity_value}
+    except Exception as e:
+        LogManager().get_logger().error(f"collisionSeverity计算异常: {str(e)}", exc_info=True)
+        return {"collisionSeverity": None}
 
 
 class SafetyRegistry:
@@ -276,7 +314,6 @@ class SafetyCalculator:
         self.df = data_processed.object_df.copy()
         self.ego_df = data_processed.ego_data.copy()  # 使用copy()避免修改原始数据
         self.obj_id_list = data_processed.obj_id_list
-
         self.metric_list = [
             'TTC', 'MTTC', 'THW', 'TLC', 'TTB', 'TM', 'DTC', 'PET', 'PSD', 'LonSD', 'LatSD', 'BTN', 'collisionRisk', 'collisionSeverity'
         ]
@@ -303,6 +340,26 @@ class SafetyCalculator:
         self.frame_list = self.ego_df['simFrame'].values.tolist()
         self.collisionRisk = 0
         self.empty_flag = True
+        
+        # 初始化数据存储列表
+        self.ttc_data = []
+        self.mttc_data = []
+        self.thw_data = []
+        self.tlc_data = []
+        self.ttb_data = []
+        self.tm_data = []
+        self.lonsd_data = []
+        self.latsd_data = []
+        self.btn_data = []
+        self.collision_risk_data = []
+        self.collision_severity_data = []
+        
+        # 初始化安全事件记录表
+        self.unsafe_events_df = pd.DataFrame(columns=['start_time', 'end_time', 'start_frame', 'end_frame', 'type'])
+        
+        # 设置输出目录
+        self.output_dir = os.path.join(os.getcwd(), 'data')
+        os.makedirs(self.output_dir, exist_ok=True)
 
         self.logger.info("SafetyCalculator初始化完成,场景中包含自车的目标物一共为: %d", len(self.obj_id_list))
 
@@ -332,9 +389,21 @@ class SafetyCalculator:
         ego_decel_min = self.data_processed.vehicle_config["EGO_DECEL_MIN"]
         ego_decel_lon_max = self.data_processed.vehicle_config["EGO_DECEL_LON_MAX"]
         ego_decel_lat_max = self.data_processed.vehicle_config["EGO_DECEL_LAT_MAX"]
-        driver_reaction_time = self.data_processed.vehicle_config["RHO"]
         ego_decel_max = np.sqrt(ego_decel_lon_max ** 2 + ego_decel_lat_max ** 2)
-        x_relative_start_dist = self.ego_df["x_relative_dist"]
+        # x_relative_start_dist = self.ego_df["x_relative_start_dist"]
+        
+        # 设置安全指标阈值
+        self.safety_thresholds = {
+            'TTC': {'min': 1.5, 'max': None},  # TTC小于1.5秒视为危险
+            'MTTC': {'min': 1.5, 'max': None},  # MTTC小于1.5秒视为危险
+            'THW': {'min': 1.0, 'max': None},  # THW小于1.0秒视为危险
+            'LonSD': {'min': None, 'max': None},  # 根据实际情况设置
+            'LatSD': {'min': 0.5, 'max': None},  # LatSD小于0.5米视为危险
+            'BTN': {'min': None, 'max': 0.8},  # BTN大于0.8视为危险
+            'collisionRisk': {'min': None, 'max': 30},  # 碰撞风险大于30%视为危险
+            'collisionSeverity': {'min': None, 'max': 30}  # 碰撞严重性大于30%视为危险
+        }
 
         obj_dict = defaultdict(dict)
         obj_data_dict = self.df.to_dict('records')
@@ -343,8 +412,7 @@ class SafetyCalculator:
 
         df_list = []
         EGO_PLAYER_ID = 1
-        ramp_poss = self.ego_df[self.ego_df["link_type"] == 19]["link_coords"].drop_duplicates().tolist() # 寻找匝道的位置坐标
-        lane_poss = self.ego_df[self.ego_df["lane_type"] == 2]["lane_coords"].drop_duplicates().tolist() # 寻找匝道的位置坐标
+
         for frame_num in self.frame_list:
             ego_data = obj_dict[frame_num][EGO_PLAYER_ID]
             v1 = ego_data['v']
@@ -427,6 +495,7 @@ class SafetyCalculator:
                 # MPrTTC = self._cal_MPrTTC(x_relative_start_dist)
                 # PET = self._cal_PET(lane_posx1, lane_posy1, lane_posx2, lane_posy2, ramp_posx1, ramp_posy1, ramp_posx2, ramp_posy2, ego_posx, ego_posy, obj_posx, obj_posy, lane_width, delta_t, v1, v2, a1, a2)
                 PET = None
+
                 for lane_pos in lane_poss:
                     lane_posx1 = ast.literal_eval(lane_pos)[0][0]
                     lane_posy1 = ast.literal_eval(lane_pos)[0][1]
@@ -447,8 +516,6 @@ class SafetyCalculator:
                                             ramp_posy2, ego_posx, ego_posy, obj_posx, obj_posy, lane_width, delta_t, v1, v2, a1, a2)
                 PSD = self._cal_PSD(x_relative_start_dist, v1, ego_decel_lon_max)
 
-
-
                 LonSD = self._cal_longitudinal_safe_dist(v_ego_p, v_obj_p, rho, ego_accel_max, ego_decel_min, obj_decel_max)
 
                 lat_dist = 0.5
@@ -501,9 +568,16 @@ class SafetyCalculator:
                 obj_dict[frame_num][playerId]['LatSD'] = LatSD
                 obj_dict[frame_num][playerId]['BTN'] = abs(BTN)
 
-                collisionSeverity = 0
-                pr_death = 0
-                collisionRisk = 0
+                # TTC要进行筛选,否则会出现nan或者TTC过大的情况
+                if not TTC or TTC > 4000:  # threshold = 4258.41
+                    collisionSeverity = 0
+                    pr_death = 0
+                    collisionRisk = 0
+                else:
+                    result, error = spi.quad(self._normal_distribution, 0, TTC - Tc)
+                    collisionSeverity = 1 - result
+                    pr_death = self._death_pr(obj_type, vrel_projection_in_dist)
+                    collisionRisk = 0.4 * pr_death + 0.6 * collisionSeverity
 
                 obj_dict[frame_num][playerId]['collisionSeverity'] = collisionSeverity * 100
                 obj_dict[frame_num][playerId]['pr_death'] = pr_death * 100
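An isolated sketch of the severity/risk combination restored above. The project's _normal_distribution and the constant Tc are not shown in this diff, so a unit-variance normal centred at 1.3 s and Tc = 0.5 s are used purely as placeholders:

    import math
    import scipy.integrate as spi

    def normal_pdf(x, mu=1.3, sigma=1.0):
        # placeholder for self._normal_distribution (assumed shape)
        return math.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))

    def collision_severity_and_risk(ttc, pr_death, tc=0.5):
        if not ttc or ttc > 4000:          # same invalid-TTC guard as in the diff
            return 0.0, 0.0
        integral, _ = spi.quad(normal_pdf, 0, ttc - tc)
        severity = 1 - integral
        risk = 0.4 * pr_death + 0.6 * severity
        return severity, risk

    print(collision_severity_and_risk(ttc=1.0, pr_death=0.2))
    print(collision_severity_and_risk(ttc=10.0, pr_death=0.2))   # large TTC -> low severity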
@@ -518,28 +592,6 @@ class SafetyCalculator:
                     'collisionSeverity', 'pr_death', 'collisionRisk']
         self.df_safe = df_safe[col_list].reset_index(drop=True)
 
-    # 计算车辆从非匀速达到匀速的反应时间
-    def _cal_reaction_time_to_avgspeed(self, ego_df, threshold = 0.01):
-        ego_df = ego_df.reset_index(drop=True)
-        ego_df['v_change'] = ego_df['v'].diff()
-        # 初始化结果列表
-        uniform_speed_segments = []
-        start_index = 0
-
-        # 遍历数据,找出匀速部分
-        for i in range(1, len(ego_df)):
-            if ego_df['v_change'].iloc[i] > threshold:
-                if i - start_index > 1:  # 至少有两个数据点才能形成一个匀速段
-                    uniform_speed_segments.append(
-                        (ego_df.iloc[start_index]['simTime'], ego_df.iloc[i - 1]['simTime'], ego_df.iloc[start_index]['v']))
-                start_index = i
-
-        # 检查最后一个段
-        if len(ego_df) - start_index > 1:
-            uniform_speed_segments.append(
-                (ego_df.iloc[start_index]['simTime'], ego_df.iloc[-1]['simTime'], ego_df.iloc[start_index]['v']))
-        return uniform_speed_segments[-1][0] - ego_df['simTime'].iloc[0]
-
     def _cal_v_ego_projection(self, dx, dy, v_x1, v_y1):
         # 计算 AB 连线的向量 AB
         # dx = x2 - x1
@@ -651,27 +703,29 @@ class SafetyCalculator:
     def dist(self, x1, y1, x2, y2):
         dist = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
         return dist
-
-    # 求车辆与道路之间的横向距离
-    def horizontal_distance(self, posx1, posy1, posx2, posy2, posx, posy):
-        dist = np.sqrt((posx2 - posx1)(posy - posy1) - (posy2 - posy1)(posx - posy1))/np.sqrt((posx2 - posx1)**2 + (posy2 - posy1)**2)
-        return dist
-
-    # 求车辆与道路之间的纵向距离
-    def _is_alone_the_road(self, posx1, posy1, posx2, posy2, posx, posy):
-        pro_car = (posx2 - posx1)(posx - posx1) + (posy2 - posy1)(posy - posy1)
-        if pro_car > 0:
-            return True
-        else:
-            return False
-
-    def _is_in_the_road(self, posx1, posy1, posx2, posy2, posx, posy):
-        pro_obj1 = (posx2 - posx1)(posx - posx1) + (posy2 - posy1)(posy - posy1)
-        pro_obj2 = (posx1 - posx2)(posx - posx2) + (posy1 - posy2)(posy - posy2)
-        if pro_obj1 > 0 and pro_obj2 > 0:
-            return True
-        else:
-            return False
+        
+    
+    def generate_metric_chart(self, metric_name: str) -> None:
+        """生成指标图表
+        
+        Args:
+            metric_name: 指标名称
+        """
+        try:
+            # 确定输出目录
+            if self.output_dir is None:
+                self.output_dir = os.path.join(os.getcwd(), 'data')
+                os.makedirs(self.output_dir, exist_ok=True)
+            
+            # 调用图表生成函数
+            chart_path = generate_safety_chart_data(self, metric_name, self.output_dir)
+            if chart_path:
+                self.logger.info(f"{metric_name}图表已生成: {chart_path}")
+            else:
+                self.logger.warning(f"{metric_name}图表生成失败")
+                
+        except Exception as e:
+            self.logger.error(f"生成{metric_name}图表失败: {str(e)}", exc_info=True)
 
     # TTC (time to collision)
     def _cal_TTC(self, dist, vrel_projection_in_dist):
@@ -736,13 +790,30 @@ class SafetyCalculator:
         TM = (x_relative_start_dist0 + v2**2/(2*a2) - v1**2/(2*a1)) / v1
         return TM
 
-    # def _cal_MPrTTC(self, T=5, c = False, collision_dist = 5.99):
-    #     time_interval = self.ego_df['simTime'].tolist()[1] - self.ego_df['simTime'].tolist()[0]
-    #
-    #     for i in range(len(self.obj_id_list)):
-    #         for j in range(T):
-    #             MPrTTC = j * time_interval
+    def velocity(self, v_x, v_y):
+        v = math.sqrt(v_x ** 2 + v_y ** 2) * 3.6
+        return v
 
+    def _cal_longitudinal_safe_dist(self, v_ego_p, v_obj_p, rho, ego_accel_max, ego_decel_min, ego_decel_max):
+        lon_dist_min = v_ego_p * rho + ego_accel_max * (rho ** 2) / 2 + (v_ego_p + rho * ego_accel_max) ** 2 / (
+                2 * ego_decel_min) - v_obj_p ** 2 / (2 * ego_decel_max)
+        return lon_dist_min
+
+    def _cal_lateral_safe_dist(self, lat_dist, v_right, v_left, rho, a_right_lat_brake_min,
+                               a_left_lat_brake_min, a_lat_max):
+        # 检查除数是否为零
+        if a_right_lat_brake_min == 0 or a_left_lat_brake_min == 0:
+            return self._default_value('LatSD')  # 返回默认值
+            
+        v_right_rho = v_right + rho * a_lat_max
+        v_left_rho = v_left + rho * a_lat_max
+        dist_min = lat_dist + (
+            (v_right + v_right_rho) * rho / 2
+            + v_right_rho**2 / a_right_lat_brake_min / 2
+            + ((v_left + v_left_rho) * rho / 2)
+            + v_left_rho**2 / a_left_lat_brake_min / 2
+        )
+        return dist_min
+
     def _cal_DTC(self, v_on_dist, a_on_dist, t):
         if a_on_dist == 0:
             return None
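A quick numeric check of the longitudinal safe-distance expression moved above; the parameter values are illustrative, not the project's vehicle_config:

    def longitudinal_safe_dist(v_ego, v_obj, rho, a_accel_max, a_decel_min, a_decel_max):
        """d_min = v_ego*rho + a_max*rho^2/2 + (v_ego + rho*a_max)^2/(2*a_decel_min)
                   - v_obj^2/(2*a_decel_max), as in _cal_longitudinal_safe_dist."""
        return (v_ego * rho
                + a_accel_max * rho ** 2 / 2
                + (v_ego + rho * a_accel_max) ** 2 / (2 * a_decel_min)
                - v_obj ** 2 / (2 * a_decel_max))

    # ego at 20 m/s behind a lead vehicle at 15 m/s, 0.5 s response time,
    # 2 m/s^2 max acceleration, 4 m/s^2 comfortable braking, 8 m/s^2 emergency braking
    print(round(longitudinal_safe_dist(20.0, 15.0, 0.5, 2.0, 4.0, 8.0), 2))   # ~51.3 m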
@@ -772,33 +843,6 @@ class SafetyCalculator:
                 return PSD
             else:
                 return None
-
-
-    def velocity(self, v_x, v_y):
-        v = math.sqrt(v_x ** 2 + v_y ** 2) * 3.6
-        return v
-
-    def _cal_longitudinal_safe_dist(self, v_ego_p, v_obj_p, rho, ego_accel_max, ego_decel_min, ego_decel_max):
-        lon_dist_min = v_ego_p * rho + ego_accel_max * (rho ** 2) / 2 + (v_ego_p + rho * ego_accel_max) ** 2 / (
-                2 * ego_decel_min) - v_obj_p ** 2 / (2 * ego_decel_max)
-        return lon_dist_min
-
-    def _cal_lateral_safe_dist(self, lat_dist, v_right, v_left, rho, a_right_lat_brake_min,
-                               a_left_lat_brake_min, a_lat_max):
-        # 检查除数是否为零
-        if a_right_lat_brake_min == 0 or a_left_lat_brake_min == 0:
-            return self._default_value('LatSD')  # 返回默认值
-            
-        v_right_rho = v_right + rho * a_lat_max
-        v_left_rho = v_left + rho * a_lat_max
-        dist_min = lat_dist + (
-            (v_right + v_right_rho) * rho / 2
-            + v_right_rho**2 / a_right_lat_brake_min / 2
-            + ((v_left + v_right_rho) * rho / 2)
-            + v_left_rho**2 / a_left_lat_brake_min / 2
-        )
-        return dist_min
-
     # DRAC (decelerate required avoid collision)
     def _cal_DRAC(self, dist, vrel_projection_in_dist, len1, len2, width1, width2, o_x1, o_x2):
         dist_length = dist - (len2 / 2 - o_x2 + len1 / 2 + o_x1)  # 4.671
@@ -934,37 +978,92 @@ class SafetyCalculator:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('TTC')
         ttc_values = self.df_safe['TTC'].dropna()
-        return float(ttc_values.min()) if not ttc_values.empty else self._default_value('TTC')
+        ttc_value = float(ttc_values.min()) if not ttc_values.empty else self._default_value('TTC')
+        
+        # 收集TTC数据
+        if not ttc_values.empty:
+            self.ttc_data = []
+            for time, frame, ttc in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['TTC']):
+                if pd.notnull(ttc):
+                    self.ttc_data.append({'simTime': time, 'simFrame': frame, 'TTC': ttc})
+            
+        
+        return ttc_value
 
     def get_mttc_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('MTTC')
         mttc_values = self.df_safe['MTTC'].dropna()
-        return float(mttc_values.min()) if not mttc_values.empty else self._default_value('MTTC')
+        mttc_value = float(mttc_values.min()) if not mttc_values.empty else self._default_value('MTTC')
+        
+        # Collect MTTC data
+        if not mttc_values.empty:
+            self.mttc_data = []
+            for time, frame, mttc in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['MTTC']):
+                if pd.notnull(mttc):
+                    self.mttc_data.append({'simTime': time, 'simFrame': frame, 'MTTC': mttc})
+        
+        return mttc_value
 
     def get_thw_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('THW')
         thw_values = self.df_safe['THW'].dropna()
-        return float(thw_values.min()) if not thw_values.empty else self._default_value('THW')
+        thw_value = float(thw_values.min()) if not thw_values.empty else self._default_value('THW')
+        
+        # Collect THW data
+        if not thw_values.empty:
+            self.thw_data = []
+            for time, frame, thw in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['THW']):
+                if pd.notnull(thw):
+                    self.thw_data.append({'simTime': time, 'simFrame': frame, 'THW': thw})
+        
+        return thw_value
 
     def get_tlc_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('TLC')
         tlc_values = self.df_safe['TLC'].dropna()
-        return float(tlc_values.min()) if not tlc_values.empty else self._default_value('TLC')
+        tlc_value = float(tlc_values.min()) if not tlc_values.empty else self._default_value('TLC')
+        
+        # Collect TLC data
+        if not tlc_values.empty:
+            self.tlc_data = []
+            for time, frame, tlc in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['TLC']):
+                if pd.notnull(tlc):
+                    self.tlc_data.append({'simTime': time, 'simFrame': frame, 'TLC': tlc})
+        
+        return tlc_value
 
     def get_ttb_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('TTB')
         ttb_values = self.df_safe['TTB'].dropna()
-        return float(ttb_values.min()) if not ttb_values.empty else self._default_value('TTB')
+        ttb_value = float(ttb_values.min()) if not ttb_values.empty else self._default_value('TTB')
+        
+        # Collect TTB data
+        if not ttb_values.empty:
+            self.ttb_data = []
+            for time, frame, ttb in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['TTB']):
+                if pd.notnull(ttb):
+                    self.ttb_data.append({'simTime': time, 'simFrame': frame, 'TTB': ttb})
+        
+        return ttb_value
 
     def get_tm_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('TM')
         tm_values = self.df_safe['TM'].dropna()
-        return float(tm_values.min()) if not tm_values.empty else self._default_value('TM')
+        tm_value = float(tm_values.min()) if not tm_values.empty else self._default_value('TM')
+        
+        # Collect TM data
+        if not tm_values.empty:
+            self.tm_data = []
+            for time, frame, tm in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['TM']):
+                if pd.notnull(tm):
+                    self.tm_data.append({'simTime': time, 'simFrame': frame, 'TM': tm})
+        
+        return tm_value
 
     def get_dtc_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
@@ -988,30 +1087,76 @@ class SafetyCalculator:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('LonSD')
         lonsd_values = self.df_safe['LonSD'].dropna()
-        return float(lonsd_values.mean()) if not lonsd_values.empty else self._default_value('LonSD')
+        lonsd_value = float(lonsd_values.mean()) if not lonsd_values.empty else self._default_value('LonSD')
+        
+        # Collect LonSD data
+        if not lonsd_values.empty:
+            self.lonsd_data = []
+            for time, frame, lonsd in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['LonSD']):
+                if pd.notnull(lonsd):
+                    self.lonsd_data.append({'simTime': time, 'simFrame': frame, 'LonSD': lonsd})
+        
+        return lonsd_value
 
     def get_latsd_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('LatSD')
         latsd_values = self.df_safe['LatSD'].dropna()
         # Use the minimum rather than the mean, consistent with safety1.py
-        return float(latsd_values.min()) if not latsd_values.empty else self._default_value('LatSD')
+        latsd_value = float(latsd_values.min()) if not latsd_values.empty else self._default_value('LatSD')
+        
+        # Collect LatSD data
+        if not latsd_values.empty:
+            self.latsd_data = []
+            for time, frame, latsd in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['LatSD']):
+                if pd.notnull(latsd):
+                    self.latsd_data.append({'simTime': time, 'simFrame': frame, 'LatSD': latsd})
+
+        return latsd_value
 
     def get_btn_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('BTN')
         btn_values = self.df_safe['BTN'].dropna()
-        return float(btn_values.max()) if not btn_values.empty else self._default_value('BTN')
+        btn_value = float(btn_values.max()) if not btn_values.empty else self._default_value('BTN')
+        
+        # Collect BTN data
+        if not btn_values.empty:
+            self.btn_data = []
+            for time, frame, btn in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['BTN']):
+                if pd.notnull(btn):
+                    self.btn_data.append({'simTime': time, 'simFrame': frame, 'BTN': btn})
+
+        return btn_value
 
     def get_collision_risk_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('collisionRisk')
         risk_values = self.df_safe['collisionRisk'].dropna()
-        return float(risk_values.max()) if not risk_values.empty else self._default_value('collisionRisk')
+        risk_value = float(risk_values.max()) if not risk_values.empty else self._default_value('collisionRisk')
+        
+        # Collect collision-risk data
+        if not risk_values.empty:
+            self.collision_risk_data = []
+            for time, frame, risk in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['collisionRisk']):
+                if pd.notnull(risk):
+                    self.collision_risk_data.append({'simTime': time, 'simFrame': frame, 'collisionRisk': risk})
+
+        return risk_value
 
     def get_collision_severity_value(self) -> float:
         if self.empty_flag or self.df_safe is None:
             return self._default_value('collisionSeverity')
         severity_values = self.df_safe['collisionSeverity'].dropna()
-        return float(severity_values.max()) if not severity_values.empty else self._default_value('collisionSeverity')
+        severity_value = float(severity_values.max()) if not severity_values.empty else self._default_value('collisionSeverity')
+        
+        # Collect collision-severity data
+        if not severity_values.empty:
+            self.collision_severity_data = []
+            for time, frame, severity in zip(self.df_safe['simTime'], self.df_safe['simFrame'], self.df_safe['collisionSeverity']):
+                if pd.notnull(severity):
+                    self.collision_severity_data.append({'simTime': time, 'simFrame': frame, 'collisionSeverity': severity})
+
+        return severity_value
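Every getter above repeats the same aggregate-then-collect pattern; a possible refactor is sketched below. The helper name, its signature and the example calls are assumptions, not code from this repository:

import pandas as pd

def collect_metric(df: pd.DataFrame, column: str, agg: str, default: float):
    """Return (aggregated value, per-frame records) for one metric column of df."""
    values = df[column].dropna()
    if values.empty:
        return default, []
    agg_value = float(getattr(values, agg)())  # agg is e.g. 'min', 'max' or 'mean'
    records = [
        {'simTime': t, 'simFrame': f, column: v}
        for t, f, v in zip(df['simTime'], df['simFrame'], df[column])
        if pd.notnull(v)
    ]
    return agg_value, records

# Assumed usage inside the calculator:
#   ttc_value, self.ttc_data = collect_metric(self.df_safe, 'TTC', 'min', self._default_value('TTC'))
#   btn_value, self.btn_data = collect_metric(self.df_safe, 'BTN', 'max', self._default_value('BTN'))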
 

+ 1 - 1
scripts/evaluator_enhanced.py

@@ -560,7 +560,7 @@ def main():
     parser.add_argument(
         "--dataPath",
         type=str,
-        default="/home/kevin/kevin/zhaoyuan/sqlite3_demo/docker_build/data/V2V_CSAE53-2020_ForwardCollisionW_LST_01-01",
+        default=r"D:\Kevin\zhaoyuan\data\V2V_CSAE53-2020_ForwardCollisionW_LST_01-01",
         help="Input data directory",
     )
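The new default path above is tied to one Windows machine; a hedged alternative would derive the default from an environment variable or a data directory next to the script (the variable name EVAL_DATA_PATH and the layout are assumptions):

import argparse
import os
from pathlib import Path

# Hypothetical cross-platform default for --dataPath: honour an environment
# override if set, otherwise fall back to a data/ folder next to the script.
default_data = os.environ.get(
    "EVAL_DATA_PATH",
    str(Path(__file__).resolve().parent / "data" /
        "V2V_CSAE53-2020_ForwardCollisionW_LST_01-01"),
)

parser = argparse.ArgumentParser()
parser.add_argument("--dataPath", type=str, default=default_data,
                    help="Input data directory")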
     parser.add_argument(