cicv 1 month ago
commit
17ec99d6ec

+ 412 - 0
config/metric_config.yaml

@@ -0,0 +1,412 @@
+vehicle:
+  # Vehicle parameter configuration
+  CAR_WIDTH: 1.872       # Vehicle body width (unit: m)
+  CAR_LENGTH: 4.924      # Vehicle body length (unit: m)
+  CAR_HEIGHT: 1.3        # Vehicle body height (unit: m), newly added
+  CAR_OFFX: 1.321        # Longitudinal distance from the vehicle reference point to the geometric center (unit: m), newly added
+  RHO: 0.3               # Driver braking reaction time (unit: s)
+  EGO_ACCEL_MAX: 6       # Maximum ego throttle acceleration (unit: m/s²)
+  OBJ_DECEL_MAX: 8       # Maximum braking deceleration of the lead vehicle (unit: m/s²)
+  EGO_DECEL_MIN: 1       # Minimum ego braking deceleration (unit to be confirmed: m/s²)
+  EGO_DECEL_LON_MAX: 8   # Maximum ego longitudinal braking deceleration (unit: m/s²)
+  EGO_DECEL_LAT_MAX: 1   # Maximum ego lateral braking deceleration (unit: m/s²)
+  EGO_WHEELBASS: 2.8     # Ego wheelbase (unit: m)
+
+T_threshold:
+  T0_threshold: 0  # pass threshold for the T0 level
+  T1_threshold: 2  # pass threshold for the T1 level
+  T2_threshold: 5  # pass threshold for the T2 level
+
+
+safety:
+  name: safety
+  priority: 0
+  safeTime:
+    name: safetime
+    priority: 0
+    TTC:
+      name: TTC
+      priority: 0
+      max: 2000.0
+      min: 2.86
+    MTTC:
+      name: MTTC
+      priority: 0
+      max: 2000.0
+      min: 3.0
+    THW:
+      name: THW
+      priority: 0
+      max: 2000.0
+      min: 1.5
+  safeDistance:
+    name: safeDistance
+    priority: 0
+    # LonSD:
+    #   name: LonSD
+    #   priority: 0
+    #   max: 2000.0
+    #   min: 10.0
+    LatSD:
+      name: LatSD 
+      priority: 0
+      max: 2000.0
+      min: 2.0
+  safeAcceleration:
+    name: safeAcceleration
+    priority: 0
+    # DRAC:
+    #   name: DRAC
+    #   priority: 0
+    #   max: 5.0
+    #   min: -2000.0
+    BTN:
+      name: BTN
+      priority: 0
+      max: 1.0
+      min: -2000.0
+    # STN:
+    #   name: STN
+    #   priority: 0
+    #   max: 1.0
+    #   min: -2000.0
+  safeProbability:
+    name: safeProbability
+    priority: 0
+    collisionRisk:
+      name: collisionRisk
+      priority: 0
+      max: 10.0
+      min: 0.0
+    collisionSeverity:
+      name: collisionSeverity
+      priority: 0
+      max: 10.0
+      min: 0.0
+comfort:
+  name: comfort
+  priority: 0
+  comfortLat:
+    name: comfortLat
+    priority: 0
+    Weaving:
+      name: Weaving
+      priority: 0
+      max: 0
+      min: 0
+    shake:
+      name: shake
+      priority: 0
+      max: 0
+      min: 0
+  comfortLon:
+    name: comfortLon
+    priority: 0
+    cadence:
+      name: cadence
+      priority: 0
+      max: 0
+      min: 0
+    slamBrake:
+      name: slamBrake 
+      priority: 0
+      max: 0
+      min: 0
+    slamAccelerate:
+      name: slamAccelerate
+      priority: 0
+      max: 0
+      min: 0
+efficient:
+  name: efficient
+  priority: 0
+  drivingMode:
+    name: drivingMode
+    priority: 0
+    max_speed:
+      name: maxSpeed
+      priority: 0
+      max: 0.0
+      min: 0.0
+    devation_speed:
+      name: deviationSpeed
+      priority: 0
+      max: 0.0
+      min: 0.0
+    averagedSpeed:
+      name: averagedSpeed
+      priority: 0
+      max: 80.0
+      min: 30.0
+  parkingMode:
+    name: parkingMode
+    priority: 0
+    stopDuration:
+      name: stopDuration
+      priority: 0
+      max: 1
+      min: 0
+function:
+  name: function
+  priority: 0
+  scenario:
+    name: ForwardCollision
+    priority: 0
+    latestWarningDistance_TTC_LST:
+      name: latestWarningDistance_TTC_LST
+      priority: 0
+      max: 3.11
+      min: 1.89
+    earliestWarningDistance_TTC_LST:
+      name: earliestWarningDistance_TTC_LST
+      priority: 0
+      max: 3.11
+      min: 1.89
+    latestWarningDistance_LST:
+      name: latestWarningDistance_LST
+      priority: 0
+      max: 17.29
+      min: 10.51
+    earliestWarningDistance_LST:
+      name: earliestWarningDistance_LST
+      priority: 0
+      max: 17.29
+      min: 10.51
+traffic:
+  name: traffic
+  priority: 0
+  # Major violations (12 points)
+  majorViolation:
+    name: majorViolation
+    priority: 0
+    # urbanExpresswayOrHighwaySpeedOverLimit50: exceeding the speed limit by more than 50% on an expressway or urban expressway;
+    urbanExpresswayOrHighwaySpeedOverLimit50:
+      name: urbanExpresswayOrHighwaySpeedOverLimit50
+      priority: 0
+      max: 0
+      min: 0
+    # urbanExpresswayOrHighwayReverse: reversing on an expressway or urban expressway;
+    urbanExpresswayOrHighwayReverse:
+      name: higwayreverse
+      priority: 0
+      max: 0
+      min: 0
+    # urbanExpresswayOrHighwayDrivingAgainst: driving against traffic on an expressway or urban expressway;
+    urbanExpresswayOrHighwayDrivingAgainst:
+      name: higwayDrivingAgainst
+      priority: 0
+      max: 0
+      min: 0
+
+  # Serious violations (9 points)
+  seriousViolation:
+    name: seriousViolation
+    priority: 0
+    # urbanExpresswayOrHighwayDrivingLaneStopped: stopping in a travel lane on an expressway or urban expressway;
+    urbanExpresswayOrHighwayDrivingLaneStopped:
+      name: urbanExpresswayOrHighwayDrivingLaneStopped
+      priority: 0
+      max: 0
+      min: 0
+    # urbanExpresswayOrHighwayEmergencyLaneStopped: stopping in the emergency lane on an expressway or urban expressway;
+    urbanExpresswayOrHighwayEmergencyLaneStopped:
+      name: highwayEmergencyLaneStopped
+      priority: 0
+      max: 0
+      min: 0
+
+  # Dangerous violations (6 points)
+  dangerousViolation:
+    name: dangerousViolation
+    priority: 0
+    # urbanExpresswayEmergencyLaneDriving: driving in the emergency lane of an urban expressway or expressway in a non-emergency situation;
+    urbanExpresswayEmergencyLaneDriving:
+      name: urbanExpresswayEmergencyLaneDriving
+      priority: 0
+      max: 0
+      min: 0
+    # trafficSignalViolation: violating traffic signal rules on a signal-controlled road;
+    trafficSignalViolation:
+      name: trafficSignalViolation
+      priority: 0
+      max: 0
+      min: 0
+    # urbanExpresswayOrHighwaySpeedOverLimit20to50: exceeding the speed limit by 20%-50% on an expressway or urban expressway;
+    urbanExpresswayOrHighwaySpeedOverLimit20to50:
+      name: urbanExpresswayOrHighwaySpeedOverLimit20to50
+      priority: 0
+      max: 0
+      min: 0
+    # generalRoadSpeedOverLimit50: exceeding the speed limit by more than 50% on roads other than expressways or urban expressways;
+    generalRoadSpeedOverLimit50:
+      name: generalRoadSpeedOverLimit50
+      priority: 0
+      max: 0
+      min: 0
+
+  # General violations (3 points)
+  generalViolation:
+    name: generalViolation
+    priority: 0
+    # generalRoadSpeedOverLimit20to50: exceeding the speed limit by 20% to 50% on roads other than expressways or urban expressways;
+    generalRoadSpeedOverLimit20to50:
+      name: generalRoadSpeedOverLimit20to50
+      priority: 0
+      max: 0
+      min: 0
+    # urbanExpresswayOrHighwaySpeedUnderLimit: driving below the prescribed minimum speed on an expressway or urban expressway (3-point penalty);
+    urbanExpresswayOrHighwaySpeedUnderLimit:
+      name: UrbanExpresswayOrHighwaySpeedUnderLimit
+      priority: 0
+      max: 0
+      min: 0
+    # illegalDrivingOrParkingAtCrossroads: failing to drive or stop as required at an intersection, or stopping on the stop line or inside the intersection when meeting a stop signal;
+    illegalDrivingOrParkingAtCrossroads:
+      name: illegalDrivingOrParkingAtCrossroads
+      priority: 0
+      max: 0
+      min: 0
+    # Overtaking violations
+    # overtake_on_right: overtaking the lead vehicle on its right side
+    overtake_on_right:
+      name: overtake_on_right
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_when_turn_around: overtaking while the lead vehicle is making a U-turn
+    overtake_when_turn_around:
+      name: overtake_when_turn_around
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_when_passing_car: overtaking while the lead vehicle is passing
+    overtake_when_passing_car:
+      name: overtake_when_passing_car
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_in_forbid_lane: overtaking in a lane that should not be used, with the vehicle ahead driving slowly
+    overtake_in_forbid_lane:
+      name: overtake_in_forbid_lane
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_in_ramp: overtaking on a ramp
+    overtake_in_ramp:
+      name: overtake_in_ramp
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_in_tunnel: overtaking in a tunnel
+    overtake_in_tunnel:
+      name: overtake_in_tunnel
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_on_accelerate_lane: overtaking in an acceleration lane
+    overtake_on_accelerate_lane:
+      name: overtake_on_accelerate_lane
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_on_decelerate_lane: overtaking in a deceleration lane
+    overtake_on_decelerate_lane:
+      name: overtake_on_decelerate_lane
+      priority: 0
+      max: 0
+      min: 0
+    # overtake_in_different_senerios: overtaking in various intersection scenarios
+    overtake_in_different_senerios:
+      name: overtake_in_different_senerios
+      priority: 0
+      max: 0
+      min: 0
+    # Slow-down and yield violations
+    # slow_down_in_crosswalk: failing to slow down when passing a pedestrian crosswalk
+    slow_down_in_crosswalk:
+      name: slow_down_in_crosswalk
+      priority: 0
+      max: 0
+      min: 0
+    # avoid_pedestrian_in_crosswalk: failing to stop and yield to a pedestrian crossing a crosswalk
+    avoid_pedestrian_in_crosswalk:
+      name: avoid_pedestrian_in_crosswalk
+      priority: 0
+      max: 0
+      min: 0
+    # avoid_pedestrian_in_the_road: failing to yield to a pedestrian crossing a road without traffic signals
+    avoid_pedestrian_in_the_road:
+      name: avoid_pedestrian_in_the_road
+      priority: 0
+      max: 0
+      min: 0
+    # aviod_pedestrian_when_turning: a turning vehicle failing to let pedestrians go first
+    aviod_pedestrian_when_turning:
+      name: aviod_pedestrian_when_turning
+      priority: 0
+      max: 0
+      min: 0
+
+    # Traffic sign violations
+    # NoStraightThrough: going straight where a no-straight-through sign is posted
+    NoStraightThrough:
+      name: NoStraightThrough
+      priority: 0
+      max: 0
+      min: 0
+    
+    # SpeedLimitViolation: violating the posted speed limit
+    SpeedLimitViolation:
+      name: SpeedLimitViolation
+      priority: 0
+      max: 0
+      min: 0
+
+    # MinimumSpeedLimitViolation: violating the minimum speed limit
+    MinimumSpeedLimitViolation:
+      name: MinimumSpeedLimitViolation
+      priority: 0
+      max: 0
+      min: 0
+  # Minor violations (1 point)
+  minorViolation:
+    name: minorViolation
+    priority: 0
+    noUTurnViolation:
+      # noUTurnViolation: making a U-turn where U-turns are prohibited
+      # ("NoU" stands for "No U-Turn"; "TurnViolation" marks the maneuver as a violation)
+      name: noUTurnViolation
+      priority: 0
+      max: 0
+      min: 0
+
+
+  # Warning-level violations (0 points)
+  warningViolation:
+    name: warningViolation
+    priority: 0
+    # generalLimit60RoadSpeedOverLimit0to50: exceeding the speed limit by less than 50% on a road with a limit below 60 km/h; this metric is not implemented for now
+    # generalLimit60RoadSpeedOverLimit0to50:
+    #   name: generalRoadSpeedOverLimit20to50
+    #   priority: 0
+    #   max: 0
+    #   min: 0
+    # urbanExpresswayOrHighwaySpeedOverLimit0to20: exceeding the speed limit by less than 20% on an expressway or urban expressway
+    urbanExpresswayOrHighwaySpeedOverLimit0to20:
+      name: urbanExpresswayOrHighwaySpeedOverLimit0to20
+      priority: 0
+      max: 0
+      min: 0
+    # urbanExpresswayOrHighwayRideLaneDivider: straddling a lane divider line on an expressway or urban expressway
+    urbanExpresswayOrHighwayRideLaneDivider:
+      name: urbanExpresswayOrHighwayRideLaneDivider
+      priority: 0
+      max: 0
+      min: 0
+    # generalRoadIrregularLaneUse: not keeping to the prescribed lane on roads other than expressways or urban expressways; here this refers to occupying a non-motorized-vehicle lane;
+    generalRoadIrregularLaneUse:
+      name: generalRoadIrregularLaneUse
+      priority: 0
+      max: 0
+      min: 0
+
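The sections above are consumed by modules/lib/data_process.py later in this commit, which splits the file into per-module configs and merges in the shared T_threshold block. A minimal sketch of that access pattern (only the path and keys defined in this file are assumed):

    import yaml

    with open("config/metric_config.yaml", "r") as f:
        full_config = yaml.safe_load(f)

    # Shared pass thresholds for the T0/T1/T2 levels
    t_threshold = {"T_threshold": full_config["T_threshold"]}

    # Each evaluation module gets its own section plus the shared thresholds
    safety_config = {"safety": full_config["safety"], **t_threshold}
    print(full_config["vehicle"]["CAR_WIDTH"])           # 1.872
    print(safety_config["T_threshold"]["T0_threshold"])  # 0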

+ 88 - 0
modules/config/config.py

@@ -0,0 +1,88 @@
+
+# List of safety metrics
+SAFETY_METRIC_LIST = [
+    "TTC",
+    "MTTC",
+    "THW",
+    "LonSD",
+    "LatSD",
+    "DRAC",
+    "BTN",
+    "STN",
+    "collisionRisk",
+    "collisionSeverity",
+]
+
+
+COMFORT_INFO = [
+    "simTime",
+    "simFrame",
+    "speedX",
+    "speedY",
+    "accelX",
+    "accelY",
+    "curvHor",
+    "lightMask",
+    "v",
+    "lat_acc",
+    "lon_acc",
+    "time_diff",
+    "lon_acc_diff",
+    "lon_acc_roc",
+    "speedH",
+    "accelH",
+]
+
+
+OVERTAKE_INFO = [
+    "simTime",
+    "simFrame",
+    "playerId",
+    "speedX",
+    "speedY",
+    "posX",
+    "posY",
+    "posH",
+    "lane_id",
+    "lane_type",
+    "road_type",
+    "interid",
+    "crossid",
+]
+SLOWDOWN_INFO = [
+    "simTime",
+    "simFrame",
+    "playerId",
+    "speedX",
+    "speedY",
+    "posX",
+    "posY",
+    "crossid",
+    "lane_type",
+]
+TURNAROUND_INFO = [
+    "simTime",
+    "simFrame",
+    "playerId",
+    "speedX",
+    "speedY",
+    "posX",
+    "posY",
+    "sign_type1",
+    "lane_type",
+]
+
+TRFFICSIGN_INFO = [
+    "simTime",
+    "simFrame",
+    "playerId",
+    "speedX",
+    "speedY",
+    "v",
+    "posX",
+    "posY",
+    "sign_type1",
+    "sign_ref_link",
+    "sign_x",
+    "sign_y",
+]
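These column lists act as column projections over the merged simulation log; modules/metric/comfort.py below, for instance, selects config.COMFORT_INFO from the ego frame. A small illustration, assuming a DataFrame that already carries the listed columns (derived columns such as lat_acc are normally added by DataPreprocessing first):

    import pandas as pd
    from modules.config import config

    merged = pd.read_csv("merged_ObjState.csv")   # file name as read by data_process.py
    ego = merged[merged["playerId"] == 1]         # playerId 1 is treated as the ego vehicle
    available = [c for c in config.COMFORT_INFO if c in ego.columns]
    comfort_view = ego[available].copy()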

+ 185 - 0
modules/lib/common.py

@@ -0,0 +1,185 @@
+import json
+from typing import List, Dict, Tuple
+
+import numpy as np
+import pandas as pd
+
+        
+def dict2json(data_dict: Dict, file_path: str) -> None:
+    """
+    Serialize a dictionary to JSON and save it to a file.
+
+    Args:
+        data_dict (dict): The dictionary to serialize.
+        file_path (str): Path of the JSON file to write.
+    """
+    try:
+        with open(file_path, "w", encoding="utf-8") as json_file:
+            json.dump(data_dict, json_file, ensure_ascii=False, indent=4)
+        print(f"JSON file saved to {file_path}")
+    except Exception as e:
+        print(f"Error while saving JSON file: {e}")
+
+
+def get_interpolation(x: float, point1: Tuple[float, float], point2: Tuple[float, float]) -> float:
+    """
+    Determine a linear equation from two points and evaluate it at x.
+
+    Args:
+        x: Value of the independent variable.
+        point1: Coordinates of the first point.
+        point2: Coordinates of the second point.
+
+    Returns:
+        y: Value of the dependent variable.
+    """
+    try:
+        k = (point1[1] - point2[1]) / (point1[0] - point2[0])
+        b = (point1[0] * point2[1] - point1[1] * point2[0]) / (point1[0] - point2[0])
+        return x * k + b
+    except Exception as e:
+        return f"Error: {str(e)}"
+
+
+def get_frame_with_time(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
+    """
+    Merge two DataFrames on their time columns and return the result.
+
+    Args:
+        df1: DataFrame containing start_time and end_time.
+        df2: DataFrame containing simTime and simFrame.
+
+    Returns:
+        The merged DataFrame.
+    """
+    df1_start = df1.merge(df2[["simTime", "simFrame"]], left_on="start_time", right_on="simTime")
+    df1_start = df1_start[["start_time", "simFrame"]].rename(columns={"simFrame": "start_frame"})
+
+    df1_end = df1.merge(df2[["simTime", "simFrame"]], left_on="end_time", right_on="simTime")
+    df1_end = df1_end[["end_time", "simFrame"]].rename(columns={"simFrame": "end_frame"})
+
+    return pd.concat([df1_start, df1_end], axis=1)
+
+
+class PolynomialCurvatureFitting:
+    def __init__(self, data_path: str, degree: int = 3):
+        self.data_path = data_path
+        self.degree = degree
+        self.data = pd.read_csv(self.data_path)
+        self.points = self.data[['centerLine_x', 'centerLine_y']].values
+        self.x_data, self.y_data = self.points[:, 0], self.points[:, 1]
+
+    def curvature(self, coefficients: np.ndarray, x: float) -> float:
+        """
+        Compute the curvature of the polynomial at x.
+
+        Args:
+            coefficients: Polynomial coefficients.
+            x: Value of the independent variable.
+
+        Returns:
+            The curvature value.
+        """
+        first_derivative = np.polyder(coefficients)
+        second_derivative = np.polyder(first_derivative)
+        return np.abs(np.polyval(second_derivative, x)) / (1 + np.polyval(first_derivative, x) ** 2) ** (3 / 2)
+
+    def polynomial_fit(self, x_window: np.ndarray, y_window: np.ndarray) -> Tuple[np.ndarray, np.poly1d]:
+        """
+        Fit a polynomial to the given window of data.
+
+        Args:
+            x_window: x data within the window.
+            y_window: y data within the window.
+
+        Returns:
+            The polynomial coefficients and the polynomial object.
+        """
+        coefficients = np.polyfit(x_window, y_window, self.degree)
+        return coefficients, np.poly1d(coefficients)
+
+    def find_best_window(self, point: Tuple[float, float], window_size: int) -> int:
+        """
+        Find the starting index of the best-matching window.
+
+        Args:
+            point: Coordinates of the target point.
+            window_size: Size of the window.
+
+        Returns:
+            Starting index of the best window.
+        """
+        x1, y1 = point
+        window_means = np.array([
+            (np.mean(self.x_data[start:start + window_size]), np.mean(self.y_data[start:start + window_size]))
+            for start in range(len(self.x_data) - window_size + 1)
+        ])
+        distances = np.sqrt((x1 - window_means[:, 0]) ** 2 + (y1 - window_means[:, 1]) ** 2)
+        return np.argmin(distances)
+
+    def find_projection(self, x_point: float, y_point: float, polynomial: np.poly1d, x_data_range: Tuple[float, float], search_step: float = 0.0001) -> Tuple[float, float, float]:
+        """
+        Find the projection of the target point onto the polynomial curve.
+
+        Args:
+            x_point: x coordinate of the target point.
+            y_point: y coordinate of the target point.
+            polynomial: The polynomial object.
+            x_data_range: Range of x values to search.
+            search_step: Search step size.
+
+        Returns:
+            The coordinates of the projection point and the minimum distance.
+        """
+        x_values = np.arange(x_data_range[0], x_data_range[1], search_step)
+        y_values = polynomial(x_values)
+        distances = np.sqrt((x_point - x_values) ** 2 + (y_point - y_values) ** 2)
+        min_idx = np.argmin(distances)
+        return x_values[min_idx], y_values[min_idx], distances[min_idx]
+
+    def fit_and_project(self, points: List[Tuple[float, float]], window_size: int) -> List[Dict]:
+        """
+        Fit a polynomial and compute the projection for each point.
+
+        Args:
+            points: List of target points.
+            window_size: Size of the window.
+
+        Returns:
+            A list of dicts with projection point, curvature, curvature change, and minimum distance.
+        """
+        results = []
+        for point in points:
+            best_start = self.find_best_window(point, window_size)
+            x_window = self.x_data[best_start:best_start + window_size]
+            y_window = self.y_data[best_start:best_start + window_size]
+            coefficients, polynomial = self.polynomial_fit(x_window, y_window)
+            proj_x, proj_y, min_distance = self.find_projection(point[0], point[1], polynomial, (min(x_window), max(x_window)))
+            curvature_value = self.curvature(coefficients, proj_x)
+            second_derivative_coefficients = np.polyder(np.polyder(coefficients))
+            curvature_change_value = np.polyval(second_derivative_coefficients, proj_x)
+
+            results.append({
+                'projection': (proj_x, proj_y),
+                'curvHor': curvature_value,
+                'curvHorDot': curvature_change_value,
+                'coefficients': coefficients,
+                'laneOffset': min_distance
+            })
+
+        return results
+
+    
+
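+    def plot_results(self, points, results):
+        """
+        Minimal plotting sketch: __main__ below calls plot_results, but this
+        commit does not define it, so this is only one possible matplotlib
+        implementation, not the author's original version.
+        """
+        import matplotlib.pyplot as plt
+
+        plt.plot(self.x_data, self.y_data, label="center line")
+        plt.scatter([p[0] for p in points], [p[1] for p in points],
+                    s=10, c="tab:orange", label="query points")
+        plt.scatter([r['projection'][0] for r in results],
+                    [r['projection'][1] for r in results],
+                    s=10, c="tab:green", label="projections")
+        plt.axis("equal")
+        plt.legend()
+        plt.show()
+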
+if __name__ == "__main__":
+    data_path = "/home/kevin/kevin/zhaoyuan/zhaoyuan/data/raw/data/LaneMap.csv"
+    point_path = "/home/kevin/kevin/zhaoyuan/zhaoyuan/data/raw/data/EgoMap.csv"
+
+    points_data = pd.read_csv(point_path)
+    points = points_data[['posX', 'posY']].values
+
+    window_size = 4
+
+    fitting_instance = PolynomialCurvatureFitting(data_path)
+    projections = fitting_instance.fit_and_project(points, window_size)
+    fitting_instance.plot_results(points, projections)

+ 229 - 0
modules/lib/data_process.py

@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##################################################################
+#
+# Copyright (c) 2024 CICV, Inc. All Rights Reserved
+#
+##################################################################
+"""
+@Authors:           zhanghaiwen(zhanghaiwen@china-icv.cn)
+@Date:              2024/10/17
+@Last Modified:     2024/10/17
+@Summary:           Evaluation functions
+"""
+
+import os
+
+import numpy as np
+import pandas as pd
+
+import yaml
+
+
+
+from modules.lib.log_manager import LogManager
+
+# from lib import log  # make sure this path is correct, or adjust it
+# logger = None  # initialized to None
+
+
+class DataPreprocessing:
+    def __init__(self, data_path, config_path):
+        # Initialize paths and data containers
+        # self.logger = log.get_logger()
+        
+        self.data_path = data_path
+        self.case_name = os.path.basename(os.path.normpath(data_path))
+
+        self.config_path = config_path
+
+        # Initialize DataFrames
+        self.object_df = pd.DataFrame()
+        self.driver_ctrl_df = pd.DataFrame()
+        self.vehicle_sys_df = pd.DataFrame()
+        self.ego_data_df = pd.DataFrame()
+
+        # Environment data
+        self.lane_info_df = pd.DataFrame()
+        self.road_mark_df = pd.DataFrame()
+        self.road_pos_df = pd.DataFrame()
+        self.traffic_light_df = pd.DataFrame()
+        self.traffic_signal_df = pd.DataFrame()
+
+        self.vehicle_config = {}
+        self.safety_config = {}
+        self.comfort_config = {}
+        self.efficient_config = {}
+        self.function_config = {}
+        self.traffic_config = {}
+
+        # Initialize data for later processing
+        self.obj_data = {}
+        self.ego_data = {}
+        self.obj_id_list = []
+
+        # Data quality level
+        self.data_quality_level = 15
+
+        # Process mode and prepare report information
+        self._process_mode()
+        self._get_yaml_config()
+        self.report_info = self._get_report_info(self.obj_data.get(1, pd.DataFrame()))
+
+    def _process_mode(self):
+        """Handle different processing modes."""
+        self._real_process_object_df()
+
+    def _get_yaml_config(self):
+        with open(self.config_path, 'r') as f:
+            full_config = yaml.safe_load(f)
+
+        modules = ["vehicle", "T_threshold", "safety", "comfort", "efficient", "function", "traffic"]
+        
+        # 1. Initialize vehicle_config (no T_threshold merge needed)
+        self.vehicle_config = full_config[modules[0]]
+        
+        # 2. Build T_threshold_config (wrapped in a dict)
+        T_threshold_config = {"T_threshold": full_config[modules[1]]}
+
+        
+        # 3. Merge T_threshold into each module config
+        # 3.1 safety_config
+        self.safety_config = {"safety": full_config[modules[2]]}
+        self.safety_config.update(T_threshold_config)
+        
+        # 3.2 comfort_config
+        self.comfort_config = {"comfort": full_config[modules[3]]}
+        self.comfort_config.update(T_threshold_config)
+        
+        # 3.3 efficient_config
+        self.efficient_config = {"efficient": full_config[modules[4]]}
+        self.efficient_config.update(T_threshold_config)
+        
+        # 3.4 function_config
+        self.function_config = {"function": full_config[modules[5]]}
+        self.function_config.update(T_threshold_config)
+        
+        # 3.5 traffic_config
+        self.traffic_config = {"traffic": full_config[modules[6]]}
+        self.traffic_config.update(T_threshold_config)
+
+    @staticmethod
+    def cal_velocity(lat_v, lon_v):
+        """Calculate resultant velocity from lateral and longitudinal components."""
+        return np.sqrt(lat_v**2 + lon_v**2)
+
+    def _real_process_object_df(self):
+        """Process the object DataFrame."""
+        try:
+            # Read the CSV file
+            merged_csv_path = os.path.join(self.data_path, "merged_ObjState.csv")
+            self.object_df = pd.read_csv(
+                merged_csv_path, dtype={"simTime": float}
+            ).drop_duplicates(subset=["simTime", "simFrame", "playerId"])
+
+            data = self.object_df.copy()
+            
+
+            # Calculate common parameters
+            data["lat_v"] = data["speedY"] * 1
+            data["lon_v"] = data["speedX"] * 1
+            data["v"] = data.apply(
+                lambda row: self.cal_velocity(row["lat_v"], row["lon_v"]), axis=1
+            )
+            data["v"] = data["v"]  # v is treated as km/h downstream (the mileage calculation divides by 3.6)
+
+            # Calculate acceleration components
+            data["lat_acc"] = data["accelY"] * 1
+            data["lon_acc"] = data["accelX"] * 1
+            data["accel"] = data.apply(
+                lambda row: self.cal_velocity(row["lat_acc"], row["lon_acc"]), axis=1
+            )
+
+            # Drop rows with missing 'type' and reset index
+            data = data.dropna(subset=["type"])
+            data.reset_index(drop=True, inplace=True)
+            self.object_df = data.copy()
+
+            # Calculate respective parameters for each object
+            for obj_id, obj_data in data.groupby("playerId"):
+                self.obj_data[obj_id] = self._calculate_object_parameters(obj_data)
+
+            # Get object id list
+            EGO_PLAYER_ID = 1
+            self.obj_id_list = list(self.obj_data.keys())
+            self.ego_data = self.obj_data[EGO_PLAYER_ID]
+
+        except Exception as e:
+            # self.logger.error(f"Error processing object DataFrame: {e}")
+            raise
+
+    def _calculate_object_parameters(self, obj_data):
+        """Calculate additional parameters for a single object."""
+        obj_data = obj_data.copy()
+        obj_data["time_diff"] = obj_data["simTime"].diff()
+
+        obj_data["lat_acc_diff"] = obj_data["lat_acc"].diff()
+        obj_data["lon_acc_diff"] = obj_data["lon_acc"].diff()
+        obj_data["yawrate_diff"] = obj_data["speedH"].diff()
+
+        obj_data["lat_acc_roc"] = (
+            obj_data["lat_acc_diff"] / obj_data["time_diff"]
+        ).replace([np.inf, -np.inf], [9999, -9999])
+        obj_data["lon_acc_roc"] = (
+            obj_data["lon_acc_diff"] / obj_data["time_diff"]
+        ).replace([np.inf, -np.inf], [9999, -9999])
+        obj_data["accelH"] = (
+            obj_data["yawrate_diff"] / obj_data["time_diff"]
+        ).replace([np.inf, -np.inf], [9999, -9999])
+
+        return obj_data
+
+    def _get_driver_ctrl_data(self, df):
+        """
+        Process and get driver control information.
+
+        Args:
+            df: A DataFrame containing driver control data.
+
+        Returns:
+            A dictionary of driver control info.
+        """
+        driver_ctrl_data = {
+            "time_list": df["simTime"].round(2).tolist(),
+            "frame_list": df["simFrame"].tolist(),
+            "brakePedal_list": (
+                (df["brakePedal"] * 100).tolist()
+                if df["brakePedal"].max() < 1
+                else df["brakePedal"].tolist()
+            ),
+            "throttlePedal_list": (
+                (df["throttlePedal"] * 100).tolist()
+                if df["throttlePedal"].max() < 1
+                else df["throttlePedal"].tolist()
+            ),
+            "steeringWheel_list": df["steeringWheel"].tolist(),
+        }
+        return driver_ctrl_data
+
+    def _get_report_info(self, df):
+        """Extract report information from the DataFrame."""
+        mileage = self._mileage_cal(df)
+        duration = self._duration_cal(df)
+        return {"mileage": mileage, "duration": duration}
+
+    def _mileage_cal(self, df):
+        """Calculate mileage based on the driving data."""
+        if df["travelDist"].nunique() == 1:
+            df["time_diff"] = df["simTime"].diff().fillna(0)
+            df["avg_speed"] = (df["v"] + df["v"].shift()).fillna(0) / 2
+            df["distance_increment"] = df["avg_speed"] * df["time_diff"] / 3.6
+            df["travelDist"] = df["distance_increment"].cumsum().fillna(0)
+
+            mileage = round(df["travelDist"].iloc[-1] - df["travelDist"].iloc[0], 2)
+            return mileage
+        return 0.0  # Return 0 if travelDist is not valid
+
+    def _duration_cal(self, df):
+        """Calculate duration of the driving data."""
+        return df["simTime"].iloc[-1] - df["simTime"].iloc[0]

+ 115 - 0
modules/lib/log_manager.py

@@ -0,0 +1,115 @@
+import logging
+import os
+import threading
+from logging.handlers import QueueHandler, QueueListener
+from queue import Queue
+
+class LogManager:
+    _instance = None
+    _lock = threading.Lock()
+    _configured = False  # ensures the singleton is only configured once
+    
+    def __new__(cls, log_path="/home/kevin/kevin/zhaoyuan/zhaoyuan/log/app.log"):
+        with cls._lock:
+            if not cls._instance:
+                cls._instance = super().__new__(cls)
+                # Path handling
+                cls._instance._full_path = log_path
+                cls._instance._init_logger()
+            return cls._instance
+
+    @classmethod
+    def _validate_path(cls, path):
+        """Validate the path and create it if necessary."""
+        default_path = os.path.join(os.getcwd(), "logs")
+        target_path = path or default_path
+        
+        try:
+            os.makedirs(target_path, exist_ok=True)
+            # Test write permission
+            test_file = os.path.join(target_path, "write_test.tmp")
+            with open(test_file, "w") as f:
+                f.write("permission_test")
+            os.remove(test_file)
+            return target_path
+        except PermissionError:
+            logging.error(f"Insufficient permissions for {target_path}, using default")
+            os.makedirs(default_path, exist_ok=True)
+            return default_path
+        except Exception as e:
+            logging.error(f"Path error: {str(e)}, using default")
+            return default_path
+
+    @staticmethod
+    def _sanitize_filename(name):
+        """Strip illegal characters from a filename."""
+        invalid_chars = {'/', '\\', ':', '*', '?', '"', '<', '>', '|'}
+        cleaned = ''.join(c for c in name if c not in invalid_chars)
+        return cleaned[:50]  # limit filename length
+
+    def _init_logger(self):
+        """Initialize the logging system."""
+        self.log_queue = Queue(-1)
+        self.logger = logging.getLogger("GlobalLogger")
+        self.logger.setLevel(logging.DEBUG)
+
+        if not self.logger.handlers:
+            # Formatter that includes the thread name
+            formatter = logging.Formatter(
+                "[%(asctime)s][%(levelname)s][%(threadName)s] %(message)s"
+            )
+            
+            # File handler (UTF-8 encoded)
+            file_handler = logging.FileHandler(
+                self._full_path, 
+                encoding='utf-8',
+                delay=True  # defer opening the file until the first write
+            )
+            file_handler.setFormatter(formatter)
+            
+            # Console handler (ERROR level only)
+            console_handler = logging.StreamHandler()
+            console_handler.setLevel(logging.ERROR)
+            console_handler.setFormatter(formatter)
+
+            # Asynchronous queue listener
+            self.listener = QueueListener(
+                self.log_queue,
+                file_handler,
+                console_handler,
+                respect_handler_level=True
+            )
+            self.listener.start()
+
+            # Queue handler configuration
+            queue_handler = QueueHandler(self.log_queue)
+            queue_handler.setLevel(logging.DEBUG)
+            self.logger.addHandler(queue_handler)
+            self.logger.propagate = False
+
+    def get_logger(self):
+        """Return the thread-safe logger."""
+        return self.logger
+
+    @classmethod
+    def shutdown(cls):
+        """Shut down the logging system safely."""
+        if cls._instance:
+            cls._instance.listener.stop()
+            cls._instance = None
+
+# Usage example
+if __name__ == "__main__":
+    # Custom path and filename
+    custom_logger = LogManager(
+        log_path="/home/kevin/kevin/zhaoyuan/zhaoyuan/log/runtime.log"
+    ).get_logger()
+    
+    custom_logger.info("Custom logger configured successfully")
+    
+    # Default configuration
+    default_logger = LogManager().get_logger()
+    default_logger.warning("Using default configuration")
+    
+    # Safe shutdown
+    LogManager.shutdown()

+ 255 - 0
modules/lib/score.py

@@ -0,0 +1,255 @@
+import json
+import yaml
+
+from typing import Dict, Any
+
+from modules.lib.log_manager import LogManager
+
+from modules.config import config
+
+class Score:  
+    def __init__(self, yaml_config ):
+        self.logger = LogManager().get_logger()  # get the global logger instance
+        self.calculated_metrics = None
+        self.config = yaml_config
+        self.module_config = None
+        self.module_name = None
+        self.t_threshold = None
+        self.process_config(self.config)
+        self.level_3_merics = self._extract_level_3_metrics(self.module_config) 
+        self.result = {}  
+    
+    def process_config(self, config_dict):
+        t_threshold = config_dict.get("T_threshold")
+        if t_threshold is None:
+            raise ValueError("Missing T_threshold key in the configuration")
+
+        module_keys = [key for key in config_dict if (key != "T_threshold")]
+        if len(module_keys) != 1:
+            raise ValueError("The configuration dict must contain exactly one module configuration key")
+        
+        module_name = module_keys[0]
+        module_config = config_dict[module_name]
+        # print(f'Module name: {module_name}')
+        # print(f'Module config: {module_config}')
+        # print(f'T_threshold: {t_threshold}')
+
+        # Store the parsed pieces on the instance
+        self.module_name = module_name
+        self.module_config = module_config
+        self.t_threshold = t_threshold
+        self.logger.info(f'Module name: {module_name}')
+        self.logger.info(f'Module config: {module_config}')
+        self.logger.info(f'T_threshold: {t_threshold}')
+
+    def _extract_level_3_metrics(self, d):
+        names = []
+        for key, value in d.items():
+            if isinstance(value, dict):  # recurse into nested dicts
+                names.extend(self._extract_level_3_metrics(value))  # keep the nested names as well
+            elif key == 'name':  # collect the value whenever a 'name' key is found
+                names.append(value)
+        return names
+                         
+    def is_within_range(self, value, min_val, max_val):  
+        return min_val <= value <= max_val  
+  
+    def evaluate_level_3(self, metrics):  
+        result3 = {}  
+        name = metrics.get('name')  
+        priority = metrics.get('priority')  
+        max_val = metrics.get('max')  
+        min_val = metrics.get('min')
+        
+        self.level_3_merics.append(name)  
+        metric_value = self.calculated_metrics.get(name)  
+        result3[name] = {
+            'result': True,
+            'priority': priority,
+            'priority_1_count': 0  # initialized so the priority == 1 branch below can increment it
+        }
+        if metric_value is None:  
+            return result3  
+  
+        if not self.is_within_range(metric_value, min_val, max_val) and priority == 0:  
+            result3[name]['result'] = False  
+        elif not self.is_within_range(metric_value, min_val, max_val) and priority == 1:  
+            result3[name]['priority_1_count'] += 1  
+  
+        # Count priority 1 failures and override result if more than 3  
+       
+        priority_1_metrics = [v for v in result3.values() if v['priority'] == 1 and not v['result']]
+        if len(priority_1_metrics) > 3:
+            result3[name]['result'] = False
+  
+        return result3  
+  
+    def evaluate_level_2(self, metrics):  
+        result2 = {}  
+        name = metrics.get('name')  
+        priority = metrics.get('priority') 
+        result2[name] = {}  
+  
+        for metric, sub_metrics in metrics.items():  
+            if metric not in ['name', 'priority']:  
+                result2[name].update(self.evaluate_level_3(sub_metrics))  
+  
+        # Aggregate results for level 2 against the T0/T1/T2 thresholds
+        priority_0_count = sum(1 for v in result2[name].values() if v['priority'] == 0 and not v['result']) 
+        priority_1_count = sum(1 for v in result2[name].values() if v['priority'] == 1 and not v['result']) 
+        priority_2_count = sum(1 for v in result2[name].values() if v['priority'] == 2 and not v['result']) 
+
+        if priority_0_count > self.t_threshold['T0_threshold']:  
+            result2[name]['result'] = False
+            
+        elif priority_1_count > self.t_threshold['T1_threshold']:
+            for metric in result2[name].values():
+                metric['result'] = False
+            result2[name]['result'] = False  # mark the whole level-2 group as failed as well
+        elif priority_2_count > self.t_threshold['T2_threshold']:
+            for metric in result2[name].values():
+                metric['result'] = False
+            result2[name]['result'] = False  # mark the whole level-2 group as failed as well
+        else:  
+            result2[name]['result'] = True  # Default to True unless overridden  
+        result2[name]['priority'] = priority 
+        result2[name]['priority_0_count'] = priority_0_count  
+        result2[name]['priority_1_count'] = priority_1_count
+        result2[name]['priority_2_count'] = priority_2_count  
+  
+        return result2  
+  
+    def evaluate_level_1(self): 
+
+        name = self.module_config.get('name')
+        priority = self.module_config.get('priority') 
+        result1 = {} 
+        result1[name] = {}  
+        for metric, metrics in self.module_config.items():
+            if metric not in ['name', 'priority']:  
+                result1[name].update(self.evaluate_level_2(metrics))
+                
+        # Aggregate results for level 1 against the T0/T1/T2 thresholds
+        priority_0_count = sum(1 for v in result1[name].values() if v['priority'] == 0 and not v['result']) 
+        priority_1_count = sum(1 for v in result1[name].values() if v['priority'] == 1 and not v['result']) 
+        priority_2_count = sum(1 for v in result1[name].values() if v['priority'] == 2 and not v['result']) 
+
+        if priority_0_count > self.t_threshold['T0_threshold']:  
+            result1[name]['result'] = False
+            
+        elif priority_1_count > self.t_threshold['T1_threshold']:
+            for metric in result1[name].values():
+                metric['result'] = False
+            result1[name]['result'] = False  # mark the whole module as failed as well
+        elif priority_2_count > self.t_threshold['T2_threshold']:
+            for metric in result1[name].values():
+                metric['result'] = False
+            result1[name]['result'] = False  # mark the whole module as failed as well
+        else:  
+            result1[name]['result'] = True  # Default to True unless overridden  
+        result1[name]['priority'] = priority 
+        result1[name]['priority_0_count'] = priority_0_count  
+        result1[name]['priority_1_count'] = priority_1_count
+        result1[name]['priority_2_count'] = priority_2_count  
+
+        return result1  
+  
+    def evaluate(self, calculated_metrics):
+        self.calculated_metrics = calculated_metrics  
+        self.result = self.evaluate_level_1()  
+        return self.result 
+
+    def evaluate_single_case(self, case_name, priority, json_dict):
+
+        name = case_name
+        result = {} 
+        result[name] = {}  
+        # print(json_dict)
+        # Aggregate results for this case against the T0/T1/T2 thresholds
+        priority_0_count = sum(1 for v in json_dict.values() if v['priority'] == 0 and not v['result']) 
+        priority_1_count = sum(1 for v in json_dict.values() if v['priority'] == 1 and not v['result']) 
+        priority_2_count = sum(1 for v in json_dict.values() if v['priority'] == 2 and not v['result']) 
+
+        # modules/config/config.py does not define T0/T1/T2, so reuse the thresholds parsed from the YAML
+        if priority_0_count > self.t_threshold['T0_threshold']:
+            result[name]['result'] = False
+
+        elif priority_1_count > self.t_threshold['T1_threshold']:
+            for metric in result[name].values():
+                metric['result'] = False
+        elif priority_2_count > self.t_threshold['T2_threshold']:
+            for metric in result[name].values():
+                metric['result'] = False
+        else:  
+            result[name]['result'] = True  # Default to True unless overridden  
+        result[name]['priority'] = priority 
+        result[name]['priority_0_count'] = priority_0_count  
+        result[name]['priority_1_count'] = priority_1_count
+        result[name]['priority_2_count'] = priority_2_count  
+        result[case_name].update(json_dict)
+        
+        return result  
+
+def load_thresholds(config_path: str) -> Dict[str, int]:
+    """Load threshold parameters from the YAML config file."""
+    with open(config_path, 'r') as f:
+        config = yaml.safe_load(f)
+    return {
+        "T0": config['T_threshold']['T0_threshold'],
+        "T1": config['T_threshold']['T1_threshold'],
+        "T2": config['T_threshold']['T2_threshold']
+    }
+
+def get_overall_result(report: Dict[str, Any], config_path: str) -> Dict[str, Any]:
+    """
+    Process the evaluation report and add an overall result field.
+
+    Args:
+        report: The raw evaluation report dict.
+        config_path: Path to the YAML config file.
+
+    Returns:
+        The processed report with an overall_result field added.
+    """
+    # Load threshold parameters
+    thresholds = load_thresholds(config_path)
+    
+    # Initialize counters
+    counters = {'p0': 0, 'p1': 0, 'p2': 0}
+    
+    # Target categories
+    target_categories = ['function', 'safety', 'comfort', 'traffic', 'efficient']
+    
+    # Tally the result of each dimension directly
+    for category in target_categories:
+        if category in report:
+            # If this dimension failed, increment the counter for its priority
+            if not report[category].get('result', True):
+                priority = report[category].get('priority')
+                if priority == 0:
+                    counters['p0'] += 1
+                elif priority == 1:
+                    counters['p1'] += 1
+                elif priority == 2:
+                    counters['p2'] += 1
+    
+    # Threshold checks
+    thresholds_exceeded = (
+        counters['p0'] > thresholds['T0'],
+        counters['p1'] > thresholds['T1'],
+        counters['p2'] > thresholds['T2']
+    )
+    
+    # Build the processed report
+    processed_report = report.copy()
+    processed_report['overall_result'] = not any(thresholds_exceeded)
+    
+    # Attach the statistics
+    processed_report['threshold_checks'] = {
+        'T0_threshold': thresholds['T0'],
+        'T1_threshold': thresholds['T1'],
+        'T2_threshold': thresholds['T2'],
+        'actual_counts': counters
+    }
+    
+    return processed_report
+  
+  
+def main():  
+    pass
+   
+  
+if __name__ == '__main__':  
+    main()
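A minimal end-to-end sketch of the scoring flow, reusing the DataPreprocessing instance from the earlier sketch; the metric values passed to evaluate() are made-up placeholders keyed by the `name` fields from metric_config.yaml:

    from modules.lib.score import Score, get_overall_result

    scorer = Score(processed.safety_config)   # {'safety': {...}, 'T_threshold': {...}}
    safety_result = scorer.evaluate({'TTC': 3.2, 'MTTC': 3.5, 'THW': 1.8})
    report = {'safety': safety_result['safety']}
    final = get_overall_result(report, "config/metric_config.yaml")
    print(final['overall_result'], final['threshold_checks'])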

+ 638 - 0
modules/metric/comfort.py

@@ -0,0 +1,638 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##################################################################
+#
+# Copyright (c) 2023 CICV, Inc. All Rights Reserved
+#
+##################################################################
+"""
+@Authors:           zhanghaiwen(zhanghaiwen@china-icv.cn), yangzihao(yangzihao@china-icv.cn)
+@Date:              2023/06/25
+@Last Modified:     2023/06/25
+@Summary:           Comfort metrics
+"""
+
+import sys
+import math
+import pandas as pd
+import numpy as np
+import scipy.signal
+from pathlib import Path 
+
+from modules.lib.score import Score
+from modules.lib.common import get_interpolation, get_frame_with_time
+from modules.config import config
+from modules.lib import data_process
+
+from modules.lib.log_manager import LogManager
+
+
+def peak_valley_decorator(method):
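+    """
+    Decorator for the peak/valley based metrics below: it extracts speedH peaks
+    and valleys from self.df and invokes the wrapped method once for every
+    consecutive peak/valley pair accepted by _peak_valley_judgment (flag=True);
+    if no peaks or valleys are found, the method is called once with zeroed
+    points and flag=False.
+    """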
+    def wrapper(self, *args, **kwargs):
+        peak_valley = self._peak_valley_determination(self.df)
+        pv_list = self.df.loc[peak_valley, ['simTime', 'speedH']].values.tolist()
+        if len(pv_list) != 0:
+            flag = True
+            p_last = pv_list[0]
+
+            for i in range(1, len(pv_list)):
+                p_curr = pv_list[i]
+
+                if self._peak_valley_judgment(p_last, p_curr):
+                    # method(self, p_curr, p_last)
+                    method(self, p_curr, p_last, flag, *args, **kwargs)
+                else:
+                    p_last = p_curr
+
+            return method
+        else:
+            flag = False
+            p_curr = [0, 0]
+            p_last = [0, 0]
+            method(self, p_curr, p_last, flag, *args, **kwargs)
+            return method
+
+    return wrapper
+
+
+class Comfort(object):
+    """
+    Class for achieving comfort metrics for autonomous driving.
+
+    Attributes:
+        dataframe: Vehicle driving data, stored in dataframe format.
+    """
+
+    def __init__(self, data_processed):
+
+        # self.logger = log.get_logger()
+        self.eval_data = pd.DataFrame()
+        self.data_processed = data_processed
+        self.logger = LogManager().get_logger()  # get the global logger instance
+
+        self.data = data_processed.ego_data
+        # self.mileage = data_processed.report_info['mileage']
+        self.ego_df = pd.DataFrame()
+        self.discomfort_df = pd.DataFrame(columns=['start_time', 'end_time', 'start_frame', 'end_frame', 'type'])
+
+        
+        self.calculated_value = {  
+            'Weaving': 0,  
+            'shake': 0,  
+            'cadence': 0,  
+            'slamBrake': 0,  
+            'slamAccelerate': 0,   
+        } 
+
+
+
+        # self.time_list = data_processed.driver_ctrl_data['time_list']
+        # self.frame_list = data_processed.driver_ctrl_data['frame_list']
+
+        self.time_list = self.data['simTime'].values.tolist()
+        self.frame_list = self.data['simFrame'].values.tolist()
+
+        self.count_dict = {}
+        self.duration_dict = {}
+        self.strength_dict = {}
+
+        self.discomfort_count = 0
+        self.zigzag_count = 0
+        self.shake_count = 0
+        self.cadence_count = 0
+        self.slam_brake_count = 0
+        self.slam_accel_count = 0
+
+        self.zigzag_strength = 0
+        self.shake_strength = 0
+        self.cadence_strength = 0
+        self.slam_brake_strength = 0
+        self.slam_accel_strength = 0
+
+        self.discomfort_duration = 0
+        self.zigzag_duration = 0
+        self.shake_duration = 0
+        self.cadence_duration = 0
+        self.slam_brake_duration = 0
+        self.slam_accel_duration = 0
+
+        self.zigzag_time_list = []
+        self.zigzag_frame_list = []
+        self.zigzag_stre_list = []
+        self.cur_ego_path_list = []
+        self.curvature_list = []
+
+        self._get_data()
+        self._comf_param_cal()
+
+    def _get_data(self):
+        """Select the comfort-related columns from the ego data."""
+        self.ego_df = self.data[config.COMFORT_INFO].copy()
+        self.df = self.ego_df.reset_index(drop=True)  # re-index so rows follow the original CSV order
+
+    # def _cal_cur_ego_path(self, row):
+    #     try:
+    #         divide = (row['speedX'] ** 2 + row['speedY'] ** 2) ** (3 / 2)
+    #         if not divide:
+    #             res = None
+    #         else:
+    #             res = (row['speedX'] * row['accelY'] - row['speedY'] * row['accelX']) / divide
+    #     except:
+    #         res = None
+    #     return res
+
+    def _cal_cur_ego_path(self, row):
+        try:
+            # Sum of squared speeds; check whether it is close to zero
+            speed_sq = row['speedX']**2 + row['speedY']**2
+            if speed_sq < 1e-6:  # threshold can be tuned for the actual scenario
+                return 1e5  # return a very large curvature when the speed is close to zero
+            divide = speed_sq ** (3/2)
+            res = (row['speedX'] * row['accelY'] - row['speedY'] * row['accelX']) / divide
+            return res
+        except Exception as e:
+            return 1e5  # also return a very large value on errors (division by zero, missing values, etc.)
+
+
+    def _comf_param_cal(self):
+        """Derive the comfort-related feature columns (interpolated limits, slam flags, cadence, curvature)."""
+
+        # [log]
+        self.ego_df['ip_acc'] = self.ego_df['v'].apply(get_interpolation, point1=[18, 4], point2=[72, 2])
+        self.ego_df['ip_dec'] = self.ego_df['v'].apply(get_interpolation, point1=[18, -5], point2=[72, -3.5])
+        self.ego_df['slam_brake'] = (self.ego_df['lon_acc'] - self.ego_df['ip_dec']).apply(
+            lambda x: 1 if x < 0 else 0)
+        self.ego_df['slam_accel'] = (self.ego_df['lon_acc'] - self.ego_df['ip_acc']).apply(
+            lambda x: 1 if x > 0 else 0)
+        self.ego_df['cadence'] = self.ego_df.apply(
+            lambda row: self._cadence_process_new(row['lon_acc'], row['ip_acc'], row['ip_dec']), axis=1)
+
+        # for shake detector
+        self.ego_df['cur_ego_path'] = self.ego_df.apply(self._cal_cur_ego_path, axis=1)
+        self.ego_df['curvHor'] = self.ego_df['curvHor'].astype('float')
+        self.ego_df['cur_diff'] = (self.ego_df['cur_ego_path'] - self.ego_df['curvHor']).abs()
+        self.ego_df['R'] = self.ego_df['curvHor'].apply(lambda x: 10000 if x == 0 else 1 / x)
+        self.ego_df['R_ego'] = self.ego_df['cur_ego_path'].apply(lambda x: 10000 if x == 0 else 1 / x)
+        self.ego_df['R_diff'] = (self.ego_df['R_ego'] - self.ego_df['R']).abs()
+
+        self.cur_ego_path_list = self.ego_df['cur_ego_path'].values.tolist()
+        self.curvature_list = self.ego_df['curvHor'].values.tolist()
+
+    def _peak_valley_determination(self, df):
+        """
+        Determine the peak and valley of the vehicle based on its current angular velocity.
+
+        Parameters:
+            df: Dataframe containing the vehicle angular velocity.
+
+        Returns:
+            peak_valley: List of indices representing peaks and valleys.
+        """
+
+        peaks, _ = scipy.signal.find_peaks(df['speedH'], height=0.01, distance=1, prominence=0.01)
+        valleys, _ = scipy.signal.find_peaks(-df['speedH'], height=0.01, distance=1, prominence=0.01)
+        peak_valley = sorted(list(peaks) + list(valleys))
+
+        return peak_valley
+
+    def _peak_valley_judgment(self, p_last, p_curr, tw=10000, avg=0.02):
+        """
+        Determine if the given peaks and valleys satisfy certain conditions.
+
+        Parameters:
+            p_last: Previous peak or valley data point.
+            p_curr: Current peak or valley data point.
+            tw: Threshold time difference between peaks and valleys.
+            avg: Angular velocity gap threshold.
+
+        Returns:
+            Boolean indicating whether the conditions are satisfied.
+        """
+        t_diff = p_curr[0] - p_last[0]
+        v_diff = abs(p_curr[1] - p_last[1])
+        s = p_curr[1] * p_last[1]
+
+        zigzag_flag = t_diff < tw and v_diff > avg and s < 0
+        if zigzag_flag and ([p_last[0], p_curr[0]] not in self.zigzag_time_list):
+            self.zigzag_time_list.append([p_last[0], p_curr[0]])
+        return zigzag_flag
+
+    @peak_valley_decorator
+    def zigzag_count_func(self, p_curr, p_last, flag=True):
+        """
+        Count the number of zigzag movements.
+
+        Parameters:
+            df: Input dataframe data.
+
+        Returns:
+            zigzag_count: Number of zigzag movements.
+        """
+        if flag:
+            self.zigzag_count += 1
+        else:
+            self.zigzag_count += 0
+
+    @peak_valley_decorator
+    def cal_zigzag_strength_strength(self, p_curr, p_last, flag=True):
+        """
+        Calculate various strength statistics.
+
+        Returns:
+            Tuple containing maximum strength, minimum strength,
+            average strength, and 99th percentile strength.
+        """
+        if flag:
+            v_diff = abs(p_curr[1] - p_last[1])
+            t_diff = p_curr[0] - p_last[0]
+            if t_diff > 0:
+                self.zigzag_stre_list.append(v_diff / t_diff)  # average angular acceleration
+        else:
+            self.zigzag_stre_list = []
+
+    def _shake_detector(self, Cr_diff=0.05, T_diff=0.39):
+        """
+        Inputs considered: ego lateral acceleration ax;
+        lateral curvature of the ego trajectory;
+        curvature change rate of the ego trajectory;
+        curvature of the lane the ego is in;
+        curvature change rate of that lane;
+        turn signal (tentative, may be unused); Cr_diff = 0.1, T_diff = 0.04
+        Curvature formula: k(t) = (x'(t) * y''(t) - y'(t) * x''(t)) / ((x'(t))^2 + (y'(t))^2)^(3/2)
+        """
+        time_list = []
+        frame_list = []
+        shake_time_list = []
+
+        df = self.ego_df.copy()
+        df = df[df['cur_diff'] > Cr_diff]
+        df['frame_ID_diff'] = df['simFrame'].diff()  # keep segments where the trajectory/lane curvature difference exceeds the threshold
+        filtered_df = df[df.frame_ID_diff > T_diff]  # large frame gaps separate distinct shake episodes
+
+        row_numbers = filtered_df.index.tolist()
+        cut_column = pd.cut(df.index, bins=row_numbers)
+
+        grouped = df.groupby(cut_column)
+        dfs = {}
+        for name, group in grouped:
+            dfs[name] = group.reset_index(drop=True)
+
+        for name, df_group in dfs.items():
+            # Straight road, no intentional lane change
+            df_group['curvHor'] = df_group['curvHor'].abs()
+            df_group_straight = df_group[(df_group.lightMask == 0) & (df_group.curvHor < 0.001)]
+            if not df_group_straight.empty:
+                tmp_list = df_group_straight['simTime'].values
+                # shake_time_list.append([tmp_list[0], tmp_list[-1]])
+                time_list.extend(df_group_straight['simTime'].values)
+                frame_list.extend(df_group_straight['simFrame'].values)
+                self.shake_count = self.shake_count + 1
+
+            # Turn signal on and the road is straight; use a larger curvature-difference threshold for shake detection
+            df_group_change_lane = df_group[(df_group['lightMask'] != 0) & (df_group['curvHor'] < 0.001)]
+            df_group_change_lane_data = df_group_change_lane[df_group_change_lane.cur_diff > Cr_diff + 0.2]
+            if not df_group_change_lane_data.empty:
+                tmp_list = df_group_change_lane_data['simTime'].values
+                # shake_time_list.append([tmp_list[0], tmp_list[-1]])
+                time_list.extend(df_group_change_lane_data['simTime'].values)
+                frame_list.extend(df_group_change_lane_data['simFrame'].values)
+                self.shake_count = self.shake_count + 1
+
+            # Turning with the turn signal on
+            df_group_turn = df_group[(df_group['lightMask'] != 0) & (df_group['curvHor'].abs() > 0.001)]
+            df_group_turn_data = df_group_turn[df_group_turn.cur_diff.abs() > Cr_diff + 0.1]
+            if not df_group_turn_data.empty:
+                tmp_list = df_group_turn_data['simTime'].values
+                # shake_time_list.append([tmp_list[0], tmp_list[-1]])
+                time_list.extend(df_group_turn_data['simTime'].values)
+                frame_list.extend(df_group_turn_data['simFrame'].values)
+                self.shake_count = self.shake_count + 1
+
+        TIME_RANGE = 1
+        t_list = time_list
+        f_list = frame_list
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            if not sub_group_time or t_list[i] - t_list[i - 1] <= TIME_RANGE:
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+        if sub_group_time:
+            group_time.append(sub_group_time)
+            group_frame.append(sub_group_frame)
+
+        # Values for chart output
+        shake_time = [[g[0], g[-1]] for g in group_time]
+        shake_frame = [[g[0], g[-1]] for g in group_frame]
+        self.shake_count = len(shake_time)
+
+        if shake_time:
+            time_df = pd.DataFrame(shake_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(shake_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'shake'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        return time_list
+
+    def _cadence_process(self, lon_acc_roc, ip_dec_roc):
+        if abs(lon_acc_roc) >= abs(ip_dec_roc) or abs(lon_acc_roc) < 1:
+            return np.nan
+        # elif abs(lon_acc_roc) == 0:
+        elif abs(lon_acc_roc) == 0:
+            return 0
+        elif lon_acc_roc > 0 and lon_acc_roc < -ip_dec_roc:
+            return 1
+        elif lon_acc_roc < 0 and lon_acc_roc > ip_dec_roc:
+            return -1
+
+    def _cadence_process_new(self, lon_acc, ip_acc, ip_dec):
+        if abs(lon_acc) < 1 or lon_acc > ip_acc or lon_acc < ip_dec:
+            return np.nan
+        # elif abs(lon_acc_roc) == 0:
+        elif abs(lon_acc) == 0:
+            return 0
+        elif lon_acc > 0 and lon_acc < ip_acc:
+            return 1
+        elif lon_acc < 0 and lon_acc > ip_dec:
+            return -1
+        else:
+            return 0
+
+    def _cadence_detector(self):
+        """
+        # Abrupt acceleration changes: accelerate then decelerate, decelerate then accelerate, accelerate then stop, decelerate then stop
+        # Cadence (jerkiness): multiple abrupt changes of the acceleration rate within 2 s
+        # Find each feature point, then take a 2 s window after it and count the frequency, avoiding unnecessary computation
+
+        # Filter out the feature points
+        # Cluster feature points by time: gaps larger than 1 s split groups, gaps of 1 s or less join the same group
+        # Drop groups with fewer than 3 feature points
+        """
+        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'cadence']].copy()
+        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'cadence']].copy()
+        time_list = data['simTime'].values.tolist()
+
+        data = data[data['cadence'].notna()]  # keep only rows with a cadence flag
+        data['cadence_diff'] = data['cadence'].diff()
+        data.dropna(subset='cadence_diff', inplace=True)
+        data = data[data['cadence_diff'] != 0]
+
+        t_list = data['simTime'].values.tolist()
+        f_list = data['simFrame'].values.tolist()
+
+        TIME_RANGE = 1
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            if not sub_group_time or t_list[i] - t_list[i - 1] <= TIME_RANGE:  # feature points within one second of each other belong to the same cadence group
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+
+        group_time.append(sub_group_time)
+        group_frame.append(sub_group_frame)
+        group_time = [g for g in group_time if len(g) >= 1]  # a single feature point already counts as one cadence event
+        group_frame = [g for g in group_frame if len(g) >= 1]
+
+
+        # Re-collect time stamps per cadence group (based on each group's start/end time)
+
+        # Values for chart output
+        cadence_time = [[g[0], g[-1]] for g in group_time]
+        cadence_frame = [[g[0], g[-1]] for g in group_frame]
+
+        if cadence_time:
+            time_df = pd.DataFrame(cadence_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(cadence_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'cadence'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        # Collect all time stamps falling inside each cadence group's [start, end] interval
+        cadence_time_list = [time for pair in cadence_time for time in time_list if pair[0] <= time <= pair[1]]
+
+        # time_list = [element for sublist in group_time for element in sublist]
+        # merged_list = [element for sublist in res_group for element in sublist]
+        # res_df = data[data['simTime'].isin(merged_list)]
+
+        stre_list = []
+        freq_list = []
+        for g in group_time:
+            # calculate strength
+            g_df = data[data['simTime'].isin(g)]
+            strength = g_df['lon_acc'].abs().mean()
+            stre_list.append(strength)
+
+            # calculate frequency(单点分组时时长为0,避免除零)
+            cnt = len(g)
+            t_start = g_df['simTime'].iloc[0]
+            t_end = g_df['simTime'].iloc[-1]
+            t_delta = t_end - t_start
+            frequency = cnt / t_delta if t_delta > 0 else np.nan
+            freq_list.append(frequency)
+
+        self.cadence_count = len(freq_list)
+        cadence_stre = sum(stre_list) / len(stre_list) if stre_list else 0
+
+        return cadence_time_list
+
+    def _slam_brake_detector(self):
+        # 统计急刹全为1的分段的个数,记录分段开头的frame_ID
+        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'ip_dec_roc', 'slam_brake']].copy()
+        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'ip_dec', 'slam_brake']].copy()
+        # data['slam_diff'] = data['slam_brake'].diff()
+        # res_df = data[data['slam_diff'] == 1]
+
+        res_df = data[data['slam_brake'] == 1]
+        t_list = res_df['simTime'].values
+        f_list = res_df['simFrame'].values.tolist()
+
+        FRAME_GAP = 1
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            if not sub_group_time or f_list[i] - f_list[i - 1] <= FRAME_GAP:  # 连续帧的算作同一组急刹
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+
+        group_time.append(sub_group_time)
+        group_frame.append(sub_group_frame)
+        group_time = [g for g in group_time if len(g) >= 2]  # 达到两帧算作一次急刹
+        group_frame = [g for g in group_frame if len(g) >= 2]
+
+        # 输出图表值
+        slam_brake_time = [[g[0], g[-1]] for g in group_time]
+        slam_brake_frame = [[g[0], g[-1]] for g in group_frame]
+
+        if slam_brake_time:
+            time_df = pd.DataFrame(slam_brake_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(slam_brake_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'slam_brake'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        time_list = [element for sublist in group_time for element in sublist]
+        self.slam_brake_count = len(group_time)  # / self.mileage  # * 1000000
+        return time_list
+
+    def _slam_accel_detector(self):
+        # 统计急加速全为1的分段的个数,记录分段开头的frame_ID
+        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'ip_acc_roc', 'slam_accel']].copy()
+        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'ip_acc', 'slam_accel']].copy()
+        # data['slam_diff'] = data['slam_accel'].diff()
+        # res_df = data.loc[data['slam_diff'] == 1]
+
+        res_df = data.loc[data['slam_accel'] == 1]
+        t_list = res_df['simTime'].values
+        f_list = res_df['simFrame'].values.tolist()
+
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            if not sub_group_time or f_list[i] - f_list[i - 1] <= 1:  # 连续帧的算作同一组急加速
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+
+        group_time.append(sub_group_time)
+        group_frame.append(sub_group_frame)
+        group_time = [g for g in group_time if len(g) >= 2]
+        group_frame = [g for g in group_frame if len(g) >= 2]
+
+
+        # 输出图表值
+        slam_accel_time = [[g[0], g[-1]] for g in group_time]
+        slam_accel_frame = [[g[0], g[-1]] for g in group_frame]
+
+        if slam_accel_time:
+            time_df = pd.DataFrame(slam_accel_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(slam_accel_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'slam_accel'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        time_list = [element for sublist in group_time for element in sublist]
+        self.slam_accel_count = len(group_time)  # / self.mileage  # * 1000000
+        return time_list
+
+    def comf_statistic(self):
+        df = self.ego_df[['simTime', 'cur_diff', 'lon_acc', 'lon_acc_roc', 'accelH']].copy()
+
+        self.zigzag_count_func()
+        self.cal_zigzag_strength_strength()
+        if self.zigzag_time_list:
+            zigzag_df = pd.DataFrame(self.zigzag_time_list, columns=['start_time', 'end_time'])
+            zigzag_df = get_frame_with_time(zigzag_df, self.ego_df)
+            zigzag_df['type'] = 'zigzag'
+            self.discomfort_df = pd.concat([self.discomfort_df, zigzag_df], ignore_index=True)
+            # discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            # self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        zigzag_t_list = []
+        # 只有[t_start, t_end]数对,要提取为完整time list
+        t_list = df['simTime'].values.tolist()
+        for t_start, t_end in self.zigzag_time_list:
+            index_1 = t_list.index(t_start)
+            index_2 = t_list.index(t_end)
+            zigzag_t_list.extend(t_list[index_1:index_2 + 1])
+        zigzag_t_list = list(set(zigzag_t_list))
+        shake_t_list = self._shake_detector()
+        cadence_t_list = self._cadence_detector()
+        slam_brake_t_list = self._slam_brake_detector()
+        slam_accel_t_list = self._slam_accel_detector()
+
+        discomfort_time_list = zigzag_t_list + shake_t_list + cadence_t_list + slam_brake_t_list + slam_accel_t_list
+        discomfort_time_list = sorted(set(discomfort_time_list))  # 去重后排序
+
+        # TIME_DIFF = self.time_list[3] - self.time_list[2]
+        # TIME_DIFF = 0.4
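+        # 注:此处假定数据采样频率为100Hz,若实际采样率不同需同步修改FREQUENCY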
+        FREQUENCY = 100
+        TIME_DIFF = 1 / FREQUENCY
+        self.discomfort_duration = len(discomfort_time_list) * TIME_DIFF
+
+        df['flag_zigzag'] = df['simTime'].apply(lambda x: 1 if x in zigzag_t_list else 0)
+        df['flag_shake'] = df['simTime'].apply(lambda x: 1 if x in shake_t_list else 0)
+        df['flag_cadence'] = df['simTime'].apply(lambda x: 1 if x in cadence_t_list else 0)
+        df['flag_slam_brake'] = df['simTime'].apply(lambda x: 1 if x in slam_brake_t_list else 0)
+        df['flag_slam_accel'] = df['simTime'].apply(lambda x: 1 if x in slam_accel_t_list else 0)
+
+
+        self.calculated_value = {
+            "weaving": self.zigzag_count,
+            "shake": self.shake_count,
+            "cadence": self.cadence_count,
+            "slamBrake": self.slam_brake_count,
+            "slamAccelerate": self.slam_accel_count
+        }
+        self.logger.info(f"舒适性计算完成,统计结果:{self.calculated_value}")
+        return self.calculated_value
+
+    def _nan_detect(self, num):
+        if math.isnan(num):
+            return 0
+        return num
+
+    def zip_time_pairs(self, zip_list):
+        zip_time_pairs = zip(self.time_list, zip_list)
+        zip_vs_time = [[x, "" if math.isnan(y) else y] for x, y in zip_time_pairs]
+        return zip_vs_time
+
+    def report_statistic(self):
+        comfort_result = self.comf_statistic()
+
+        # comfort_config_path = self.config_path / "comfort_config.yaml" #"comfort_config.yaml"  # "comfort_config.yaml"
+        evaluator = Score(self.data_processed.comfort_config)
+        result = evaluator.evaluate(comfort_result) 
+        print("\n[舒适性表现及得分情况]")
+        
+        return result
+    
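+
+# 以下是一个最小示意(非正式接口,数值为假设值),用于说明上文各detector共用的“按时间间隔聚组”规则:
+# 相邻特征点时间间隔不超过gap秒的归入同一组,返回每组的[起始时间, 结束时间]。
+def _demo_group_by_time_gap(times, gap=1.0):
+    groups = []
+    current = []
+    for t in times:
+        if not current or t - current[-1] <= gap:
+            current.append(t)
+        else:
+            groups.append(current)
+            current = [t]
+    if current:
+        groups.append(current)
+    return [[g[0], g[-1]] for g in groups]
+
+# 例如 _demo_group_by_time_gap([1.0, 1.4, 1.9, 4.0, 4.2]) 返回 [[1.0, 1.9], [4.0, 4.2]]
+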
+if __name__ == '__main__':
+    case_name = 'ICA'
+    mode_label = 'PGVIL'
+    
+    data = data_process.DataPreprocessing(case_name, mode_label)
+
+
+    comfort_instance = Comfort(data)  
+    # 调用实例方法 report_statistic,它不接受除 self 之外的参数  
+    try:  
+        comfort_result = comfort_instance.report_statistic() 
+        result = {'comfort': comfort_result}
+        
+    except Exception as e:  
+        print(f"An error occurred in Comfort.report_statistic: {e}")

+ 105 - 0
modules/metric/efficient.py

@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##################################################################
+#
+# Copyright (c) 2024 CICV, Inc. All Rights Reserved
+#
+##################################################################
+"""
+@Authors:           xieguijin(xieguijin@china-icv.cn)
+@Data:              2024/12/23
+@Last Modified:     2024/12/23
+@Summary:           Efficient metrics calculation
+"""
+
+from modules.lib.score import Score
+from modules.lib import log_manager
+
+
+class Efficient:
+    STOP_SPEED_THRESHOLD = 0.05  # Speed threshold to consider as stop
+    STOP_TIME_THRESHOLD = 0.5  # Time threshold to consider as stop (in seconds)
+    FRAME_RANGE = 13  # Frame range to classify stop duration
+    
+    def __init__(self, data_processed):
+        
+        # self.logger = log.get_logger()  # 使用时再初始化
+       
+        self.data_processed = data_processed
+        self.df = data_processed.object_df.copy()
+        self.ego_df = data_processed.ego_data
+        self.stop_count = 0  # Initialize stop count
+        self.stop_duration = 0  # Initialize stop duration
+        
+    def _max_speed(self):
+        """Return the maximum speed in the ego data."""
+        return self.ego_df['v'].max()
+
+    def _deviation_speed(self):
+        """Return the variance of the speed in the ego data."""
+        return self.ego_df['v'].var()
+
+    def average_velocity(self):
+        """
+        Calculate the average velocity of the ego data.
+        Average velocity = total distance / total time
+        """
+        self.average_v = self.ego_df['v'].mean()
+        return self.average_v
+
+    def stop_duration_and_count(self):
+        """
+        Calculate stop duration and stop count based on the following:
+        - Stops are detected when speed is <= STOP_SPEED_THRESHOLD
+        - Stops should last more than STOP_TIME_THRESHOLD (in seconds)
+        """
+        stop_time_list = self.ego_df[self.ego_df['v'] <= self.STOP_SPEED_THRESHOLD]['simTime'].values.tolist()
+        stop_frame_list = self.ego_df[self.ego_df['v'] <= self.STOP_SPEED_THRESHOLD]['simFrame'].values.tolist()
+
+        stop_frame_group = []
+        stop_time_group = []
+        sum_stop_time = 0
+        f1, t1 = stop_frame_list[0] if stop_frame_list else 0, stop_time_list[0] if stop_time_list else 0
+
+        for i in range(1, len(stop_frame_list)):
+            if stop_frame_list[i] - stop_frame_list[i - 1] != 1:  # Frame discontinuity
+                f2, t2 = stop_frame_list[i - 1], stop_time_list[i - 1]
+                # If stop is valid (frame gap >= FRAME_RANGE and duration > STOP_TIME_THRESHOLD)
+                if f2 - f1 >= self.FRAME_RANGE:
+                    stop_frame_group.append((f1, f2))
+                    stop_time_group.append((t1, t2))
+                    sum_stop_time += (t2 - t1)
+                    self.stop_count += 1
+                # Update f1, t1
+                f1, t1 = stop_frame_list[i], stop_time_list[i]
+
+        # Check last stop segment
+        if len(stop_frame_list) > 0:
+            f2, t2 = stop_frame_list[-1], stop_time_list[-1]
+            if f2 - f1 >= self.FRAME_RANGE and f2 != self.ego_df['simFrame'].values[-1]:
+                stop_frame_group.append((f1, f2))
+                stop_time_group.append((t1, t2))
+                sum_stop_time += (t2 - t1)
+                self.stop_count += 1
+
+        # Calculate stop duration if stop count is not zero
+        self.stop_duration = sum_stop_time / self.stop_count if self.stop_count != 0 else 0
+        return self.stop_duration
+
+    def report_statistic(self):
+        """Generate the statistics and report the results."""
+        efficient_result = {
+            'maxSpeed': self._max_speed(),
+            'deviationSpeed': self._deviation_speed(),
+            'averagedSpeed': self.average_velocity(),
+            'stopDuration': self.stop_duration_and_count()
+        }
+        # self.logger.info(f"Efficient metrics calculation completed. Results: {efficient_result}")
+
+        evaluator = Score(self.data_processed.efficient_config)
+        result = evaluator.evaluate(efficient_result) 
+
+        print("\n[高效性表现及评价结果]")
+        return result

+ 558 - 0
modules/metric/function.py

@@ -0,0 +1,558 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##################################################################
+#
+# Copyright (c) 2025 CICV, Inc. All Rights Reserved
+#
+##################################################################
+"""
+@Authors:           zhanghaiwen(zhanghaiwen@china-icv.cn)
+@Data:              2025/01/5
+@Last Modified:     2025/01/5
+@Summary:           Function Metrics Calculation
+"""
+
+import sys
+from pathlib import Path
+# 添加项目根目录到系统路径
+root_path = Path(__file__).resolve().parent.parent
+sys.path.append(str(root_path))
+
+from modules.lib.score import Score
+from modules.lib.log_manager import LogManager
+import numpy as np
+from typing import Dict, Tuple, Optional, Callable, Any
+import pandas as pd
+import yaml
+
+
+# ----------------------
+# 基础工具函数 (Pure functions)
+# ----------------------
+scenario_sign_dict = {"LeftTurnAssist": 206, "HazardousLocationW": 207, "RedLightViolationW": 208, "CoorperativeIntersectionPassing": 225, "GreenLightOptimalSpeedAdvisory": 234,
+                      "ForwardCollision": 212}
+
+def calculate_distance_PGVIL(ego_pos: np.ndarray, obj_pos: np.ndarray) -> np.ndarray:
+    """向量化距离计算"""
+    return np.linalg.norm(ego_pos - obj_pos, axis=1)
+
+
+def calculate_relative_speed_PGVIL(
+    ego_speed: np.ndarray, obj_speed: np.ndarray
+) -> np.ndarray:
+    """向量化相对速度计算"""
+    return np.linalg.norm(ego_speed - obj_speed, axis=1)
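+
+# 用法示意(假设值):上述两个函数均按行做向量化计算,ego_pos/ego_speed为1×2数组,obj_pos/obj_speed为N×2数组;
+# 例如 ego_pos=np.array([[0, 0]])、obj_pos=np.array([[3, 4]]) 时 calculate_distance_PGVIL 返回 array([5.])。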
+
+def calculate_distance(ego_df: pd.DataFrame, correctwarning: int) -> np.ndarray:
+    """向量化距离计算"""
+    dist = ego_df[(ego_df['ifwarning'] == correctwarning) & (ego_df['ifwarning'].notna())]['relative_dist']
+    return dist
+
+
+def calculate_relative_speed(ego_df: pd.DataFrame, correctwarning: int) -> np.ndarray:
+    """向量化相对速度计算"""
+    return ego_df[(ego_df['ifwarning'] == correctwarning) & (ego_df['ifwarning'].notna())]['composite_v']
+
+
+
+def extract_ego_obj(data: pd.DataFrame) -> Tuple[pd.Series, pd.DataFrame]:
+    """数据提取函数"""
+    ego = data[data["playerId"] == 1].iloc[0]
+    obj = data[data["playerId"] != 1]
+    return ego, obj
+
+    
+def get_first_warning(data_processed) -> Optional[pd.DataFrame]:
+    """带缓存的预警数据获取"""
+    ego_df = data_processed.ego_data
+    obj_df = data_processed.object_df
+
+    scenario_name = data_processed.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict.get(scenario_name)
+
+    if correctwarning is None:
+        print("无法获取正确的预警信号标志位!")
+        return None
+    warning_rows = ego_df[(ego_df['ifwarning'] == correctwarning) & (ego_df['ifwarning'].notna())]
+    
+    warning_times = warning_rows['simTime']
+    if warning_times.empty:
+        print("没有找到预警数据!")
+        return None
+
+    first_time = warning_times.iloc[0]
+    return obj_df[obj_df['simTime'] == first_time]
+
+
+# ----------------------
+# 核心计算功能函数
+# ----------------------
+def latestWarningDistance_LST(data) -> dict:
+    """预警距离计算流水线"""
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    warning_dist = calculate_distance(ego_df, correctwarning)
+    if warning_dist.empty:
+        return {"latestWarningDistance_LST": 0.0}
+
+    return {"latestWarningDistance_LST": float(warning_dist.iloc[-1])}
+
+
+def earliestWarningDistance_LST(data) -> dict:
+    """预警距离计算流水线"""
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    warning_dist = calculate_distance(ego_df, correctwarning)
+    if warning_dist.empty:
+        return {"earliestWarningDistance_LST": 0.0}
+
+    return {"earliestWarningDistance_LST": float(warning_dist.iloc[0])}
+
+
+def latestWarningDistance_TTC_LST(data) -> dict:
+    """TTC计算流水线"""
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    warning_dist = calculate_distance(ego_df, correctwarning)
+    if warning_dist.empty:
+        return {"latestWarningDistance_TTC_LST": 0.0}
+
+    warning_speed = calculate_relative_speed(ego_df, correctwarning)
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        ttc = np.where(warning_speed != 0, warning_dist / warning_speed, np.inf)
+
+    return {"latestWarningDistance_TTC_LST": float(ttc[-1]) if len(ttc) > 0 else np.inf}
+
+
+def earliestWarningDistance_TTC_LST(data) -> dict:
+    """TTC计算流水线"""
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    warning_dist = calculate_distance(ego_df, correctwarning)
+    if warning_dist.empty:
+        return {"earliestWarningDistance_TTC_LST": 0.0}
+
+    warning_speed = calculate_relative_speed(ego_df, correctwarning)
+
+    with np.errstate(divide='ignore', invalid='ignore'):
+        ttc = np.where(warning_speed != 0, warning_dist / warning_speed, np.inf)
+
+    return {"earliestWarningDistance_TTC_LST": float(ttc[0]) if len(ttc) > 0 else np.inf}
+
+def warningDelayTime_LST(data):
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    HMI_warning_rows = ego_df[(ego_df['ifwarning'] == correctwarning)]['simTime'].tolist()
+    simTime_HMI = HMI_warning_rows[0] if len(HMI_warning_rows) > 0 else None
+    rosbag_warning_rows = ego_df[(ego_df['event_info.eventSource'].notna())]['simTime'].tolist()
+    simTime_rosbag = rosbag_warning_rows[0] if len(rosbag_warning_rows) > 0 else None
+    if (simTime_HMI is None) and (simTime_rosbag is None):
+        print("没有发出预警!")
+        delay_time = 100.0
+    elif (simTime_HMI is not None) and (simTime_rosbag is not None):
+        delay_time = abs(simTime_HMI - simTime_rosbag)
+    elif (simTime_HMI is not None) and (simTime_rosbag is None):
+        print("没有发出预警!")
+        delay_time = None
+    else:
+        delay_time = simTime_rosbag
+    return {"warningDelayTime_LST": delay_time}
+
+def warningDelayTimeOf4_LST(data):
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    ego_speed_simtime = ego_df[ego_df['accel'] <= -4]['simTime'].tolist() # 单位m/s^2
+    warning_simTime = ego_df[ego_df['ifwarning'] == correctwarning]['simTime'].tolist()
+    if (len(warning_simTime) == 0) and (len(ego_speed_simtime) == 0):
+        return {"warningDelayTimeOf4_LST": 0}
+    elif (len(warning_simTime) == 0) and (len(ego_speed_simtime) > 0):
+        return {"warningDelayTimeOf4_LST": ego_speed_simtime[0]}
+    elif (len(warning_simTime) > 0) and (len(ego_speed_simtime) == 0):
+        return {"warningDelayTimeOf4_LST": None}
+    else:
+        return {"warningDelayTimeOf4_LST": warning_simTime[0] - ego_speed_simtime[0]}
+
+def rightWarningSignal_LST(data):
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict.get(scenario_name)
+    ego_df = data.ego_data
+    if correctwarning is None:
+        print("无法获取正确预警信号标志位!")
+        return
+    warning_rows = ego_df[(ego_df['ifwarning'] == correctwarning) & (ego_df['ifwarning'].notna())]
+    if warning_rows.empty:
+        return {"rightWarningSignal_LST": -1}
+    else:
+        return {"rightWarningSignal_LST": 1}
+
+def ifCrossingRedLight_LST(data):
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    redlight_simtime = ego_df[(ego_df['ifwarning'] == correctwarning) & (ego_df['stateMask'] == 1) & (ego_df['relative_dist'] == 0) & (ego_df['v'] != 0)]['simTime']
+    if redlight_simtime.empty:
+        return {"ifCrossingRedLight_LST": 0}
+    else:
+        return {"ifCrossingRedLight_LST": 1}
+
+def ifStopgreenWaveSpeedGuidance_LST(data):
+    scenario_name = data.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+    ego_df = data.ego_data
+    greenlight_simtime = ego_df[(ego_df['ifwarning'] == correctwarning) & (ego_df['stateMask'] == 0) & (ego_df['v'] == 0)]['simTime']
+    if greenlight_simtime.empty:
+        return {"ifStopgreenWaveSpeedGuidance_LST": 0}
+    else:
+        return {"ifStopgreenWaveSpeedGuidance_LST": 1}
+
+def rightWarningSignal_PGVIL(data_processed) -> dict:
+    """判断是否发出正确预警信号"""
+
+    ego_df = data_processed.ego_data
+    scenario_name = data_processed.function_config["function"]["scenario"]["name"]
+    correctwarning = scenario_sign_dict[scenario_name]
+
+    if correctwarning is None:
+        print("无法获取正确的预警信号标志位!")
+        return None
+    # 找出本行 correctwarning 和 ifwarning 相等,且 correctwarning 不是 NaN 的行
+    warning_rows = ego_df[(ego_df['ifwarning'] == correctwarning) & (ego_df['ifwarning'].notna())]
+
+    if warning_rows.empty:
+        return {"rightWarningSignal_PGVIL": -1}
+    else:
+        return {"rightWarningSignal_PGVIL": 1}
+
+
+def latestWarningDistance_PGVIL(data_processed) -> dict:
+    """预警距离计算流水线"""
+    ego_df = data_processed.ego_data
+    obj_df = data_processed.object_df
+    warning_data = get_first_warning(data_processed)
+    if warning_data is None:
+        return {"latestWarningDistance_PGVIL": 0.0}
+
+    ego, obj = extract_ego_obj(warning_data)
+    distances = calculate_distance_PGVIL(
+        np.array([[ego["posX"], ego["posY"]]]), obj[["posX", "posY"]].values
+    )
+    if distances.size == 0:
+        print("没有找到数据!")
+        return {"latestWarningDistance_PGVIL": 15}  # 或返回其他默认值,如0.0
+
+    return {"latestWarningDistance_PGVIL": float(np.min(distances))}
+
+
+def latestWarningDistance_TTC_PGVIL(data_processed) -> dict:
+    """TTC计算流水线"""
+    ego_df = data_processed.ego_data
+    obj_df = data_processed.object_df
+
+    warning_data = get_first_warning(data_processed)
+    if warning_data is None:
+        return {"latestWarningDistance_TTC_PGVIL": 0.0}
+
+    ego, obj = extract_ego_obj(warning_data)
+    # 向量化计算
+    ego_pos = np.array([[ego["posX"], ego["posY"]]])
+    ego_speed = np.array([[ego["speedX"], ego["speedY"]]])
+    obj_pos = obj[["posX", "posY"]].values
+    obj_speed = obj[["speedX", "speedY"]].values
+
+    distances = calculate_distance_PGVIL(ego_pos, obj_pos)
+    rel_speeds = calculate_relative_speed_PGVIL(ego_speed, obj_speed)
+
+    with np.errstate(divide="ignore", invalid="ignore"):
+        ttc = np.where(rel_speeds != 0, distances / rel_speeds, np.inf)
+    if ttc.size == 0:
+        print("没有找到数据!")
+        return {"latestWarningDistance_TTC_PGVIL": 2}  # 或返回其他默认值,如0.0
+
+    return {"latestWarningDistance_TTC_PGVIL": float(np.nanmin(ttc))}
+
+def earliestWarningDistance_PGVIL(data_processed) -> dict:
+    """预警距离计算流水线"""
+    ego_df = data_processed.ego_data
+    obj_df = data_processed.object_df
+
+    warning_data = get_first_warning(data_processed)
+    if warning_data is None:
+        return {"earliestWarningDistance_PGVIL": 0}
+
+    ego, obj = extract_ego_obj(warning_data)
+    distances = calculate_distance_PGVIL(
+        np.array([[ego["posX"], ego["posY"]]]), obj[["posX", "posY"]].values
+    )
+    if distances.size == 0:
+        print("没有找到数据!")
+        return {"earliestWarningDistance_PGVIL": 15}  # 或返回其他默认值,如0.0
+
+    return {"earliestWarningDistance": float(np.min(distances))}
+
+def earliestWarningDistance_TTC_PGVIL(data_processed) -> dict:
+    """TTC计算流水线"""
+    ego_df = data_processed.ego_data
+    obj_df = data_processed.object_df
+
+    warning_data = get_first_warning(data_processed)
+    if warning_data is None:
+        return {"earliestWarningDistance_TTC_PGVIL": 0.0}
+
+    ego, obj = extract_ego_obj(warning_data)
+
+    # 向量化计算
+    ego_pos = np.array([[ego["posX"], ego["posY"]]])
+    ego_speed = np.array([[ego["speedX"], ego["speedY"]]])
+    obj_pos = obj[["posX", "posY"]].values
+    obj_speed = obj[["speedX", "speedY"]].values
+
+    distances = calculate_distance_PGVIL(ego_pos, obj_pos)
+    rel_speeds = calculate_relative_speed_PGVIL(ego_speed, obj_speed)
+
+    with np.errstate(divide="ignore", invalid="ignore"):
+        ttc = np.where(rel_speeds != 0, distances / rel_speeds, np.inf)
+    if ttc.size == 0:
+        print("没有找到数据!")
+        return {"earliestWarningDistance_TTC_PGVIL": 2}  # 或返回其他默认值,如0.0
+
+    return {"earliestWarningDistance_TTC_PGVIL": float(np.nanmin(ttc))}
+
+    
+# def delayOfEmergencyBrakeWarning(data_processed) -> dict:
+#     #预警时机相对背景车辆减速度达到-4m/s2后的时延
+#     ego_df = data_processed.ego_data
+#     obj_df = data_processed.object_df
+#     warning_data = get_first_warning(data_processed)
+#     if warning_data is None:
+#         return {"delayOfEmergencyBrakeWarning": -1}
+#     try:
+#         ego, obj = extract_ego_obj(warning_data)
+#         # 向量化计算
+#         obj_speed = np.array([[obj_df["speedX"], obj_df["speedY"]]])
+#         # 计算背景车辆减速度
+#         simtime_gap = obj["simTime"].iloc[1] - obj["simTime"].iloc[0]
+#         simtime_freq = 1 / simtime_gap#每秒采样频率
+#         # simtime_freq为一个时间窗,找出时间窗内的最大减速度
+#         obj_speed_magnitude = np.linalg.norm(obj_speed, axis=1)#速度向量的模长
+#         obj_speed_change = np.diff(speed_magnitude)#速度模长的变化量
+#         obj_deceleration = np.diff(obj_speed_magnitude) / simtime_gap
+#         #找到最大减速度,若最大减速度小于-4m/s2,则计算最大减速度对应的时间,和warning_data的差值进行对比
+#         max_deceleration = np.max(obj_deceleration)
+#         if max_deceleration < -4:
+#             max_deceleration_times = obj["simTime"].iloc[np.argmax(obj_deceleration)]
+#             max_deceleration_time = max_deceleration_times.iloc[0]
+#             delay_time = ego["simTime"] - max_deceleration_time
+#             return {"delayOfEmergencyBrakeWarning": float(delay_time)}
+
+#         else:
+#             print("没有达到预警减速度阈值:-4m/s^2")            
+#             return {"delayOfEmergencyBrakeWarning": -1}
+
+def warningDelayTime_PGVIL(data_processed) -> dict:
+    """车端接收到预警到HMI发出预警的时延"""    
+    ego_df = data_processed.ego_data
+    warning_data = get_first_warning(data_processed)
+
+    if warning_data is None:
+        return {"warningDelayTime_PGVIL": -1}
+    try:
+        ego, obj = extract_ego_obj(warning_data)
+        rosbag_warning_rows = ego_df[(ego_df['event_Type'].notna())]
+        
+        first_time = rosbag_warning_rows["simTime"].iloc[0]
+        warning_time = warning_data[warning_data["playerId"] == 1]["simTime"].iloc[0]
+        delay_time = warning_time - first_time
+        
+        return {"warningDelayTime_PGVIL": float(delay_time)}
+
+    except Exception as e:
+        print(f"计算预警时延时发生错误: {e}")
+        return {"warningDelayTime_PGVIL": -1}
+
+
+def get_car_to_stop_line_distance(ego, car_point, stop_line_points):
+    """
+    计算主车后轴中心点到停止线的距离
+    :return 距离
+    """
+    distance_carpoint_carhead = ego['dimX']/2 + ego['offX']
+    # 计算停止线的方向向量
+    line_vector = np.array(
+        [
+            stop_line_points[1][0] - stop_line_points[0][0],
+            stop_line_points[1][1] - stop_line_points[0][1],
+        ]
+    )
+    direction_vector_norm = np.linalg.norm(line_vector)
+    direction_vector_unit = line_vector / direction_vector_norm if direction_vector_norm != 0 else np.array([0, 0])
+    # 计算主车后轴中心点到停止线投影的坐标(垂足)
+    projection_length = np.dot(car_point - stop_line_points[0], direction_vector_unit)
+    perpendicular_foot = stop_line_points[0] + projection_length * direction_vector_unit
+    
+    # 计算主车后轴中心点到垂足的距离
+    distance_to_foot = np.linalg.norm(car_point - perpendicular_foot)
+    carhead_distance_to_foot = distance_to_foot - distance_carpoint_carhead
+    
+    return carhead_distance_to_foot
+
+def ifCrossingRedLight_PGVIL(data_processed) -> dict:
+    #判断车辆是否闯红灯
+
+    stop_line_points = np.array([(276.555,-35.575),(279.751,-33.683)])
+    X_OFFSET = 258109.4239876
+    Y_OFFSET = 4149969.964821
+    stop_line_points += np.array([[X_OFFSET, Y_OFFSET]])
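+    # 注意:以上停止线端点与坐标偏移量为当前场景的硬编码数值,更换场景时需同步更新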
+    ego_df = data_processed.ego_data
+    
+    prev_distance = float('inf')  # 初始化为正无穷大
+    """
+    traffic_light_status
+    0x100000为绿灯,1048576
+    0x1000000为黄灯,16777216
+    0x10000000为红灯,268435456
+    """
+    red_light_violation = False
+    for index ,ego in ego_df.iterrows():
+        car_point = (ego["posX"], ego["posY"])
+        stateMask = ego["stateMask"]
+        simTime = ego["simTime"]
+        distance_to_stopline = get_car_to_stop_line_distance(ego, car_point, stop_line_points)
+
+        #主车车头跨越停止线时非绿灯,返回-1,闯红灯
+        if prev_distance > 0 and distance_to_stopline < 0:
+            if stateMask != 1048576:
+                red_light_violation = True
+            break
+        prev_distance = distance_to_stopline
+
+    if red_light_violation:
+        return {"ifCrossingRedLight_PGVIL": -1}#闯红灯
+    else:
+        return {"ifCrossingRedLight_PGVIL": 1}#没有闯红灯
+
+
+# def ifStopgreenWaveSpeedGuidance(data_processed) -> dict:
+#     #在绿波车速引导期间是否发生停车
+
+
+# def mindisStopline(data_processed) -> dict:
+#     """
+#     当有停车让行标志/标线时车辆最前端与停车让行线的最小距离应在0-4m之间
+#     """
+#     ego_df = data_processed.ego_data
+#     obj_df = data_processed.object_df
+#     stop_giveway_simtime = ego_df[
+#         ego_df["sign_type1"] == 32 |
+#         ego_df["stopline_type"] == 3
+#     ]["simTime"]
+#     stop_giveway_data = ego_df[
+#         ego_df["sign_type1"] == 32 |
+#         ego_df["stopline_type"] == 3
+#     ]["simTime"]
+#     if stop_giveway_simtime.empty:
+#         print("没有停车让行标志/标线")
+        
+#     ego_data = stop_giveway_data[stop_giveway_data['playerId'] == 1]
+#     distance_carpoint_carhead = ego_data['dimX'].iloc[0]/2 + ego_data['offX'].iloc[0]
+#     distance_to_stoplines = []
+#     for _,row in ego_data.iterrows():
+#         ego_pos = np.array([row["posX"], row["posY"], row["posH"]])
+#         stop_line_points = [
+#             [row["stopline_x1"], row["stopline_y1"]],
+#             [row["stopline_x2"], row["stopline_y2"]],
+#         ]
+#         distance_to_stopline = get_car_to_stop_line_distance(ego_pos, stop_line_points)
+#         distance_to_stoplines.append(distance_to_stopline)
+    
+#     mindisStopline = np.min(distance_to_stoplines) - distance_carpoint_carhead
+#     return {"mindisStopline": mindisStopline}
+    
+   
+
+class FunctionRegistry:
+    """动态函数注册器(支持参数验证)"""
+
+    def __init__(self, data_processed):
+        self.logger = LogManager().get_logger()  # 获取全局日志实例
+        self.data = data_processed
+        self.fun_config = data_processed.function_config["function"]
+        self.level_3_metrics = self._extract_level_3_metrics(self.fun_config)
+        self._registry: Dict[str, Callable] = {}
+        self._registry = self._build_registry()
+
+    def _extract_level_3_metrics(self, config_node: dict) -> list:
+        """DFS遍历提取第三层指标(时间复杂度O(n))[4](@ref)"""
+        metrics = []
+
+        def _recurse(node):
+            if isinstance(node, dict):
+                if "name" in node and not any(
+                    isinstance(v, dict) for v in node.values()
+                ):
+                    metrics.append(node["name"])
+                for v in node.values():
+                    _recurse(v)
+
+        _recurse(config_node)
+        self.logger.info(f"评比的功能指标列表:{metrics}")
+        return metrics
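+
+        # 示意:形如 {"scenario": {"name": "...", "latestWarningDistance_LST": {"name": "latestWarningDistance_LST", "max": ..., "min": ...}}}
+        # 的配置将提取出 ["latestWarningDistance_LST", ...];叶子节点的判定条件是其值中不再包含dict。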
+
+    def _build_registry(self) -> dict:
+        """自动注册指标函数(防御性编程)"""
+        registry = {}
+        for func_name in self.level_3_metrics:
+            try:
+                registry[func_name] = globals()[func_name]
+            except KeyError:
+                print(f"未实现指标函数: {func_name}")
+                self.logger.error(f"未实现指标函数: {func_name}")
+        return registry
+
+    def batch_execute(self) -> dict:
+        """批量执行指标计算(带熔断机制)"""
+        results = {}
+        for name, func in self._registry.items():
+            try:
+                result = func(self.data)  # 统一传递数据上下文
+                results.update(result)
+            except Exception as e:
+                print(f"{name} 执行失败: {str(e)}")
+                self.logger.error(f"{name} 执行失败: {str(e)}", exc_info=True)
+                results[name] = None
+        self.logger.info(f"功能指标计算结果:{results}")
+        return results
+
+
+class FunctionManager:
+    """管理功能指标计算的类"""
+
+    def __init__(self, data_processed):
+        self.data = data_processed
+        self.function = FunctionRegistry(self.data)
+
+    def report_statistic(self):
+        """
+        计算并报告功能指标结果。
+        :return: 评估结果
+        """
+        function_result = self.function.batch_execute()
+
+        evaluator = Score(self.data.function_config)
+        result = evaluator.evaluate(function_result)
+        print("\n[功能性表现及评价结果]")
+        return result
+        # self.logger.info(f'Function Result: {function_result}')
+
+
+# 使用示例
+if __name__ == "__main__":
+    pass
+    # print("\n[功能类表现及得分情况]")

+ 654 - 0
modules/metric/safety.py

@@ -0,0 +1,654 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##################################################################
+#
+# Copyright (c) 2023 CICV, Inc. All Rights Reserved
+#
+##################################################################
+"""
+@Authors:           zhanghaiwen(zhanghaiwen@china-icv.cn)
+@Data:              2023/07/25
+@Last Modified:     2023/07/26
+@Summary:           safe metrics
+"""
+
+
+import math
+import numpy as np
+import pandas as pd
+
+from modules.lib.score import Score
+from modules.config import config
+
+from collections import defaultdict
+import scipy.integrate as spi
+
+
+class Safe(object):
+
+    def __init__(self, data_processed):
+
+        # self.logger = log.get_logger()  # 使用时再初始化
+
+        self.data_processed = data_processed
+
+        self.df = data_processed.object_df.copy()
+        self.ego_df = data_processed.ego_data
+        self.obj_id_list = data_processed.obj_id_list
+        self.metric_list = [
+            "TTC",
+            "MTTC",
+            "THW",
+            "LatSD",
+            "BTN",
+            "collisionRisk",
+            "collisionSeverity",
+        ]
+
+        # score data
+        self.calculated_value = {
+            "TTC": 1.0,
+            "MTTC": 1.0,
+            "THW": 1.0,
+            # 'LonSD': 0.0,
+            "LatSD": 0.0,
+            # 'DRAC': 0.0,
+            "BTN": 1.0,
+            # 'STN': 1.0,
+            "collisionRisk": 0.0,
+            "collisionSeverity": 0.0,
+        }
+
+        # lists of drving control info
+
+        self.time_list = self.ego_df["simTime"].values.tolist()
+
+        self.frame_list = self.ego_df["simFrame"].values.tolist()
+
+        self.collisionRisk = 0
+        self.empty_flag = True
+
+        # no car following scene
+        if len(self.obj_id_list) > 1:
+            self.unsafe_df = pd.DataFrame(
+                columns=["start_time", "end_time", "start_frame", "end_frame", "type"]
+            )
+            self.unsafe_time_df = pd.DataFrame(
+                columns=["start_time", "end_time", "start_frame", "end_frame", "type"]
+            )
+            self.unsafe_dist_df = pd.DataFrame(
+                columns=["start_time", "end_time", "start_frame", "end_frame", "type"]
+            )
+            # self.unsafe_acce_df = pd.DataFrame(columns=['start_time', 'end_time', 'start_frame', 'end_frame', 'type'])
+            self.unsafe_acce_drac_df = pd.DataFrame(
+                columns=["start_time", "end_time", "start_frame", "end_frame", "type"]
+            )
+            self.unsafe_acce_xtn_df = pd.DataFrame(
+                columns=["start_time", "end_time", "start_frame", "end_frame", "type"]
+            )
+            self.unsafe_prob_df = pd.DataFrame(
+                columns=["start_time", "end_time", "start_frame", "end_frame", "type"]
+            )
+            self.most_dangerous = {}
+            self.pass_percent = {}
+            self._safe_param_cal_new()
+
+    def _safe_param_cal_new(self):
+
+        Tc = 0.3  # 驾驶员制动反应时间(单位:秒)
+
+        rho = Tc
+        ego_accel_max = self.data_processed.vehicle_config[
+            "EGO_ACCEL_MAX"
+        ]  # 自车油门最大加速度
+        obj_decel_max = self.data_processed.vehicle_config[
+            "OBJ_DECEL_MAX"
+        ]  # 前车刹车最大减速度
+        ego_decel_min = self.data_processed.vehicle_config[
+            "EGO_DECEL_MIN"
+        ]  # 自车刹车最小减速度
+        ego_decel_lon_max = self.data_processed.vehicle_config["EGO_DECEL_LON_MAX"]
+        ego_decel_lat_max = self.data_processed.vehicle_config["EGO_DECEL_LAT_MAX"]
+
+        # 构建双层字典数据结构
+        obj_dict = defaultdict(dict)
+        # 构建字典列表,每个元素是一个字典,包含'start_time', 'end_time', 'start_frame', 'end_frame', 'type'五个键值对
+        obj_data_dict = self.df.to_dict("records")
+
+        for item in obj_data_dict:
+            obj_dict[item["simFrame"]][item["playerId"]] = item
+
+        df_list = []
+        EGO_PLAYER_ID = 1
+
+        # self.empty_flag = True
+        for frame_num in self.frame_list:
+            ego_data = obj_dict[frame_num][EGO_PLAYER_ID]
+            v1 = ego_data["v"]  # km/h to m/s
+            x1 = ego_data["posX"]
+            y1 = ego_data["posY"]
+            h1 = ego_data["posH"]
+            # len1 = ego_data['dimX']
+            # width1 = ego_data['dimY']
+            # o_x1 = ego_data['offX']
+            v_x1 = ego_data["speedX"]  # km/h to m/s
+            v_y1 = ego_data["speedY"]  # km/h to m/s
+            a_x1 = ego_data["accelX"]
+            a_y1 = ego_data["accelY"]
+            # a1 = ego_data['accel']
+
+            for playerId in self.obj_id_list:
+                if playerId == EGO_PLAYER_ID:
+                    continue
+
+                try:
+                    obj_data = obj_dict[frame_num][playerId]
+                except KeyError:
+                    continue
+
+                x2 = obj_data["posX"]
+                y2 = obj_data["posY"]
+
+                dist = self.dist(x1, y1, x2, y2)
+                obj_data["dist"] = dist
+
+                v_x2 = obj_data["speedX"]  # km/h to m/s
+                v_y2 = obj_data["speedY"]  # km/h to m/s
+
+                v2 = obj_data["v"]  # km/h to m/s
+                # h2 = obj_data['posH']
+                # len2 = obj_data['dimX']
+                # width2 = obj_data['dimY']
+                # o_x2 = obj_data['offX']
+                a_x2 = obj_data["accelX"]
+                a_y2 = obj_data["accelY"]
+                # a2 = obj_data['accel']
+
+                dx, dy = x2 - x1, y2 - y1
+
+                # 定义矢量A和x轴正向向量x
+                A = np.array([dx, dy])
+                x = np.array([1, 0])
+
+                # 计算点积和向量长度
+                dot_product = np.dot(A, x)
+                vector_length_A = np.linalg.norm(A)
+                vector_length_x = np.linalg.norm(x)
+
+                # 计算夹角的余弦值
+                cos_theta = dot_product / (vector_length_A * vector_length_x)
+
+                # 将余弦值转换为角度值(弧度制)
+                beta = np.arccos(cos_theta)  # 如何通过theta正负确定方向
+
+                lon_d = dist * math.cos(beta - h1)
+                lat_d = abs(
+                    dist * math.sin(beta - h1)
+                )  # 需要增加左正右负的判断,但beta取值为[0,pi)
+
+                obj_dict[frame_num][playerId]["lon_d"] = lon_d
+                obj_dict[frame_num][playerId]["lat_d"] = lat_d
+
+                # 筛选出自车前方100米、后方5米、横向4米以内的目标车,用于计算安全指标
+                # print(f"lon_d: {lon_d}, lat_d: {lat_d}")
+                if lon_d > 100 or lon_d < -5 or lat_d > 4:
+                    continue
+
+                self.empty_flag = False
+
+                vx, vy = v_x1 - v_x2, v_y1 - v_y2
+                ax, ay = a_x2 - a_x1, a_y2 - a_y1
+
+                v_ego_p = self._cal_v_ego_projection(dx, dy, v_x1, v_y1)
+                v_obj_p = self._cal_v_ego_projection(dx, dy, v_x2, v_y2)
+                vrel_projection_in_dist = self._cal_v_projection(dx, dy, vx, vy)
+                arel_projection_in_dist = self._cal_a_projection(
+                    dx, dy, vx, vy, ax, ay, x1, y1, x2, y2, v_x1, v_y1, v_x2, v_y2
+                )
+
+                obj_dict[frame_num][playerId][
+                    "vrel_projection_in_dist"
+                ] = vrel_projection_in_dist
+                obj_dict[frame_num][playerId][
+                    "arel_projection_in_dist"
+                ] = arel_projection_in_dist
+                obj_dict[frame_num][playerId]["v_ego_projection_in_dist"] = v_ego_p
+                obj_dict[frame_num][playerId]["v_obj_projection_in_dist"] = v_obj_p
+
+                obj_type = obj_data["type"]
+
+                TTC = self._cal_TTC(dist, vrel_projection_in_dist)
+                MTTC = self._cal_MTTC(
+                    TTC, vrel_projection_in_dist, arel_projection_in_dist
+                )
+                THW = self._cal_THW(dist, v_ego_p)
+
+                # 单车道时可用
+                # LonSD = self._cal_longitudinal_safe_dist(v_ego_p, v_obj_p, rho, ego_accel_max, ego_decel_min,
+                #                                          obj_decel_max)
+
+                lat_dist = 0.5
+                v_right = v1
+                v_left = v2
+                a_right_lat_brake_min = 1
+                a_left_lat_brake_min = 1
+                a_lat_max = 5
+
+                LatSD = self._cal_lateral_safe_dist(
+                    lat_dist,
+                    v_right,
+                    v_left,
+                    rho,
+                    a_right_lat_brake_min,
+                    a_left_lat_brake_min,
+                    a_lat_max,
+                )
+
+                # DRAC = self._cal_DRAC(dist, vrel_projection_in_dist, len1, len2, width1, width2, o_x1, o_x2)
+
+                lon_a1 = a_x1 * math.cos(h1) + a_y1 * math.sin(h1)
+                lon_a2 = a_x2 * math.cos(h1) + a_y2 * math.sin(h1)
+                lon_a = abs(lon_a1 - lon_a2)
+                lon_d = dist * abs(math.cos(beta - h1))
+                lon_v = v_x1 * math.cos(h1) + v_y1 * math.sin(h1)
+                BTN = self._cal_BTN_new(lon_a1, lon_a, lon_d, lon_v, ego_decel_lon_max)
+
+                lat_a1 = a_x1 * math.sin(h1) * -1 + a_y1 * math.cos(h1)
+                lat_a2 = a_x2 * math.sin(h1) * -1 + a_y2 * math.cos(h1)
+                lat_a = abs(lat_a1 - lat_a2)
+                lat_d = dist * abs(math.sin(beta - h1))
+                lat_v = v_x1 * math.sin(h1) * -1 + v_y1 * math.cos(h1)
+                # STN = self._cal_STN_new(TTC, lat_a1, lat_a, lat_d, lat_v, ego_decel_lat_max, width1, width2)
+
+                obj_dict[frame_num][playerId]["lat_v_rel"] = v_x1 - v_x2
+                obj_dict[frame_num][playerId]["lon_v_rel"] = v_y1 - v_y2
+
+                TTC = None if (not TTC or TTC < 0) else TTC
+                MTTC = None if (not MTTC or MTTC < 0) else MTTC
+                THW = None if (not THW or THW < 0) else THW
+
+                # 原DRAC计算已被注释,需移除相关代码引用
+                # DRAC = 10 if DRAC >= 10 else DRAC  # 删除这行
+                # 修改后的指标赋值
+                obj_dict[frame_num][playerId]["TTC"] = TTC
+                obj_dict[frame_num][playerId]["MTTC"] = MTTC
+                obj_dict[frame_num][playerId]["THW"] = THW
+                # obj_dict[frame_num][playerId]['LonSD'] = LonSD
+                obj_dict[frame_num][playerId]["LatSD"] = LatSD
+                # obj_dict[frame_num][playerId]['DRAC'] = DRAC  # 删除这行
+                obj_dict[frame_num][playerId]["BTN"] = abs(BTN)
+                # obj_dict[frame_num][playerId]['STN'] = abs(STN)  # 删除这行
+
+                # 确保以下变量已定义
+                collisionSeverity = (
+                    collisionSeverity if "collisionSeverity" in locals() else 0
+                )
+                pr_death = pr_death if "pr_death" in locals() else 0
+                collisionRisk = collisionRisk if "collisionRisk" in locals() else 0
+
+                obj_dict[frame_num][playerId]["collisionSeverity"] = (
+                    collisionSeverity * 100
+                )
+                obj_dict[frame_num][playerId]["pr_death"] = pr_death * 100
+                obj_dict[frame_num][playerId]["collisionRisk"] = collisionRisk * 100
+                # print(
+                #     f"{playerId} {frame_num} {TTC} {MTTC} {THW} {LatSD} {BTN} {collisionSeverity} {pr_death} {collisionRisk}"
+                # )
+
+            df_fnum = pd.DataFrame(obj_dict[frame_num].values())
+            df_list.append(df_fnum)
+
+        df_safe = pd.concat(df_list)
+
+        col_list = [
+            "simTime",
+            "simFrame",
+            "playerId",
+            "TTC",
+            "MTTC",
+            "THW",
+            "LatSD",
+            "BTN",  # 移除STN
+            "collisionSeverity",
+            "pr_death",
+            "collisionRisk",
+        ]
+        if not self.empty_flag:
+            self.df = df_safe[col_list].reset_index(drop=True)
+            # self.df_safe是有问题的,因为后边统计的函数里是没有self.df_safe这个变量的,只有self.df,
+        # 需要增加如果是空的情况下,self.df_safe的值为默认值。
+
+    def _cal_v_ego_projection(self, dx, dy, v_x1, v_y1):
+        # 计算 AB 连线的向量 AB
+        # dx = x2 - x1
+        # dy = y2 - y1
+
+        # 计算 AB 连线的模长 |AB|
+        AB_mod = math.sqrt(dx**2 + dy**2)
+
+        # 计算 AB 连线的单位向量 U_AB
+        U_ABx = dx / AB_mod
+        U_ABy = dy / AB_mod
+
+        # 计算 A 在 AB 连线上的速度 V1_on_AB
+        V1_on_AB = v_x1 * U_ABx + v_y1 * U_ABy
+
+        return V1_on_AB
+
+    def _cal_v_projection(self, dx, dy, vx, vy):
+        # 计算 AB 连线的向量 AB
+        # dx = x2 - x1
+        # dy = y2 - y1
+
+        # 计算 AB 连线的模长 |AB|
+        AB_mod = math.sqrt(dx**2 + dy**2)
+
+        # 计算 AB 连线的单位向量 U_AB
+        U_ABx = dx / AB_mod
+        U_ABy = dy / AB_mod
+
+        # 计算 A 相对于 B 的速度 V_relative
+        # vx = vx1 - vx2
+        # vy = vy1 - vy2
+
+        # 计算 A 相对于 B 在 AB 连线上的速度 V_on_AB
+        V_on_AB = vx * U_ABx + vy * U_ABy
+
+        return V_on_AB
+
+    def _cal_a_projection(
+        self, dx, dy, vx, vy, ax, ay, x1, y1, x2, y2, v_x1, v_y1, v_x2, v_y2
+    ):
+        # 计算 AB 连线的向量 AB
+        # dx = x2 - x1
+        # dy = y2 - y1
+
+        # 计算 θ
+        V_mod = math.sqrt(vx**2 + vy**2)
+        AB_mod = math.sqrt(dx**2 + dy**2)
+        if V_mod == 0 or AB_mod == 0:
+            return 0
+
+        cos_theta = (vx * dx + vy * dy) / (V_mod * AB_mod)
+        theta = math.acos(cos_theta)
+
+        # 计算 AB 连线的模长 |AB|
+        AB_mod = math.sqrt(dx**2 + dy**2)
+
+        # 计算 AB 连线的单位向量 U_AB
+        U_ABx = dx / AB_mod
+        U_ABy = dy / AB_mod
+
+        # 计算 A 相对于 B 的加速度 a_relative
+        # ax = ax1 - ax2
+        # ay = ay1 - ay2
+
+        # 计算 A 相对于 B 在 AB 连线上的加速度 a_on_AB
+        a_on_AB = ax * U_ABx + ay * U_ABy
+
+        VA = np.array([v_x1, v_y1])
+        VB = np.array([v_x2, v_y2])
+        D_A = np.array([x1, y1])
+        D_B = np.array([x2, y2])
+        V_r = VA - VB
+        V = np.linalg.norm(V_r)
+        w = self._cal_relative_angular_v(theta, D_A, D_B, VA, VB)
+        a_on_AB_back = self._calculate_derivative(a_on_AB, w, V, theta)
+        return a_on_AB_back
+
+    # 计算相对加速度
+    def _calculate_derivative(self, a, w, V, theta):
+        # 计算(V×cos(θ))'的值
+        # derivative = a * math.cos(theta) - w * V * math.sin(theta)
+        derivative = a - w * V * math.sin(theta)
+        return derivative
+
+    def _cal_relative_angular_v(self, theta, A, B, VA, VB):
+        dx = A[0] - B[0]
+        dy = A[1] - B[1]
+        dvx = VA[0] - VB[0]
+        dvy = VA[1] - VB[1]
+        # (dx * dvy - dy * dvx)
+        angular_velocity = (
+            math.sqrt(dvx**2 + dvy**2) * math.sin(theta) / math.sqrt(dx**2 + dy**2)
+        )
+        return angular_velocity
+
+    def _death_pr(self, obj_type, v_relative):
+        if obj_type == 5:
+            p_death = 1 / (1 + np.exp(7.723 - 0.15 * v_relative))
+        else:
+            p_death = 1 / (1 + np.exp(8.192 - 0.12 * v_relative))
+        return p_death
+
+    def _cal_collisionRisk_level(self, obj_type, v_relative, collisionSeverity):
+        if obj_type == 5:
+            p_death = 1 / (1 + np.exp(7.723 - 0.15 * v_relative))
+        else:
+            p_death = 1 / (1 + np.exp(8.192 - 0.12 * v_relative))
+        collisionRisk = 0.4 * p_death + 0.6 * collisionSeverity
+        return collisionRisk
+
+    # 求两车之间当前距离
+    def dist(self, x1, y1, x2, y2):
+        dist = np.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)
+        return dist
+
+    # TTC (time to collision)
+    def _cal_TTC(self, dist, vrel_projection_in_dist):
+        if vrel_projection_in_dist == 0:
+            return math.inf
+        TTC = dist / vrel_projection_in_dist
+        return TTC
+
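+    # MTTC:考虑相对加速度的修正TTC,即方程 0.5*a_rel*t^2 + v_rel*t = dist 的最小正根;
+    # 判别式为负或无正根时返回nan,相对加速度为0且相对速度大于0时退化为 dist/v_rel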
+    def _cal_MTTC(self, dist, vrel_projection_in_dist, arel_projection_in_dist):
+        MTTC = math.nan
+        if arel_projection_in_dist != 0:
+            tmp = vrel_projection_in_dist**2 + 2 * arel_projection_in_dist * dist
+            if tmp < 0:
+                return math.nan
+            t1 = (
+                -1 * vrel_projection_in_dist - math.sqrt(tmp)
+            ) / arel_projection_in_dist
+            t2 = (
+                -1 * vrel_projection_in_dist + math.sqrt(tmp)
+            ) / arel_projection_in_dist
+            if t1 > 0 and t2 > 0:
+                if t1 >= t2:
+                    MTTC = t2
+                elif t1 < t2:
+                    MTTC = t1
+            elif t1 > 0 and t2 <= 0:
+                MTTC = t1
+            elif t1 <= 0 and t2 > 0:
+                MTTC = t2
+        if arel_projection_in_dist == 0 and vrel_projection_in_dist > 0:
+            MTTC = dist / vrel_projection_in_dist
+        return MTTC
+
+    # THW (time headway)
+    def _cal_THW(self, dist, v_ego_projection_in_dist):
+        if not v_ego_projection_in_dist:
+            THW = None
+        else:
+            THW = dist / v_ego_projection_in_dist
+        return THW
+
+    def velocity(self, v_x, v_y):
+        v = math.sqrt(v_x**2 + v_y**2) * 3.6
+        return v
+
+    def _cal_longitudinal_safe_dist(
+        self, v_ego_p, v_obj_p, rho, ego_accel_max, ego_decel_min, ego_decel_max
+    ):
+        lon_dist_min = (
+            v_ego_p * rho
+            + ego_accel_max * (rho**2) / 2
+            + (v_ego_p + rho * ego_accel_max) ** 2 / (2 * ego_decel_min)
+            - v_obj_p**2 / (2 * ego_decel_max)
+        )
+        return lon_dist_min
+
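+    # 横向安全距离:按类RSS横向模型估算——双方在反应时间rho内以a_lat_max横向加速,
+    # 随后分别以各自最小横向制动减速度刹停,所需横向间隔再叠加基础间距lat_dist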
+    def _cal_lateral_safe_dist(
+        self,
+        lat_dist,
+        v_right,
+        v_left,
+        rho,
+        a_right_lat_brake_min,
+        a_left_lat_brake_min,
+        a_lat_max,
+    ):
+        v_right_rho = v_right + rho * a_lat_max
+        v_left_rho = v_left + rho * a_lat_max
+        dist_min = lat_dist + (
+            (v_right + v_right_rho) * rho / 2
+            + v_right_rho**2 / a_right_lat_brake_min / 2
+            + ((v_left + v_left_rho) * rho / 2)
+            + v_left_rho**2 / a_left_lat_brake_min / 2
+        )
+        return dist_min
+
+    # DRAC (decelerate required avoid collision)
+    def _cal_DRAC(
+        self, dist, vrel_projection_in_dist, len1, len2, width1, width2, o_x1, o_x2
+    ):
+        dist_length = dist - (len2 / 2 - o_x2 + len1 / 2 + o_x1)  # 4.671
+        if dist_length < 0:
+            dist_width = dist - (width2 / 2 + width1 / 2)
+            if dist_width < 0:
+                return math.inf
+            else:
+                d = dist_width
+        else:
+            d = dist_length
+        DRAC = vrel_projection_in_dist**2 / (2 * d)
+        return DRAC
+
+    # BTN (brake threat number)
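+    # BTN衡量避撞所需纵向制动强度与自车最大纵向制动能力之比,|BTN|越接近或超过1表示制动避撞裕度越小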
+    def _cal_BTN_new(self, lon_a1, lon_a, lon_d, lon_v, ego_decel_lon_max):
+        BTN = (
+            lon_a1 + lon_a - lon_v**2 / (2 * lon_d)
+        ) / ego_decel_lon_max  # max_ay为此车可实现的最大纵向加速度,目前为本次实例里的最大值
+        return BTN
+
+    # STN (steer threat number)
+    def _cal_STN_new(
+        self, ttc, lat_a1, lat_a, lat_d, lat_v, ego_decel_lat_max, width1, width2
+    ):
+        STN = (
+            lat_a1
+            + lat_a
+            + 2
+            / ttc**2
+            * (
+                lat_d
+                + abs(ego_decel_lat_max * lat_v) * (width1 + width2) / 2
+                + abs(lat_v * ttc)
+            )
+        ) / ego_decel_lat_max
+        return STN
+
+    # BTN (brake threat number)
+    def cal_BTN(self, a_y1, ay, dy, vy, max_ay):
+        BTN = (
+            a_y1 + ay - vy**2 / (2 * dy)
+        ) / max_ay  # max_ay为此车可实现的最大纵向加速度,目前为本次实例里的最大值
+        return BTN
+
+    # STN (steer threat number)
+    def cal_STN(self, ttc, a_x1, ax, dx, vx, max_ax, width1, width2):
+        STN = (
+            a_x1
+            + ax
+            + 2
+            / ttc**2
+            * (dx + np.sign(max_ax * vx) * (width1 + width2) / 2 + vx * ttc)
+        ) / max_ax
+        return STN
+
+    # 追尾碰撞风险
+    def _normal_distribution(self, x):
+        mean = 1.32
+        std_dev = 0.26
+        return (1 / (math.sqrt(std_dev * 2 * math.pi))) * math.exp(
+            -0.5 * (x - mean) ** 2 / std_dev
+        )
+
+    def continuous_group(self, df):
+        time_list = df["simTime"].values.tolist()
+        frame_list = df["simFrame"].values.tolist()
+
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+
+        for i in range(len(frame_list)):
+            if not sub_group_time or frame_list[i] - frame_list[i - 1] <= 1:
+                sub_group_time.append(time_list[i])
+                sub_group_frame.append(frame_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [time_list[i]]
+                sub_group_frame = [frame_list[i]]
+
+        group_time.append(sub_group_time)
+        group_frame.append(sub_group_frame)
+        group_time = [g for g in group_time if len(g) >= 2]
+        group_frame = [g for g in group_frame if len(g) >= 2]
+
+        # 输出图表值
+        time = [[g[0], g[-1]] for g in group_time]
+        frame = [[g[0], g[-1]] for g in group_frame]
+
+        unfunc_time_df = pd.DataFrame(time, columns=["start_time", "end_time"])
+        unfunc_frame_df = pd.DataFrame(frame, columns=["start_frame", "end_frame"])
+
+        unfunc_df = pd.concat([unfunc_time_df, unfunc_frame_df], axis=1)
+        return unfunc_df
+
+    # 统计最危险的指标
+    def _safe_statistic_most_dangerous(self):
+
+        min_list = ["TTC", "MTTC", "THW", "LatSD"]
+        max_list = ["BTN", "collisionRisk", "collisionSeverity"]  # 移除STN
+
+        for metric in min_list:
+            if metric in self.metric_list:
+                if metric in self.df.columns:
+                    self.calculated_value[metric] = self.df[metric].min()
+                else:
+                    self.calculated_value[metric] = 10.0
+
+        for metric in max_list:
+            if metric in self.metric_list:
+                if metric in self.df.columns:
+                    self.calculated_value[metric] = self.df[metric].max()
+                else:
+                    self.calculated_value[metric] = 10.0
+        return self.calculated_value
+
+    def _safe_no_obj_statistic(self):
+        # list_metric = ["TTC","MTTC","THW","LonSD","LatSD","DRAC","BTN","STN","collisionSeverity", "collisionRisk"]
+
+        return self.calculated_value
+
+    def report_statistic(
+        self,
+    ):
+
+        safety_result = {}
+
+        if len(self.obj_id_list) == 1:
+            safety_result = self._safe_no_obj_statistic()
+        else:
+            safety_result = self._safe_statistic_most_dangerous()
+
+        evaluator = Score(self.data_processed.safety_config)
+        result = evaluator.evaluate(safety_result)
+        # self.logger.info(f"安全性表现及得分情况:[{result}]")
+        print("\n[安全性表现及得分情况]")
+
+        return result
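+
+
+# 以下是一个最小示意(非评测流程的一部分,数值为假设值),用于说明TTC/THW的计算口径:
+# TTC = 连线方向相对距离 / 连线方向相对接近速度;THW = 连线方向相对距离 / 自车在连线方向上的速度。
+def _demo_ttc_thw(dist, v_rel, v_ego):
+    ttc = math.inf if v_rel == 0 else dist / v_rel
+    thw = math.inf if v_ego == 0 else dist / v_ego
+    return ttc, thw
+
+# 例如 _demo_ttc_thw(30.0, 10.0, 15.0) 返回 (3.0, 2.0)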

+ 1221 - 0
modules/metric/traffic.py

@@ -0,0 +1,1221 @@
+
+
+import math
+import numpy as np
+import pandas as pd
+
+from modules.lib import log_manager
+
+
+from modules.lib.score import Score
+from modules.config import config
+
+
+
+class OvertakingViolation(object):
+    """超车违规类"""
+
+    def __init__(self, df_data):
+        print("超车违规类初始化中...")
+        self.traffic_violations_type = "超车违规类"
+
+        # self.logger = log.get_logger()  # 使用时再初始化
+
+        self.data = df_data.ego_data
+        self.ego_data = (
+            self.data[config.OVERTAKE_INFO].copy().reset_index(drop=True)
+        )  # Copy to avoid modifying the original DataFrame
+        header = self.ego_data.columns
+        if 2 in df_data.obj_id_list:
+            self.data_obj = df_data.obj_data[2]
+            self.obj_data = (
+                self.data_obj[config.OVERTAKE_INFO].copy().reset_index(drop=True)
+            )  # Copy to avoid modifying the original DataFrame
+        else:
+            self.obj_data = pd.DataFrame(columns=header)
+        if 3 in df_data.obj_id_list:
+            self.other_obj_data1 = df_data.obj_data[3]
+            self.other_obj_data = (
+                self.other_obj_data1[config.OVERTAKE_INFO].copy().reset_index(drop=True)
+            )
+        else:
+            self.other_obj_data = pd.DataFrame(columns=header)
+        self.overtake_on_right_count = 0
+        self.overtake_when_turn_around_count = 0
+        self.overtake_when_passing_car_count = 0
+        self.overtake_in_forbid_lane_count = 0
+        self.overtake_in_ramp_count = 0
+        self.overtake_in_tunnel_count = 0
+        self.overtake_on_accelerate_lane_count = 0
+        self.overtake_on_decelerate_lane_count = 0
+        self.overtake_in_different_senerios_count = 0
+
+    def different_road_area_simtime(self, df, threshold=0.5):
+        if not df:
+            return []
+        simtime_group = []
+        current_simtime_group = [df[0]]
+
+        for i in range(1, len(df)):
+            if abs(df[i] - df[i - 1]) <= threshold:
+                current_simtime_group.append(df[i])
+            else:
+                simtime_group.append(current_simtime_group)
+                current_simtime_group = [df[i]]
+
+        simtime_group.append(current_simtime_group)
+        return simtime_group
+
+    def _is_overtake(self, lane_id, dx, dy, ego_speedx, ego_speedy):
+        lane_start = lane_id[0]
+        lane_end = lane_id[-1]
+        start_condition = dx[0] * ego_speedx[0] + dy[0] * ego_speedy[0] >= 0
+        end_condition = dx[-1] * ego_speedx[-1] + dy[-1] * ego_speedy[-1] < 0
+
+        return lane_start == lane_end and start_condition and end_condition
+
+    def _is_dxy_of_car(self, ego_df, obj_df):
+        """
+        :param df: objstate.csv and so on
+        :param string_type: posX/Y or speedX/Y and so on
+        :return: dataframe of dx/y and so on
+        """
+        car_dx = obj_df["posX"].values - ego_df["posX"].values
+        car_dy = obj_df["posY"].values - ego_df["posY"].values
+
+        return car_dx, car_dy
+
+    # 在前车右侧超车、会车时超车、前车掉头时超车
+
+    def illegal_overtake_with_car(self, window_width=250):
+
+        # 获取数据中最后一帧的帧号,滑动窗口不能越过它
+        last_frame_id = self.ego_data["simFrame"].iloc[-1]
+        start_frame_id = self.ego_data["simFrame"].iloc[0]  # 获取起始点的帧数
+
+        while (start_frame_id + window_width) < last_frame_id:
+            simframe_window1 = list(
+                np.arange(start_frame_id, start_frame_id + window_width)
+            )
+            simframe_window = list(map(int, simframe_window1))
+            # 读取滑动窗口的dataframe数据
+            ego_data_frames = self.ego_data[
+                self.ego_data["simFrame"].isin(simframe_window)
+            ]
+            obj_data_frames = self.obj_data[
+                self.obj_data["simFrame"].isin(simframe_window)
+            ]
+            other_data_frames = self.other_obj_data[
+                self.other_obj_data["simFrame"].isin(simframe_window)
+            ]
+            # 读取前后的laneId
+            lane_id = ego_data_frames["lane_id"].tolist()
+            # 读取前后方向盘转角steeringWheel,
+            driverctrl_start_state = ego_data_frames["posH"].iloc[0]
+            driverctrl_end_state = ego_data_frames["posH"].iloc[-1]
+            # 读取车辆前后的位置信息
+            dx, dy = self._is_dxy_of_car(ego_data_frames, obj_data_frames)
+            ego_speedx = ego_data_frames["speedX"].tolist()
+            ego_speedy = ego_data_frames["speedY"].tolist()
+
+            obj_speedx = obj_data_frames[obj_data_frames["playerId"] == 2][
+                "speedX"
+            ].tolist()
+            obj_speedy = obj_data_frames[obj_data_frames["playerId"] == 2][
+                "speedY"
+            ].tolist()
+            if len(other_data_frames) > 0:
+                other_start_speedx = other_data_frames["speedX"].iloc[0]
+                other_start_speedy = other_data_frames["speedY"].iloc[0]
+                if (
+                    ego_speedx[0] * other_start_speedx
+                    + ego_speedy[0] * other_start_speedy
+                    < 0
+                ):
+                    self.overtake_when_passing_car_count += self._is_overtake(
+                        lane_id, dx, dy, ego_speedx, ego_speedy
+                    )
+                    start_frame_id += window_width
+            """
+            如果滑动窗口开始和最后的laneid一致;
+            方向盘转角前后方向相反(开始方向盘转角向右后来方向盘转角向左);
+            自车和前车的位置发生的交换;
+            则认为右超车
+            """
+            if driverctrl_start_state > 0 and driverctrl_end_state < 0:
+                self.overtake_on_right_count += self._is_overtake(
+                    lane_id, dx, dy, ego_speedx, ego_speedy
+                )
+                start_frame_id += window_width
+            elif (len(ego_speedx)*len(obj_speedx) > 0) and (ego_speedx[0] * obj_speedx[0] + ego_speedy[0] * obj_speedy[0] < 0):
+                self.overtake_when_turn_around_count += self._is_overtake(
+                    lane_id, dx, dy, ego_speedx, ego_speedy
+                )
+                start_frame_id += window_width
+            else:
+                start_frame_id += 1
+        # print(
+        #     f"在会车时超车{self.overtake_when_passing_car_count}次, 右侧超车{self.overtake_on_right_count}次, 在前车掉头时超车{self.overtake_when_turn_around_count}次")
+
+    # 借道超车场景
+    def overtake_in_forbid_lane(self):
+        simTime = self.obj_data["simTime"].tolist()
+        simtime_devide = self.different_road_area_simtime(simTime)
+        if len(simtime_devide) == 0:
+            self.overtake_in_forbid_lane_count += 0
+            return
+        else:
+            for simtime in simtime_devide:
+                lane_overtake = self.ego_data[self.ego_data["simTime"].isin(simtime)]
+                try:
+                    lane_type = lane_overtake["lane_type"].tolist()
+                    if (50002 in lane_type and len(set(lane_type)) > 2) or (
+                        50002 not in lane_type and len(set(lane_type)) > 1
+                    ):
+                        self.overtake_in_forbid_lane_count += 1
+                except Exception as e:
+                    print("数据缺少lane_type信息")
+            # print(f"在不该占用车道超车{self.overtake_in_forbid_lane_count}次")
+
+    # 在匝道超车
+    def overtake_in_ramp_area(self):
+        ramp_simtime_list = self.ego_data[(self.ego_data["road_type"] == 19)][
+            "simTime"
+        ].tolist()
+        if len(ramp_simtime_list) == 0:
+            self.overtake_in_ramp_count += 0
+            return
+        else:
+            ramp_simTime_list = self.different_road_area_simtime(ramp_simtime_list)
+            for ramp_simtime in ramp_simTime_list:
+                lane_id = self.ego_data["lane_id"].tolist()
+                ego_in_ramp = self.ego_data[self.ego_data["simTime"].isin(ramp_simtime)]
+                objstate_in_ramp = self.obj_data[
+                    self.obj_data["simTime"].isin(ramp_simtime)
+                ]
+                dx, dy = self._is_dxy_of_car(ego_in_ramp, objstate_in_ramp)
+                ego_speedx = ego_in_ramp["speedX"].tolist()
+                ego_speedy = ego_in_ramp["speedY"].tolist()
+                if len(lane_id) > 0:
+                    self.overtake_in_ramp_count += self._is_overtake(
+                        lane_id, dx, dy, ego_speedx, ego_speedy
+                    )
+                else:
+                    continue
+        # print(f"在匝道超车{self.overtake_in_ramp_count}次")
+
+    def overtake_in_tunnel_area(self):
+        tunnel_simtime_list = self.ego_data[(self.ego_data["road_type"] == 15)][
+            "simTime"
+        ].tolist()
+        if len(tunnel_simtime_list) == 0:
+            self.overtake_in_tunnel_count += 0
+            return
+        else:
+            tunnel_simTime_list = self.different_road_area_simtime(tunnel_simtime_list)
+            for tunnel_simtime in tunnel_simTime_list:
+                lane_id = self.ego_data["lane_id"].tolist()
+                ego_in_tunnel = self.ego_data[self.ego_data["simTime"].isin(tunnel_simtime)]
+                objstate_in_tunnel = self.obj_data[
+                    self.obj_data["simTime"].isin(tunnel_simtime)
+                ]
+                dx, dy = self._is_dxy_of_car(ego_in_tunnel, objstate_in_tunnel)
+                ego_speedx = ego_in_tunnel["speedX"].tolist()
+                ego_speedy = ego_in_tunnel["speedY"].tolist()
+                if len(lane_id) > 0:
+                    self.overtake_in_tunnel_count += self._is_overtake(
+                        lane_id, dx, dy, ego_speedx, ego_speedy
+                    )
+                else:
+                    continue
+            # print(f"在隧道超车{self.overtake_in_tunnel_count}次")
+
+    # 加速车道超车
+    def overtake_on_accelerate_lane(self):
+        accelerate_simtime_list = self.ego_data[self.ego_data["lane_type"] == 2][
+            "simTime"
+        ].tolist()
+        if len(accelerate_simtime_list) == 0:
+            self.overtake_on_accelerate_lane_count += 0
+            return
+        else:
+            accelerate_simTime_list = self.different_road_area_simtime(
+                accelerate_simtime_list
+            )
+            for accelerate_simtime in accelerate_simTime_list:
+                lane_id = self.ego_data["lane_id"].tolist()
+                ego_in_accelerate = self.ego_data[
+                    self.ego_data["simTime"].isin(accelerate_simtime)
+                ]
+                objstate_in_accelerate = self.obj_data[
+                    self.obj_data["simTime"].isin(accelerate_simtime)
+                ]
+                dx, dy = self._is_dxy_of_car(ego_in_accelerate, objstate_in_accelerate)
+                ego_speedx = ego_in_accelerate["speedX"].tolist()
+                ego_speedy = ego_in_accelerate["speedY"].tolist()
+
+                self.overtake_on_accelerate_lane_count += self._is_overtake(
+                    lane_id, dx, dy, ego_speedx, ego_speedy
+                )
+            # print(f"在加速车道超车{self.overtake_on_accelerate_lane_count}次")
+
+    # 减速车道超车
+    def overtake_on_decelerate_lane(self):
+        decelerate_simtime_list = self.ego_data[(self.ego_data["lane_type"] == 3)][
+            "simTime"
+        ].tolist()
+        if len(decelerate_simtime_list) == 0:
+            self.overtake_on_decelerate_lane_count += 0
+            return
+        else:
+            decelerate_simTime_list = self.different_road_area_simtime(
+                decelerate_simtime_list
+            )
+            for decelerate_simtime in decelerate_simTime_list:
+                lane_id = self.ego_data["id"].tolist()
+                ego_in_decelerate = self.ego_data[
+                    self.ego_data["simTime"].isin(decelerate_simtime)
+                ]
+                objstate_in_decelerate = self.obj_data[
+                    self.obj_data["simTime"].isin(decelerate_simtime)
+                ]
+                dx, dy = self._is_dxy_of_car(ego_in_decelerate, objstate_in_decelerate)
+                ego_speedx = ego_in_decelerate["speedX"].tolist()
+                ego_speedy = ego_in_decelerate["speedY"].tolist()
+
+                self.overtake_on_decelerate_lane_count += self._is_overtake(
+                    lane_id, dx, dy, ego_speedx, ego_speedy
+                )
+            # print(f"在减速车道超车{self.overtake_on_decelerate_lane_count}次")
+
+    # 在交叉路口
+    def overtake_in_different_senerios(self):
+        crossroad_simTime = self.ego_data[self.ego_data["interid"] != 10000][
+            "simTime"
+        ].tolist()  # 判断是路口或者隧道区域
+        if len(crossroad_simTime) == 0:
+            self.overtake_in_different_senerios_count += 0
+            return
+        else:
+            # 筛选在路口或者隧道区域的objectstate、driverctrl、laneinfo数据
+            crossroad_ego = self.ego_data[self.ego_data["simTime"].isin(crossroad_simTime)]
+            crossroad_objstate = self.obj_data[
+                self.obj_data["simTime"].isin(crossroad_simTime)
+            ]
+            # crossroad_laneinfo = self.laneinfo_new_data[self.laneinfo_new_data['simTime'].isin(crossroad_simTime)]
+
+            # 读取前后的laneId
+            lane_id = crossroad_ego["lane_id"].tolist()
+
+            # 读取车辆前后的位置信息
+            dx, dy = self._is_dxy_of_car(crossroad_ego, crossroad_objstate)
+            ego_speedx = crossroad_ego["speedX"].tolist()
+            ego_speedy = crossroad_ego["speedY"].tolist()
+            """
+            如果滑动窗口开始和最后的laneid一致;
+            自车和前车的位置发生的交换;
+            则认为发生超车
+            """
+            if len(lane_id) > 0:
+                self.overtake_in_different_senerios_count += self._is_overtake(
+                    lane_id, dx, dy, ego_speedx, ego_speedy
+                )
+            else:
+                pass
+            # print(f"在路口超车{self.overtake_in_different_senerios_count}次")
+
+    def statistic(self):
+        if len(self.obj_data) == 0:
+            pass
+        else:
+            self.overtake_in_forbid_lane()
+            self.overtake_on_decelerate_lane()
+            self.overtake_on_accelerate_lane()
+            self.overtake_in_ramp_area()
+            self.overtake_in_tunnel_area()
+            self.overtake_in_different_senerios()
+            self.illegal_overtake_with_car()
+
+        self.calculated_value = {
+            "overtake_on_right": self.overtake_on_right_count,
+            "overtake_when_turn_around": self.overtake_when_turn_around_count,
+            "overtake_when_passing_car": self.overtake_when_passing_car_count,
+            "overtake_in_forbid_lane": self.overtake_in_forbid_lane_count,
+            "overtake_in_ramp": self.overtake_in_ramp_count,
+            "overtake_in_tunnel": self.overtake_in_tunnel_count,
+            "overtake_on_accelerate_lane": self.overtake_on_accelerate_lane_count,
+            "overtake_on_decelerate_lane": self.overtake_on_decelerate_lane_count,
+            "overtake_in_different_senerios": self.overtake_in_different_senerios_count,
+        }
+        # self.logger.info(f"超车类指标统计完成,统计结果:{self.calculated_value}")
+        return self.calculated_value
+
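+
+# Editor's sketch (illustrative only): the overtake test in _is_overtake() above keys
+# on the sign of the dot product between the ego-to-target offset (dx, dy) and the ego
+# velocity: non-negative at the window start (target ahead) and negative at the end
+# (target behind) on the same lane counts as one overtake. All numbers are invented.
+def _demo_overtake_sign_test():
+    lane_id = [3, 3, 3, 3]              # same lane at start and end of the window
+    dx = [12.0, 6.0, -2.0, -9.0]        # target position minus ego position (x)
+    dy = [0.5, 0.3, -0.1, -0.4]
+    ego_vx = [8.0, 8.0, 8.0, 8.0]
+    ego_vy = [0.0, 0.0, 0.0, 0.0]
+
+    same_lane = lane_id[0] == lane_id[-1]
+    starts_ahead = dx[0] * ego_vx[0] + dy[0] * ego_vy[0] >= 0
+    ends_behind = dx[-1] * ego_vx[-1] + dy[-1] * ego_vy[-1] < 0
+    return same_lane and starts_ahead and ends_behind  # True for this toy window
+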
+
+class SlowdownViolation(object):
+    """减速让行违规类"""
+
+    def __init__(self, df_data):
+        print("减速让行违规类-------------------------")
+        self.traffic_violations_type = "减速让行违规类"
+        self.object_items = []
+        self.data = df_data.ego_data
+        self.ego_data = (
+            self.data[config.SLOWDOWN_INFO].copy().reset_index(drop=True)
+        )  # Copy to avoid modifying the original DataFrame
+        self.pedestrian_data = pd.DataFrame()
+
+        self.object_items = set(df_data.object_df.type.tolist())
+        if 13 in self.object_items:  # 行人的type是13
+            self.pedestrian_df = df_data.object_df[df_data.object_df.type == 13]
+            self.pedestrian_data = (
+                self.pedestrian_df[config.SLOWDOWN_INFO].copy().reset_index(drop=True)
+            )
+
+        self.slow_down_in_crosswalk_count = 0
+        self.avoid_pedestrian_in_crosswalk_count = 0
+        self.avoid_pedestrian_in_the_road_count = 0
+        self.aviod_pedestrian_when_turning_count = 0
+
+    def pedestrian_in_front_of_car(self):
+        if len(self.pedestrian_data) == 0:
+            return []
+        else:
+            self.ego_data["dx"] = self.ego_data["posX"] - self.pedestrian_data["posX"]
+            self.ego_data["dy"] = self.ego_data["posY"] - self.pedestrian_data["posY"]
+            self.ego_data["dist"] = np.sqrt(
+                self.ego_data["dx"] ** 2 + self.ego_data["dy"] ** 2
+            )
+
+            self.ego_data["rela_pos"] = (
+                self.ego_data["dx"] * self.ego_data["speedX"]
+                + self.ego_data["dy"] * self.ego_data["speedY"]
+            )
+            simtime = self.ego_data[
+                (self.ego_data["rela_pos"] > 0) & (self.ego_data["dist"] < 50)
+            ]["simTime"].tolist()
+            return simtime
+
+    def different_road_area_simtime(self, df, threshold=0.6):
+        if not df:
+            return []
+        simtime_group = []
+        current_simtime_group = [df[0]]
+
+        for i in range(1, len(df)):
+            if abs(df[i] - df[i - 1]) <= threshold:
+                current_simtime_group.append(df[i])
+            else:
+                simtime_group.append(current_simtime_group)
+                current_simtime_group = [df[i]]
+
+        simtime_group.append(current_simtime_group)
+        return simtime_group
+
+    def slow_down_in_crosswalk(self):
+        # 筛选出路口或隧道区域的时间点
+        crosswalk_simTime = self.ego_data[self.ego_data["crossid"] != 20000][
+            "simTime"
+        ].tolist()
+        if len(crosswalk_simTime) == 0:
+            self.slow_down_in_crosswalk_count += 0
+            return
+        else:
+            crosswalk_simTime_divide = self.different_road_area_simtime(crosswalk_simTime)
+
+            for crosswalk_simtime in crosswalk_simTime_divide:
+                # 筛选出当前时间段内的数据
+                # start_time, end_time = crosswalk_simtime
+                start_time = crosswalk_simtime[0]
+                end_time = crosswalk_simtime[-1]
+                print(f"当前时间段:{start_time} - {end_time}")
+                crosswalk_objstate = self.ego_data[
+                    (self.ego_data["simTime"] >= start_time)
+                    & (self.ego_data["simTime"] <= end_time)
+                ]
+
+                # 计算车辆速度
+                ego_speedx = np.array(crosswalk_objstate["speedX"].tolist())
+                ego_speedy = np.array(crosswalk_objstate["speedY"].tolist())
+                ego_speed = np.sqrt(ego_speedx**2 + ego_speedy**2)
+
+                # 判断是否超速
+                if max(ego_speed) > 15 / 3.6:  # 15 km/h 转换为 m/s
+                    self.slow_down_in_crosswalk_count += 1
+
+            # 输出总次数
+            print(f"在人行横道超车总次数:{self.slow_down_in_crosswalk_count}次")
+
+    def avoid_pedestrian_in_crosswalk(self):
+        crosswalk_simTime = self.ego_data[self.ego_data["crossid"] != 20000][
+            "simTime"
+        ].tolist()
+        if len(crosswalk_simTime) == 0:
+            self.avoid_pedestrian_in_crosswalk_count += 0
+            return
+        else:
+            crosswalk_simTime_devide = self.different_road_area_simtime(crosswalk_simTime)
+            for crosswalk_simtime in crosswalk_simTime_devide:
+                if not self.pedestrian_data.empty:
+                    crosswalk_objstate = self.pedestrian_data[
+                        self.pedestrian_data["simTime"].isin(crosswalk_simtime)
+                    ]
+                else:
+                    crosswalk_objstate = pd.DataFrame()
+                if len(crosswalk_objstate) > 0:
+                    pedestrian_simtime = crosswalk_objstate["simTime"]
+                    pedestrian_objstate = crosswalk_objstate[
+                        crosswalk_objstate["simTime"].isin(pedestrian_simtime)
+                    ]
+                    ego_speed = np.sqrt(
+                        pedestrian_objstate["speedX"] ** 2
+                        + pedestrian_objstate["speedY"] ** 2
+                    )
+                    if (ego_speed > 0).any():
+                        self.avoid_pedestrian_in_crosswalk_count += 1
+
+    def avoid_pedestrian_in_the_road(self):
+        simtime = self.pedestrian_in_front_of_car()
+        if len(simtime) == 0:
+            self.avoid_pedestrian_in_the_road_count += 0
+            return
+        else:
+            pedestrian_on_the_road = self.pedestrian_data[
+                self.pedestrian_data["simTime"].isin(simtime)
+            ]
+            simTime = pedestrian_on_the_road["simTime"].tolist()
+            simTime_devide = self.different_road_area_simtime(simTime)
+            if len(simTime_devide) == 0:
+                pass
+            else:
+                for simtime1 in simTime_devide:
+                    sub_pedestrian_on_the_road = pedestrian_on_the_road[
+                        pedestrian_on_the_road["simTime"].isin(simtime1)
+                    ]
+                    ego_car = self.ego_data.loc[(self.ego_data["simTime"].isin(simtime1))]
+                    dist = np.sqrt(
+                        (ego_car["posX"].values - sub_pedestrian_on_the_road["posX"].values)
+                        ** 2
+                        + (
+                            ego_car["posY"].values
+                            - sub_pedestrian_on_the_road["posY"].values
+                        )
+                        ** 2
+                    )
+                    speed = np.sqrt(
+                        ego_car["speedX"].values ** 2 + ego_car["speedY"].values ** 2
+                    )
+                    data = {"dist": dist, "speed": speed}
+                    new_ego_car = pd.DataFrame(data)
+                    new_ego_car = new_ego_car.assign(
+                        Column3=lambda x: (x["dist"] < 1) & (x["speed"] == 0)
+                    )
+                    if new_ego_car["Column3"].any():
+                        self.avoid_pedestrian_in_the_road_count += 1
+
+    def aviod_pedestrian_when_turning(self):
+        pedestrian_simtime_list = self.pedestrian_in_front_of_car()
+        if len(pedestrian_simtime_list) > 0:
+            simtime_list = self.ego_data[
+                (self.ego_data["simTime"].isin(pedestrian_simtime_list))
+                & (self.ego_data["lane_type"] == 20)
+            ]["simTime"].tolist()
+            simTime_list = self.different_road_area_simtime(simtime_list)
+            pedestrian_on_the_road = self.pedestrian_data[
+                self.pedestrian_data["simTime"].isin(simtime_list)
+            ]
+            if len(simTime_list) == 0:
+                pass
+            else:
+                for simtime in simTime_list:
+                    sub_pedestrian_on_the_road = pedestrian_on_the_road[
+                        pedestrian_on_the_road["simTime"].isin(simtime)
+                    ]
+                    if len(sub_pedestrian_on_the_road) > 0:
+                        ego_car = self.ego_data.loc[(self.ego_data["simTime"].isin(simtime))].copy()
+                        ego_car["dist"] = np.sqrt(
+                            (ego_car["posX"].values - sub_pedestrian_on_the_road["posX"].values)
+                            ** 2
+                            + (
+                                ego_car["posY"].values
+                                - sub_pedestrian_on_the_road["posY"].values
+                            )
+                            ** 2
+                        )
+                        ego_car["speed"] = np.sqrt(
+                            ego_car["speedX"].values ** 2 + ego_car["speedY"].values ** 2
+                        )
+                        if any(ego_car["speed"].tolist()) != 0:
+                            self.aviod_pedestrian_when_turning_count += 1
+
+    def statistic(self):
+        self.slow_down_in_crosswalk()
+        self.avoid_pedestrian_in_crosswalk()
+        self.avoid_pedestrian_in_the_road()
+        self.aviod_pedestrian_when_turning()
+
+        self.calculated_value = {
+            "slow_down_in_crosswalk": self.slow_down_in_crosswalk_count,
+            "avoid_pedestrian_in_crosswalk": self.avoid_pedestrian_in_crosswalk_count,
+            "avoid_pedestrian_in_the_road": self.avoid_pedestrian_in_the_road_count,
+            "aviod_pedestrian_when_turning": self.aviod_pedestrian_when_turning_count,
+        }
+        # self.logger.info(f"减速让行类指标统计完成,统计结果:{self.calculated_value}")
+        return self.calculated_value
+
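+
+# Editor's sketch (illustrative only): different_road_area_simtime() above splits a
+# sorted list of timestamps wherever two consecutive samples are further apart than
+# the gap threshold. The timestamps below are invented.
+def _demo_split_times_by_gap(threshold=0.6):
+    times = [10.0, 10.1, 10.2, 15.0, 15.1, 20.0]
+    groups = [[times[0]]]
+    for prev, cur in zip(times, times[1:]):
+        if abs(cur - prev) <= threshold:
+            groups[-1].append(cur)
+        else:
+            groups.append([cur])
+    return groups  # [[10.0, 10.1, 10.2], [15.0, 15.1], [20.0]]
+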
+
+class TurnaroundViolation(object):
+
+    def __init__(self, df_data):
+        print("掉头违规类初始化中...")
+        self.traffic_violations_type = "掉头违规类"
+
+        self.data = df_data.obj_data[1]
+        self.ego_data = (
+            self.data[config.TURNAROUND_INFO].copy().reset_index(drop=True)
+        )  # Copy to avoid modifying the original DataFrame
+        self.pedestrian_data = pd.DataFrame()
+
+        self.object_items = set(df_data.object_df.type.tolist())
+        if 13 in self.object_items:  # 行人的type是13
+            self.pedestrian_df = df_data.object_df[df_data.object_df.type == 13]
+            self.pedestrian_data = (
+                self.pedestrian_df[config.SLOWDOWN_INFO].copy().reset_index(drop=True)
+            )
+
+        self.turning_in_forbiden_turn_back_sign_count = 0
+        self.turning_in_forbiden_turn_left_sign_count = 0
+        self.avoid_pedestrian_when_turn_back_count = 0
+
+    def pedestrian_in_front_of_car(self):
+        if len(self.pedestrian_data) == 0:
+            return []
+        else:
+            self.ego_data["dx"] = self.ego_data["posX"] - self.pedestrian_data["posX"]
+            self.ego_data["dy"] = self.ego_data["posY"] - self.pedestrian_data["posY"]
+            self.ego_data["dist"] = np.sqrt(
+                self.ego_data["dx"] ** 2 + self.ego_data["dy"] ** 2
+            )
+
+            self.ego_data["rela_pos"] = (
+                self.ego_data["dx"] * self.ego_data["speedX"]
+                + self.ego_data["dy"] * self.ego_data["speedY"]
+            )
+            simtime = self.ego_data[
+                (self.ego_data["rela_pos"] > 0) & (self.ego_data["dist"] < 50)
+            ]["simTime"].tolist()
+            return simtime
+
+    def different_road_area_simtime(self, df, threshold=0.5):
+        if not df:
+            return []
+        simtime_group = []
+        current_simtime_group = [df[0]]
+
+        for i in range(1, len(df)):
+            if abs(df[i] - df[i - 1]) <= threshold:
+                current_simtime_group.append(df[i])
+            else:
+                simtime_group.append(current_simtime_group)
+                current_simtime_group = [df[i]]
+
+        simtime_group.append(current_simtime_group)
+        return simtime_group
+
+    def turn_back_in_forbiden_sign(self):
+        """
+        禁止掉头type = 8
+        """
+        forbiden_turn_back_simTime = self.ego_data[self.ego_data["sign_type1"] == 8][
+            "simTime"
+        ].tolist()
+        forbiden_turn_left_simTime = self.ego_data[self.ego_data["sign_type1"] == 9][
+            "simTime"
+        ].tolist()
+
+        forbiden_turn_back_simtime_devide = self.different_road_area_simtime(
+            forbiden_turn_back_simTime
+        )
+        forbiden_turn_left_simtime_devide = self.different_road_area_simtime(
+            forbiden_turn_left_simTime
+        )
+        if len(forbiden_turn_back_simtime_devide) == 0:
+            pass
+        else:
+            for forbiden_turn_back_simtime in forbiden_turn_back_simtime_devide:
+                ego_car1 = self.ego_data.loc[
+                    (self.ego_data["simFrame"].isin(forbiden_turn_back_simtime))
+                ]
+                ego_start_speedx1 = ego_car1["speedX"].iloc[0]
+                ego_start_speedy1 = ego_car1["speedY"].iloc[0]
+                ego_end_speedx1 = ego_car1["speedX"].iloc[-1]
+                ego_end_speedy1 = ego_car1["speedY"].iloc[-1]
+
+                if (
+                    ego_end_speedx1 * ego_start_speedx1
+                    + ego_end_speedy1 * ego_start_speedy1
+                    < 0
+                ):
+                    self.turning_in_forbiden_turn_back_sign_count += 1
+        if len(forbiden_turn_left_simtime_devide) == 0:
+            pass
+        else:
+            for forbiden_turn_left_simtime in forbiden_turn_left_simtime_devide:
+                ego_car2 = self.ego_data.loc[
+                    (self.ego_data["simFrame"].isin(forbiden_turn_left_simtime))
+                ]
+                ego_start_speedx2 = ego_car2["speedX"].iloc[0]
+                ego_start_speedy2 = ego_car2["speedY"].iloc[0]
+                ego_end_speedx2 = ego_car2["speedX"].iloc[-1]
+                ego_end_speedy2 = ego_car2["speedY"].iloc[-1]
+
+                if (
+                    ego_end_speedx2 * ego_start_speedx2
+                    + ego_end_speedy2 * ego_start_speedy2
+                    < 0
+                ):
+                    self.turning_in_forbiden_turn_left_sign_count += 1
+
+    def avoid_pedestrian_when_turn_back(self):
+        sensor_on_intersection = self.pedestrian_in_front_of_car()
+        avoid_pedestrian_when_turn_back_simTime_list = self.ego_data[
+            self.ego_data["lane_type"] == 20
+        ]["simTime"].tolist()
+        avoid_pedestrian_when_turn_back_simTime_devide = (
+            self.different_road_area_simtime(
+                avoid_pedestrian_when_turn_back_simTime_list
+            )
+        )
+        if (len(sensor_on_intersection) > 0) and (len(avoid_pedestrian_when_turn_back_simTime_devide) > 0):
+            for (
+                avoid_pedestrian_when_turn_back_simtime
+            ) in avoid_pedestrian_when_turn_back_simTime_devide:
+                pedestrian_in_intersection_simtime = self.pedestrian_data[
+                    self.pedestrian_data["simTime"].isin(
+                        avoid_pedestrian_when_turn_back_simtime
+                    )
+                ]["simTime"].tolist()
+                ego_df = self.ego_data[
+                    self.ego_data["simTime"].isin(pedestrian_in_intersection_simtime)
+                ].reset_index(drop=True)
+                pedestrian_df = self.pedestrian_data[
+                    self.pedestrian_data["simTime"].isin(
+                        pedestrian_in_intersection_simtime
+                    )
+                ].reset_index(drop=True)
+                ego_df["dist"] = np.sqrt(
+                    (ego_df["posx"] - pedestrian_df["posx"]) ** 2
+                    + (ego_df["posy"] - pedestrian_df["posy"]) ** 2
+                )
+                ego_df["speed"] = np.sqrt(ego_df["speedx"] ** 2 + ego_df["speedy"] ** 2)
+                if (any(ego_df["speed"].tolist()) != 0) or (any(ego_df["dist"].tolist()) == 0):
+                    self.avoid_pedestrian_when_turn_back_count += 1
+
+    def statistic(self):
+        self.turn_back_in_forbiden_sign()
+        self.avoid_pedestrian_when_turn_back()
+
+        self.calculated_value = {
+            "turn_back_in_forbiden_turn_back_sign": self.turning_in_forbiden_turn_back_sign_count,
+            "turn_back_in_forbiden_turn_left_sign": self.turning_in_forbiden_turn_left_sign_count,
+            "avoid_pedestrian_when_turn_back": self.avoid_pedestrian_when_turn_back_count,
+        }
+        # self.logger.info(f"掉头违规类指标统计完成,统计结果:{self.calculated_value}")
+        return self.calculated_value
+
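+
+# Editor's sketch (illustrative only): turn_back_in_forbiden_sign() above flags a
+# U-turn inside a "no U-turn / no left turn sign visible" segment when the velocity
+# at the end of the segment points roughly opposite to the velocity at its start,
+# i.e. their dot product is negative. The vectors below are invented.
+def _demo_u_turn_heading_flip():
+    start_vx, start_vy = 7.0, 0.5    # initially driving roughly along +x
+    end_vx, end_vy = -6.5, -0.2      # finally driving roughly along -x
+    return start_vx * end_vx + start_vy * end_vy < 0  # True -> counted as a U-turn
+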
+
+class WrongWayViolation:
+    """停车违规类"""
+
+    def __init__(self, df_data):
+        print("停车违规类初始化中...")
+        self.traffic_violations_type = "停车违规类"
+        self.data = df_data.obj_data[1].copy()  # Copy to avoid modifying the original DataFrame
+        # 初始化违规统计
+        self.violation_count = {
+            "urbanExpresswayOrHighwayDrivingLaneStopped": 0,
+            "urbanExpresswayOrHighwayEmergencyLaneStopped": 0,
+            "urbanExpresswayEmergencyLaneDriving": 0,
+        }
+
+    def process_violations(self):
+        """处理停车或者紧急车道行驶违规数据"""
+        # 提取有效道路类型
+        urban_expressway_or_highway = {1, 2}
+        driving_lane = {1, 4, 5, 6}
+        emergency_lane = {12}
+        self.data["v"] *= 3.6  # 转换速度
+
+        # 使用向量化和条件判断进行违规判定
+        conditions = [
+            (
+                self.data["road_fc"].isin(urban_expressway_or_highway)
+                & self.data["lane_type"].isin(driving_lane)
+                & (self.data["v"] == 0)
+            ),
+            (
+                self.data["road_fc"].isin(urban_expressway_or_highway)
+                & self.data["lane_type"].isin(emergency_lane)
+                & (self.data["v"] == 0)
+            ),
+            (
+                self.data["road_fc"].isin(urban_expressway_or_highway)
+                & self.data["lane_type"].isin(emergency_lane)
+                & (self.data["v"] != 0)
+            ),
+        ]
+
+        violation_types = [
+            "urbanExpresswayOrHighwayDrivingLaneStopped",
+            "urbanExpresswayOrHighwayEmergencyLaneStopped",
+            "urbanExpresswayEmergencyLaneDriving",
+        ]
+
+        # 设置违规类型
+        self.data["violation_type"] = None
+        for condition, violation_type in zip(conditions, violation_types):
+            self.data.loc[condition, "violation_type"] = violation_type
+
+        # 统计违规情况
+        self.violation_count = (
+            self.data["violation_type"]
+            .value_counts()
+            .reindex(violation_types, fill_value=0)
+            .to_dict()
+        )
+
+    def statistic(self) -> dict:
+
+        self.process_violations()
+        # self.logger.info(f"停车违规类指标统计完成,统计结果:{self.violation_count}")
+        return self.violation_count
+
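+
+# Editor's sketch (illustrative only): the condition / label / value_counts().reindex
+# pattern used by process_violations() above, applied to a tiny invented DataFrame.
+# reindex(fill_value=0) keeps categories that never occurred in the final dict.
+def _demo_condition_labelling():
+    import pandas as pd
+
+    df = pd.DataFrame({"road_fc": [1, 1, 3], "lane_type": [12, 4, 4], "v": [0.0, 0.0, 40.0]})
+    conditions = [
+        df["road_fc"].isin({1, 2}) & df["lane_type"].isin({1, 4, 5, 6}) & (df["v"] == 0),
+        df["road_fc"].isin({1, 2}) & df["lane_type"].isin({12}) & (df["v"] == 0),
+    ]
+    labels = [
+        "urbanExpresswayOrHighwayDrivingLaneStopped",
+        "urbanExpresswayOrHighwayEmergencyLaneStopped",
+    ]
+    df["violation_type"] = None
+    for condition, label in zip(conditions, labels):
+        df.loc[condition, "violation_type"] = label
+    return df["violation_type"].value_counts().reindex(labels, fill_value=0).to_dict()
+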
+
+class SpeedingViolation(object):
+    """超速违规类"""
+
+    """ 这里没有道路标志牌限速指标,因为shp地图中没有这个信息"""
+
+    def __init__(self, df_data):
+        print("超速违规类初始化中...")
+        self.traffic_violations_type = "超速违规类"
+        self.data = df_data.obj_data[1].copy()  # Copy to avoid modifying the original DataFrame
+        # 初始化违规统计
+        self.violation_counts = {
+            "urbanExpresswayOrHighwaySpeedOverLimit50": 0,
+            "urbanExpresswayOrHighwaySpeedOverLimit20to50": 0,
+            "urbanExpresswayOrHighwaySpeedOverLimit0to20": 0,
+            "urbanExpresswayOrHighwaySpeedUnderLimit": 0,
+            "generalRoadSpeedOverLimit50": 0,
+            "generalRoadSpeedOverLimit20to50": 0,
+        }
+
+    def process_violations(self):
+        """处理数据帧,检查超速和其他违规行为"""
+        # 提取有效道路类型
+        urban_expressway_or_highway = {1, 2}  # 使用大括号直接创建集合
+        general_road = {3}  # 直接创建包含一个元素的集合
+        self.data["v"] *= 3.6  # 转换速度
+
+        # 违规判定
+        conditions = [
+            (
+                self.data["road_fc"].isin(urban_expressway_or_highway)
+                & (self.data["v"] > self.data["road_speed_max"] * 1.5)
+            ),
+            (
+                self.data["road_fc"].isin(urban_expressway_or_highway)
+                & (self.data["v"] > self.data["road_speed_max"] * 1.2)
+                & (self.data["v"] <= self.data["road_speed_max"] * 1.5)
+            ),
+            (
+                self.data["road_fc"].isin(urban_expressway_or_highway)
+                & (self.data["v"] > self.data["road_speed_max"])
+                & (self.data["v"] <= self.data["road_speed_max"] * 1.2)
+            ),
+            (
+                self.data["road_fc"].isin(urban_expressway_or_highway)
+                & (self.data["v"] < self.data["road_speed_min"])
+            ),
+            (
+                self.data["road_fc"].isin(general_road)
+                & (self.data["v"] > self.data["road_speed_max"] * 1.5)
+            ),
+            (
+                self.data["road_fc"].isin(general_road)
+                & (self.data["v"] > self.data["road_speed_max"] * 1.2)
+                & (self.data["v"] <= self.data["road_speed_max"] * 1.5)
+            ),
+        ]
+
+        violation_types = [
+            "urbanExpresswayOrHighwaySpeedOverLimit50",
+            "urbanExpresswayOrHighwaySpeedOverLimit20to50",
+            "urbanExpresswayOrHighwaySpeedOverLimit0to20",
+            "urbanExpresswayOrHighwaySpeedUnderLimit",
+            "generalRoadSpeedOverLimit50",
+            "generalRoadSpeedOverLimit20to50",
+        ]
+
+        # 设置违规类型
+        self.data["violation_type"] = None
+        for condition, violation_type in zip(conditions, violation_types):
+            self.data.loc[condition, "violation_type"] = violation_type
+
+        # 统计各类违规情况
+        self.violation_counts = self.data["violation_type"].value_counts().to_dict()
+
+    def statistic(self) -> dict:
+        # 处理数据
+        self.process_violations()
+        # self.logger.info(f"超速违规类指标统计完成,统计结果:{self.violation_counts}")
+        return self.violation_counts
+
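+
+# Editor's sketch (illustrative only): how the tiers above partition a measured speed
+# against the posted limit (limit, 1.2x and 1.5x). Values are invented; the real code
+# reads them from the v / road_speed_max / road_speed_min columns.
+def _demo_speed_tier(v_kmh=100.0, limit_kmh=80.0):
+    if v_kmh > limit_kmh * 1.5:
+        return "over the limit by more than 50%"
+    if v_kmh > limit_kmh * 1.2:
+        return "over the limit by 20% to 50%"   # 100 km/h vs 80 km/h lands here
+    if v_kmh > limit_kmh:
+        return "over the limit by up to 20%"
+    return "within the limit"
+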
+
+class TrafficLightViolation(object):
+    """违反交通灯类"""
+
+    """需要补充判断车辆是左转直行还是右转,判断红绿灯是方向性红绿灯还是通过性红绿灯"""
+
+    def __init__(self, df_data):
+        """初始化方法"""
+        self.traffic_violations_type = "违反交通灯类"
+        print("违反交通灯类 类初始化中...")
+        self.config = df_data.vehicle_config
+
+        self.data_ego = df_data.ego_data  # 获取数据
+        self.violation_counts = {
+            "trafficSignalViolation": 0,
+            "illegalDrivingOrParkingAtCrossroads": 0,
+        }
+
+        # 处理数据并判定违规
+        self.process_violations()
+
+    def is_point_cross_line(self, point, stop_line_points):
+        """
+        判断车辆的某一坐标点是否跨越了由两个点定义的停止线(线段)。
+        使用向量叉积判断点是否在线段上,并通过计算车辆的航向角来判断是否跨越了停止线。
+
+        :param point: 车辆位置点 (x, y, heading),包括 x, y 位置以及朝向角度(弧度制)
+        :param stop_line_points: 停止线两个端点 [[x1, y1], [x2, y2]]
+        :return: True 如果车辆跨越了停止线,否则 False
+        """
+        line_vector = np.array(
+            [
+                stop_line_points[1][0] - stop_line_points[0][0],
+                stop_line_points[1][1] - stop_line_points[0][1],
+            ]
+        )
+        point_vector = np.array(
+            [point[0] - stop_line_points[0][0], point[1] - stop_line_points[0][1]]
+        )
+
+        cross_product = np.cross(line_vector, point_vector)
+        if cross_product != 0:
+            return False
+
+        mid_point = (
+            np.array([stop_line_points[0][0], stop_line_points[0][1]])
+            + 0.5 * line_vector
+        )
+        axletree_to_mid_vector = np.array(
+            [point[0] - mid_point[0], point[1] - mid_point[1]]
+        )
+        direction_vector = np.array([math.cos(point[2]), math.sin(point[2])])
+
+        norm_axletree_to_mid = np.linalg.norm(axletree_to_mid_vector)
+        norm_direction = np.linalg.norm(direction_vector)
+
+        if norm_axletree_to_mid == 0 or norm_direction == 0:
+            return False
+
+        cos_theta = np.dot(axletree_to_mid_vector, direction_vector) / (
+            norm_axletree_to_mid * norm_direction
+        )
+        cos_theta = max(-1.0, min(1.0, cos_theta))  # 防止浮点误差越界
+        angle_theta = math.degrees(math.acos(cos_theta))
+
+        return angle_theta <= 90
+
+    def _filter_data(self):
+        """过滤数据,筛选出需要分析的记录"""
+        return self.data_ego[
+            (self.data_ego["stopline_id"] != -1)
+            & (self.data_ego["stopline_type"] == 1)
+            & (self.data_ego["trafficlight_id"] != -1)
+        ].copy()
+
+    def _group_data(self, filtered_data):
+        """按时间差对数据进行分组"""
+        filtered_data["time_diff"] = filtered_data["simTime"].diff().fillna(0)
+        threshold = 0.5
+        filtered_data["group"] = (filtered_data["time_diff"] > threshold).cumsum()
+        return filtered_data.groupby("group")
+
+    def _analyze_group(self, group_data):
+        """分析单个分组的数据,判断是否闯红灯"""
+        photos = []
+        stop_in_intersection = False
+
+        for _, row in group_data.iterrows():
+            vehicle_pos = np.array([row["posX"], row["posY"], row["posH"]])
+            stop_line_points = [
+                [row["stopline_x1"], row["stopline_y1"]],
+                [row["stopline_x2"], row["stopline_y2"]],
+            ]
+            stateMask = row["stateMask"]
+            heading_vector = np.array([np.cos(row["posH"]), np.sin(row["posH"])])
+            heading_vector = heading_vector / np.linalg.norm(heading_vector)
+
+            # with open(self.config_path / "vehicle_config.yaml", 'r') as f:
+            #     config = yaml.load(f, Loader=yaml.FullLoader)
+            front_wheel_pos = vehicle_pos[:2] + self.config["EGO_WHEELBASS"] * heading_vector
+            rear_wheel_pos = vehicle_pos[:2] - self.config["EGO_WHEELBASS"] * heading_vector
+            dist = math.sqrt(
+                (row["posX"] - row["traffic_light_x"]) ** 2
+                + (row["posY"] - row["traffic_light_y"]) ** 2
+            )
+
+            if abs(row["speedH"]) > 0.01 or abs(row["speedH"]) < 0.01:
+                has_crossed_line_front = (
+                    self.is_point_cross_line(front_wheel_pos, stop_line_points)
+                    and stateMask == 1
+                )
+                has_crossed_line_rear = (
+                    self.is_point_cross_line(rear_wheel_pos, stop_line_points)
+                    and row["v"] > 0
+                    and stateMask == 1
+                )
+                has_stop_in_intersection = has_crossed_line_front and row["v"] == 0
+                has_passed_intersection = has_crossed_line_front and dist < 1.0
+                # print(f'time: {row["simTime"]}, speed: {row["speedH"]}, posH: {row["posH"]}, dist: {dist:.2f}, has_stop_in_intersection: {has_stop_in_intersection}, has_passed_intersection: {has_passed_intersection}')
+
+                photos.extend(
+                    [
+                        has_crossed_line_front,
+                        has_crossed_line_rear,
+                        has_passed_intersection,
+                        has_stop_in_intersection,
+                    ]
+                )
+                stop_in_intersection = has_passed_intersection
+
+        return photos, stop_in_intersection
+
+    def is_vehicle_run_a_red_light(self):
+        """判断车辆是否闯红灯"""
+        filtered_data = self._filter_data()
+        grouped_data = self._group_data(filtered_data)
+        self.photos_group = []
+        self.stop_in_intersections = []
+
+        for _, group_data in grouped_data:
+            photos, stop_in_intersection = self._analyze_group(group_data)
+            self.photos_group.append(photos)
+            self.stop_in_intersections.append(stop_in_intersection)
+
+    def process_violations(self):
+        """处理数据并判定违规"""
+        self.is_vehicle_run_a_red_light()
+        count_1 = sum(all(photos) for photos in self.photos_group if photos)
+        count_2 = sum(
+            stop_in_intersection for stop_in_intersection in self.stop_in_intersections
+        )
+
+        self.violation_counts["trafficSignalViolation"] = count_1
+        self.violation_counts["illegalDrivingOrParkingAtCrossroads"] = count_2
+
+    def statistic(self):
+        """返回统计结果"""
+        return self.violation_counts
+
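+
+# Editor's sketch (illustrative only): the angular part of is_point_cross_line() above.
+# Once a wheel position lies on the stop line, the vehicle is treated as having crossed
+# it when the heading makes an angle of at most 90 degrees with the vector from the
+# line midpoint to that wheel. Coordinates and heading below are invented.
+def _demo_heading_vs_stop_line():
+    import math
+    import numpy as np
+
+    wheel = np.array([5.0, 1.5])                      # a point on the stop line
+    heading = math.radians(10.0)                      # driving roughly along +x
+    stop_line = np.array([[5.0, -2.0], [5.0, 3.0]])   # vertical stop line at x = 5
+
+    mid = stop_line.mean(axis=0)
+    to_wheel = wheel - mid
+    direction = np.array([math.cos(heading), math.sin(heading)])
+    cos_theta = float(
+        np.dot(to_wheel, direction) / (np.linalg.norm(to_wheel) * np.linalg.norm(direction))
+    )
+    cos_theta = max(-1.0, min(1.0, cos_theta))        # guard against rounding error
+    return math.degrees(math.acos(cos_theta)) <= 90.0  # True for this toy pose
+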
+
+class WarningViolation(object):
+    """警告性违规类"""
+
+    def __init__(self, df_data):
+        self.traffic_violations_type = "警告性违规类"
+        print("警告性违规类 类初始化中...")
+        self.config = df_data.vehicle_config
+        self.data_ego = df_data.obj_data[1]
+        self.data = self.data_ego.copy()  # 避免修改原始 DataFrame
+        self.violation_counts = {
+            "generalRoadIrregularLaneUse": 0,  # 驾驶机动车在高速公路、城市快速路以外的道路上不按规定车道行驶
+            "urbanExpresswayOrHighwayRideLaneDivider": 0,  # 机动车在高速公路或者城市快速路上骑、轧车行道分界线
+        }
+
+    def process_violations(self):
+        general_road = {3}  # 普通道路
+        lane_type = {11}  # 车道类型 # 10: 机动车道,11: 非机动车道
+        # with open(self.config_path / "vehicle_config.yaml", 'r') as f:
+        #     config = yaml.load(f, Loader=yaml.FullLoader)
+        car_width = self.config["CAR_WIDTH"]
+        lane_width = self.data["lane_width"]  # 假定 'lane_width' 在数据中存在
+
+        # 驾驶机动车在高速公路、城市快速路以外的道路上不按规定车道行驶
+        # 使用布尔索引来筛选满足条件的行
+        condition = (self.data["road_fc"].isin(general_road)) & (
+            self.data["lane_type"].isin(lane_type)
+        )
+
+        # 创建一个新的列,并根据条件设置值
+        self.data["is_violation"] = condition
+
+        # 统计满足条件的连续时间段
+        violation_segments = self.count_continuous_violations(
+            self.data["is_violation"], self.data["simTime"]
+        )
+
+        # 更新不按规定车道行驶违规计数
+        self.violation_counts["generalRoadIrregularLaneUse"] += len(violation_segments)
+
+        # 机动车在高速公路或者城市快速路上骑、轧车行道分界线
+
+        # 计算阈值
+        threshold = (lane_width - car_width) / 2
+
+        # 找到满足条件的行
+        self.data["is_violation"] = self.data["laneOffset"] > threshold
+
+        # 统计满足条件的连续时间段
+        violation_segments = self.count_continuous_violations(
+            self.data["is_violation"], self.data["simTime"]
+        )
+
+        # 更新骑行车道线违规计数
+        self.violation_counts["urbanExpresswayOrHighwayRideLaneDivider"] += len(
+            violation_segments
+        )
+
+    def count_continuous_violations(self, violation_series, time_series):
+        """统计连续违规的时间段数量"""
+        continuous_segments = []
+        current_segment = []
+
+        for is_violation, time in zip(violation_series, time_series):
+            if is_violation:
+                if not current_segment:  # 新的连续段开始
+                    current_segment.append(time)
+            else:
+                if current_segment:  # 连续段结束
+                    continuous_segments.append(current_segment)
+                    current_segment = []
+
+        # 检查是否有一个未结束的连续段在最后
+        if current_segment:
+            continuous_segments.append(current_segment)
+
+        return continuous_segments
+
+    def statistic(self):
+        # 处理数据
+        self.process_violations()
+        # self.logger.info(f"警告性违规类指标统计完成,统计结果:{self.violation_counts}")
+        return self.violation_counts
+
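+
+# Editor's sketch (illustrative only): count_continuous_violations() above turns a
+# boolean flag series into runs of consecutive True samples; each run counts as one
+# violation episode. The flags and timestamps below are invented.
+def _demo_count_violation_episodes():
+    flags = [False, True, True, False, True, False, True, True, True]
+    times = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
+    episodes = []
+    current = []
+    for is_violation, t in zip(flags, times):
+        if is_violation:
+            current.append(t)
+        elif current:
+            episodes.append(current)
+            current = []
+    if current:
+        episodes.append(current)
+    return len(episodes)  # 3 episodes for this toy input
+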
+class TrafficSignViolation(object):
+    """交通标志违规类"""
+
+    def __init__(self, df_data):
+        self.traffic_violations_type = "交通标志违规类"
+        print("交通标志违规类 类初始化中...")
+        self.data_ego = df_data.obj_data[1]
+        self.ego_data = (
+            self.data_ego[config.TRFFICSIGN_INFO].copy().reset_index(drop=True)
+        )
+        self.data_ego = self.data_ego.copy()  # 避免修改原始 DataFrame
+        self.violation_counts = {
+            "NoStraightThrough": 0,  # 禁止直行标志地方直行
+            "SpeedLimitViolation": 0,  # 违反限速规定
+            "MinimumSpeedLimitViolation": 0,  # 违反最低限速规定
+        }   
+
+    def checkForProhibitionViolation(self):
+        """禁令标志判断违规:7 禁止直行,12:限制速度"""
+        # 筛选出 sign_type1 为7(禁止直行)的数据
+        violation_straight_df = self.data_ego[self.data_ego["sign_type1"] == 7].copy()
+        
+        # 判断车辆是否在禁止直行路段直行
+        if not violation_straight_df.empty:
+            # 按时间戳排序(假设数据按时间顺序处理)
+            violation_straight_df = violation_straight_df.sort_values('simTime')
+            
+            # 计算航向角变化(前后时间点的差值绝对值)
+            violation_straight_df['posH_diff'] = violation_straight_df['posH'].diff().abs()
+            
+            # 筛选条件:航向角变化小于阈值(例如5度)且速度不为0
+            threshold = 5  # 单位:度(根据场景调整)
+            mask = (violation_straight_df['posH_diff'] <= threshold) & (violation_straight_df['v'] > 0)
+            straight_violations = violation_straight_df[mask]
+            
+            # 统计违规次数或记录违规数据
+            self.violation_counts["prohibition_straight"] = len(straight_violations)
+            
+        
+        # 限制速度判断
+        violation_speed_limit_df = self.data_ego[self.data_ego["sign_type1"] == 12]
+        if not violation_speed_limit_df.empty:
+            mask = violation_speed_limit_df["v"] > violation_speed_limit_df["sign_speed"]
+            self.violation_counts["SpeedLimitViolation"] = len(violation_speed_limit_df[mask])
+
+    def checkForInstructionViolation(self):
+        """限速标志属于指示性标志:13:最低限速"""
+        violation_minimum_speed_limit_df = self.data_ego[self.data_ego["sign_type1"] == 13]
+        if not violation_minimum_speed_limit_df.empty:
+            mask = violation_minimum_speed_limit_df["v"] < violation_minimum_speed_limit_df["sign_speed"]
+            self.violation_counts["MinimumSpeedLimitViolation"] = len(violation_minimum_speed_limit_df[mask])
+
+    def statistic(self):
+        self.checkForProhibitionViolation()
+        self.checkForInstructionViolation()
+        # self.logger.info(f"交通标志违规类指标统计完成,统计结果:{self.violation_counts}")
+        return self.violation_counts
+
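+
+# Editor's sketch (illustrative only): the "going straight" test used above — the
+# frame-to-frame change of the heading column stays under a small threshold while the
+# vehicle is moving (assuming posH is expressed in degrees here). Toy values only.
+def _demo_straight_through_check(threshold_deg=5.0):
+    import pandas as pd
+
+    df = pd.DataFrame({"posH": [10.0, 10.5, 11.0, 11.2], "v": [6.0, 6.1, 6.0, 5.9]})
+    df["posH_diff"] = df["posH"].diff().abs()
+    going_straight = (df["posH_diff"] <= threshold_deg) & (df["v"] > 0)
+    return int(going_straight.sum())  # 3 moving samples with a near-constant heading
+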
+
+class ViolationManager:
+    """违规管理类,用于管理所有违规行为"""
+
+    def __init__(self, data_processed):
+
+        self.violations = []
+        self.data = data_processed
+        self.config = data_processed.traffic_config
+
+        self.over_take_violation = OvertakingViolation(self.data)
+        self.slow_down_violation = SlowdownViolation(self.data)
+        self.wrong_way_violation = WrongWayViolation(self.data)
+        self.speeding_violation = SpeedingViolation(self.data)
+        self.traffic_light_violation = TrafficLightViolation(self.data)
+        self.warning_violation = WarningViolation(self.data)
+
+        # self.report_statistic()
+
+    def report_statistic(self):
+
+        traffic_result = self.over_take_violation.statistic()
+        traffic_result.update(self.slow_down_violation.statistic())
+        traffic_result.update(self.traffic_light_violation.statistic())
+        traffic_result.update(self.wrong_way_violation.statistic())
+        traffic_result.update(self.speeding_violation.statistic())
+        traffic_result.update(self.warning_violation.statistic())
+
+
+        evaluator = Score(self.config)
+        result = evaluator.evaluate(traffic_result)
+
+        print("\n[交规类表现及得分情况]")
+        # self.logger.info(f"Traffic Result:{traffic_result}")
+        return result
+
+
+# 示例使用
+if __name__ == "__main__":
+    pass
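+
+
+# Editor's note: a hypothetical manual invocation, mirroring how the evaluator script
+# wires things together (DataPreprocessing(dataPath, configPath) feeding a metric
+# class). The arguments are placeholders; this helper is never called automatically.
+def _demo_manual_traffic_run(case_dir, config_path):
+    from modules.lib import data_process
+
+    processed = data_process.DataPreprocessing(case_dir, config_path)
+    return ViolationManager(processed).report_statistic()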

+ 234 - 0
scripts/evaluator.py

@@ -0,0 +1,234 @@
+# evaluation_engine.py
+import sys
+import warnings
+import time
+from pathlib import Path
+import argparse
+from concurrent.futures import ThreadPoolExecutor
+from functools import lru_cache
+from typing import Dict, Any
+from datetime import datetime
+# 强制导入所有可能动态加载的模块
+
+
+
+# 安全设置根目录路径(动态路径管理)
+# 判断是否处于编译模式
+if hasattr(sys, "_MEIPASS"):
+    # 编译模式下使用临时资源目录
+    _ROOT_PATH = Path(sys._MEIPASS)
+else:
+    # 开发模式下使用原工程路径
+    _ROOT_PATH = Path(__file__).resolve().parent.parent
+
+sys.path.insert(0, str(_ROOT_PATH))
+print(f"当前根目录:{_ROOT_PATH}")
+print(f'当前系统路径:{sys.path}')
+
+
+class EvaluationCore:
+    """评估引擎核心类(单例模式)"""
+
+    _instance = None
+
+    def __new__(cls, logPath: str):
+        if not cls._instance:
+            cls._instance = super().__new__(cls)
+            cls._instance._init(logPath)
+        return cls._instance
+
+    def _init(self, logPath: str = None) -> None:
+        """初始化引擎组件"""
+        # configPath: str, logPath: str, dataPath: str, resultPath: str
+        self.log_path = logPath
+        self._init_log_system()
+        self._init_metrics()
+
+    def _init_log_system(self) -> None:
+        """集中式日志管理"""
+        try:
+            from modules.lib.log_manager import LogManager
+
+            log_manager = LogManager(self.log_path)
+            self.logger = log_manager.get_logger()
+        except (PermissionError, IOError) as e:
+            sys.stderr.write(f"日志系统初始化失败: {str(e)}\n")
+            sys.exit(1)
+
+    def _init_metrics(self) -> None:
+        """初始化评估模块(策略模式)"""
+        # from modules.metric import safety, comfort, traffic, efficient, function
+        self.metric_modules = {
+            "safety": self._load_module("modules.metric.safety", "Safe"),
+            "comfort": self._load_module("modules.metric.comfort", "Comfort"),
+            "traffic": self._load_module("modules.metric.traffic", "ViolationManager"),
+            "efficient": self._load_module("modules.metric.efficient", "Efficient"),
+            "function": self._load_module("modules.metric.function", "FunctionManager"),
+        }
+
+    @lru_cache(maxsize=32)
+    def _load_module(self, module_path: str, class_name: str) -> Any:
+        """动态加载评估模块(带缓存)"""
+        try:
+            __import__(module_path)
+            return getattr(sys.modules[module_path], class_name)
+        except (ImportError, AttributeError) as e:
+            self.logger.error(f"模块加载失败: {module_path}.{class_name} - {str(e)}")
+            raise
+
+    def parallel_evaluate(self, data: Any) -> Dict[str, Any]:
+        """并行化评估引擎(动态线程池)"""
+        results = {}
+        # 关键修改点1:线程数=模块数
+        with ThreadPoolExecutor(max_workers=len(self.metric_modules)) as executor:
+            # 关键修改点2:按模块名创建future映射
+            futures = {
+                module_name: executor.submit(
+                    self._run_module, module, data, module_name
+                )
+                for module_name, module in self.metric_modules.items()
+            }
+
+            # 关键修改点3:按模块顺序处理结果
+            for module_name, future in futures.items():
+                try:
+                    result = future.result()
+                    results.update(result[module_name])
+                    # 结构化合并结果(保留模块名键)
+                    # results[module_name] = result.get(module_name, {})
+                except Exception as e:
+                    self.logger.error(
+                        f"{module_name} 评估失败: {str(e)}",
+                        exc_info=True,
+                        extra={"stack": True},  # 记录完整堆栈
+                    )
+                    # 错误信息结构化存储
+                    results[module_name] = {
+                        "status": "error",
+                        "message": str(e),
+                        "timestamp": datetime.now().isoformat(),
+                    }
+        return results
+
+    def _run_module(
+        self, module_class: Any, data: Any, module_name: str
+    ) -> Dict[str, Any]:
+        """执行单个评估模块(带熔断机制)"""
+        try:
+            instance = module_class(data)
+            return {module_name: instance.report_statistic()}
+        except Exception as e:
+            self.logger.error(f"{module_name} 执行异常: {str(e)}", stack_info=True)
+            return {module_name: {"error": str(e)}}
+
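+
+# Editor's sketch (illustrative only): the __import__ + getattr pattern used by
+# _load_module() above, shown on a standard-library module so it is self-contained.
+# importlib.import_module is the more common spelling of the same idea.
+def _demo_dynamic_load(module_path="json", class_name="JSONDecoder"):
+    import sys
+
+    __import__(module_path)
+    return getattr(sys.modules[module_path], class_name)
+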
+
+class EvaluationPipeline:
+    """评估流水线控制器"""
+
+    def __init__(self, configPath: str, logPath: str, dataPath: str, resultPath: str):
+        self.engine = EvaluationCore(logPath)
+        self.configPath = configPath
+        self.data_path = dataPath
+        self.report_path = resultPath
+        # self.case_name = os.path.basename(os.path.dirname(dataPath))
+        self.data_processor = self._load_data_processor()
+
+    def _load_data_processor(self) -> Any:
+        """动态加载数据预处理模块"""
+        try:
+            from modules.lib import data_process
+
+            return data_process.DataPreprocessing(self.data_path, self.configPath)
+        except ImportError as e:
+            raise RuntimeError(f"数据处理器加载失败: {str(e)}") from e
+
+    def execute_pipeline(self) -> Dict[str, Any]:
+        """端到端执行评估流程"""
+        self._validate_case()
+
+        try:
+            metric_results = self.engine.parallel_evaluate(self.data_processor)
+            from modules.lib.score import get_overall_result
+            all_result = get_overall_result(metric_results, self.configPath)
+            report = self._generate_report(
+                self.data_processor.case_name, all_result
+            )
+            return report
+        except Exception as e:
+            self.engine.logger.critical(f"流程执行失败: {str(e)}", exc_info=True)
+            return {"error": str(e)}
+
+    def _validate_case(self) -> None:
+        """用例路径验证"""
+        case_path = self.data_path
+        if not case_path.exists():
+            raise FileNotFoundError(f"用例路径不存在: {case_path}")
+        if not case_path.is_dir():
+            raise NotADirectoryError(f"无效的用例目录: {case_path}")
+
+    def _generate_report(self, case_name: str, results: Dict) -> Dict:
+        """生成评估报告(模板方法模式)"""
+        from modules.lib.common import dict2json
+
+        report_path = self.report_path
+        report_path.mkdir(parents=True, exist_ok=True, mode=0o777)
+
+        dict2json(results, report_path / f"{case_name}_report.json")
+        return results
+
+def main():
+    """命令行入口(工厂模式)"""
+    parser = argparse.ArgumentParser(
+        description="自动驾驶评估系统 V2.0",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+    # 带帮助说明的参数定义,设置为必传参数
+    parser.add_argument(
+        "--logPath",
+        type=Path,
+        required=True,
+        help="日志文件存储路径",
+    )
+    parser.add_argument(
+        "--dataPath",
+        type=Path,
+        required=True,
+        help="预处理后的输入数据目录",
+    )
+    parser.add_argument(
+        "--configPath",
+        type=Path,
+        required=True,
+        help="评估指标配置文件路径",
+    )
+    parser.add_argument(
+        "--reportPath",
+        type=Path,
+        required=True,
+        help="评估报告输出目录",
+    )
+    args = parser.parse_args()
+
+    try:
+        pipeline = EvaluationPipeline(
+            args.configPath, args.logPath, args.dataPath, args.reportPath
+        )
+        start_time = time.perf_counter()
+
+        result = pipeline.execute_pipeline()
+
+        if "error" in result:
+            sys.exit(1)
+
+        print(f"评估完成,耗时: {time.perf_counter()-start_time:.2f}s")
+        print(f"报告路径: {pipeline.report_path}")
+    except KeyboardInterrupt:
+        print("\n用户中断操作")
+        sys.exit(130)
+
+
+if __name__ == "__main__":
+    warnings.filterwarnings("ignore")
+    main()
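
For comparison with the CLI above, a minimal programmatic invocation of the same pipeline might look like this; all paths below are placeholders, not taken from the repository:

from pathlib import Path

pipeline = EvaluationPipeline(
    configPath=Path("config/metric_config.yaml"),  # placeholder paths
    logPath=Path("log/runtime.log"),
    dataPath=Path("data/some_case"),
    resultPath=Path("result"),
)
report = pipeline.execute_pipeline()
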

+ 236 - 0
scripts/evaluator_test.py

@@ -0,0 +1,236 @@
+# evaluator_test.py: test variant of evaluation_engine.py with hard-coded default paths
+import sys
+import warnings
+import time
+from pathlib import Path
+import argparse
+from concurrent.futures import ThreadPoolExecutor
+from functools import lru_cache
+from typing import Dict, Any
+from datetime import datetime
+# Force-import all modules that may be loaded dynamically
+
+
+
+# Set the project root path safely (dynamic path management)
+# Check whether we are running as a frozen (compiled) build
+if hasattr(sys, "_MEIPASS"):
+    # Compiled mode: use the temporary resource directory (sys._MEIPASS, set by PyInstaller)
+    _ROOT_PATH = Path(sys._MEIPASS)
+else:
+    # Development mode: use the original project path
+    _ROOT_PATH = Path(__file__).resolve().parent.parent
+
+sys.path.insert(0, str(_ROOT_PATH))
+print(f"当前根目录:{_ROOT_PATH}")
+print(f'当前系统路径:{sys.path}')
+
+
+class EvaluationCore:
+    """Core evaluation engine (singleton pattern)"""
+
+    _instance = None
+
+    def __new__(cls, logPath: str):
+        if not cls._instance:
+            cls._instance = super().__new__(cls)
+            cls._instance._init(logPath)
+        return cls._instance
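
A quick illustration of the singleton behaviour implemented by __new__: a second construction returns the first instance, so its logPath argument is silently ignored (paths here are hypothetical; the repository's LogManager must be importable for this to run):

core_a = EvaluationCore("log/run_a.log")
core_b = EvaluationCore("log/run_b.log")
assert core_a is core_b                      # same object
assert core_a.log_path == "log/run_a.log"    # second logPath was ignored
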
+
+    def _init(self, logPath: str = None) -> None:
+        """Initialize engine components"""
+        # configPath: str, logPath: str, dataPath: str, resultPath: str
+        self.log_path = logPath
+        self._init_log_system()
+        self._init_metrics()
+
+    def _init_log_system(self) -> None:
+        """Centralized log management"""
+        try:
+            from modules.lib.log_manager import LogManager
+
+            log_manager = LogManager(self.log_path)
+            self.logger = log_manager.get_logger()
+        except (PermissionError, IOError) as e:
+            sys.stderr.write(f"日志系统初始化失败: {str(e)}\n")
+            sys.exit(1)
+
+    def _init_metrics(self) -> None:
+        """Initialize the evaluation modules (strategy pattern)"""
+        # from modules.metric import safety, comfort, traffic, efficient, function
+        self.metric_modules = {
+            "safety": self._load_module("modules.metric.safety", "Safe"),
+            "comfort": self._load_module("modules.metric.comfort", "Comfort"),
+            "traffic": self._load_module("modules.metric.traffic", "ViolationManager"),
+            "efficient": self._load_module("modules.metric.efficient", "Efficient"),
+            "function": self._load_module("modules.metric.function", "FunctionManager"),
+        }
+
+    @lru_cache(maxsize=32)
+    def _load_module(self, module_path: str, class_name: str) -> Any:
+        """Dynamically load an evaluation module (with caching)"""
+        try:
+            __import__(module_path)
+            return getattr(sys.modules[module_path], class_name)
+        except (ImportError, AttributeError) as e:
+            self.logger.error(f"模块加载失败: {module_path}.{class_name} - {str(e)}")
+            raise
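
The __import__ plus sys.modules lookup works, but importlib.import_module returns the leaf module directly, which reads a little cleaner. An equivalent sketch, not the repository's code:

import importlib
from functools import lru_cache

@lru_cache(maxsize=32)
def load_class(module_path: str, class_name: str):
    # importlib.import_module returns the imported module object,
    # so no sys.modules lookup is needed.
    module = importlib.import_module(module_path)
    return getattr(module, class_name)
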
+
+    def parallel_evaluate(self, data: Any) -> Dict[str, Any]:
+        """Parallelized evaluation engine (dynamic thread pool)"""
+        results = {}
+        # Key change 1: number of threads equals the number of modules
+        with ThreadPoolExecutor(max_workers=len(self.metric_modules)) as executor:
+            # Key change 2: create a future map keyed by module name
+            futures = {
+                module_name: executor.submit(
+                    self._run_module, module, data, module_name
+                )
+                for module_name, module in self.metric_modules.items()
+            }
+
+            # Key change 3: process results in module order
+            for module_name, future in futures.items():
+                try:
+                    result = future.result()
+                    results.update(result[module_name])
+                    # Structured merge of results (would keep the module-name key)
+                    # results[module_name] = result.get(module_name, {})
+                except Exception as e:
+                    self.logger.error(
+                        f"{module_name} 评估失败: {str(e)}",
+                        exc_info=True,
+                        extra={"stack": True},  # record the full stack trace
+                    )
+                    # Store the error in a structured form
+                    results[module_name] = {
+                        "status": "error",
+                        "message": str(e),
+                        "timestamp": datetime.now().isoformat(),
+                    }
+        return results
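
Note the asymmetry in how results are merged: _run_module catches exceptions itself, and parallel_evaluate flattens the returned dict with results.update(result[module_name]), so a module that fails inside _run_module surfaces as a top-level "error" key with the module name lost; the except branch that stores status/message/timestamp under the module name only fires if future.result() itself raises. Illustrative shape of the merged dict (values are made up):

results = {
    "TTC": 3.2,                     # a module that succeeded: its inner dict is flattened in
    "error": "KeyError: 'ego_df'",  # a module that failed inside _run_module: module name is lost
}
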
+
+    def _run_module(
+        self, module_class: Any, data: Any, module_name: str
+    ) -> Dict[str, Any]:
+        """Run a single evaluation module (with a circuit-breaker mechanism)"""
+        try:
+            instance = module_class(data)
+            return {module_name: instance.report_statistic()}
+        except Exception as e:
+            self.logger.error(f"{module_name} 执行异常: {str(e)}", stack_info=True)
+            return {module_name: {"error": str(e)}}
+
+
+class EvaluationPipeline:
+    """Evaluation pipeline controller"""
+
+    def __init__(self, configPath: Path, logPath: Path, dataPath: Path, resultPath: Path):
+        self.engine = EvaluationCore(logPath)
+        self.configPath = configPath
+        self.data_path = dataPath
+        self.report_path = resultPath
+        # self.case_name = os.path.basename(os.path.dirname(dataPath))
+        self.data_processor = self._load_data_processor()
+
+    def _load_data_processor(self) -> Any:
+        """Dynamically load the data preprocessing module"""
+        try:
+            from modules.lib import data_process
+
+            return data_process.DataPreprocessing(self.data_path, self.configPath)
+        except ImportError as e:
+            raise RuntimeError(f"数据处理器加载失败: {str(e)}") from e
+
+    def execute_pipeline(self) -> Dict[str, Any]:
+        """Execute the evaluation flow end to end"""
+        self._validate_case()
+
+        try:
+            metric_results = self.engine.parallel_evaluate(self.data_processor)
+            from modules.lib.score import get_overall_result
+            all_result = get_overall_result(metric_results, self.configPath)
+            report = self._generate_report(
+                self.data_processor.case_name, all_result
+            )
+            return report
+        except Exception as e:
+            self.engine.logger.critical(f"流程执行失败: {str(e)}", exc_info=True)
+            return {"error": str(e)}
+
+    def _validate_case(self) -> None:
+        """Validate the case path"""
+        case_path = self.data_path
+        if not case_path.exists():
+            raise FileNotFoundError(f"用例路径不存在: {case_path}")
+        if not case_path.is_dir():
+            raise NotADirectoryError(f"无效的用例目录: {case_path}")
+
+    def _generate_report(self, case_name: str, results: Dict) -> Dict:
+        """Generate the evaluation report (template method pattern)"""
+        from modules.lib.common import dict2json
+
+        report_path = self.report_path
+        report_path.mkdir(parents=True, exist_ok=True, mode=0o777)
+
+        dict2json(results, report_path / f"{case_name}_report.json")
+        return results
+
+
+def main():
+    """Command-line entry point (factory pattern)"""
+    parser = argparse.ArgumentParser(
+        description="自动驾驶评估系统 V2.0",
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+    # Argument definitions with help text; defaults are provided for local testing
+    parser.add_argument(
+        "--logPath",
+        type=Path,
+        default="/home/kevin/kevin/zhaoyuan/zhaoyuan/log/runtime.log",
+        help="日志文件存储路径",
+    )
+    parser.add_argument(
+        "--dataPath",
+        type=Path,
+        default="/home/kevin/kevin/zhaoyuan/sqlite3_demo/docker_build/zhaoyuan_0320/V2V_CSAE53-2020_ForwardCollision_LST_02-03",
+        help="预处理后的输入数据目录",
+    )
+    parser.add_argument(
+        "--configPath",
+        type=Path,
+        default="/home/kevin/kevin/zhaoyuan/sqlite3_demo/docker_build/zhaoyuan_0320/config/metric_config.yaml",
+        help="评估指标配置文件路径",
+    )
+    parser.add_argument(
+        "--reportPath",
+        type=Path,
+        default="/home/kevin/kevin/zhaoyuan/sqlite3_demo/docker_build/zhaoyuan_0320/result",
+        help="评估报告输出目录",
+    )
+    args = parser.parse_args()
+
+    try:
+        pipeline = EvaluationPipeline(
+            args.configPath, args.logPath, args.dataPath, args.reportPath
+        )
+        start_time = time.perf_counter()
+
+        result = pipeline.execute_pipeline()
+
+        if "error" in result:
+            sys.exit(1)
+
+        print(f"评估完成,耗时: {time.perf_counter()-start_time:.2f}s")
+        print(f"报告路径: {pipeline.report_path}")
+    except KeyboardInterrupt:
+        print("\n用户中断操作")
+        sys.exit(130)
+
+
+if __name__ == "__main__":
+    warnings.filterwarnings("ignore")
+    main()