4 次代碼提交 f9ab5bceee ... 4c77b98499

作者 SHA1 備註 提交日期
  cicv 4c77b98499 解决merge中的重复列名,修改traffic.csv中simTime倒序问题,增加交通灯与车辆航向角筛选交通灯 4 周之前
  cicv 461bd57659 修复merge_ObjState.csv中重复列名问题,添加车辆航向角对红绿灯筛选逻辑。 4 周之前
  XGJ_zhaoyuan f9ab5bceee 修改前向碰撞预警的名称ForwardCollisionW为ForwardCollision 1 月之前
  XGJ_zhaoyuan 06df90e229 修改合并数据时地图信息中红绿灯位置缺失,以及HMI,rosbag数据时间戳合并精度问题 1 月之前

+ 7 - 7
_internal/VBox.dbc

@@ -79,7 +79,7 @@ BO_ 515 DLB_03: 7 Vector__XXX
 BO_ 516 DLB_04: 8 Vector__XXX
  SG_ Speed_mps : 0|16@1- (0.01,0) [-300|300] "m/s" Vector__XXX
  SG_ Dir_Movement : 16|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
- SG_ Heading : 32|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
+ SG_ Heading : 32|16@1- (-0.01,0) [-180|180] "deg" Vector__XXX
  SG_ UFO_State : 48|8@1+ (1,0) [0|20] "" Vector__XXX
  SG_ Validity_SDH : 56|8@1+ (1,0) [0|15] "" Vector__XXX
 
@@ -117,7 +117,7 @@ BO_ 523 DLB_11: 4 Vector__XXX
  SG_ UFO_Speed_lat_mps : 16|16@1- (0.01,0) [-300|300] "m/s" Vector__XXX
 
 BO_ 524 DLB_12: 8 Vector__XXX
- SG_ UFO_Target_Heading : 0|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
+ SG_ UFO_Target_Heading : 0|16@1- (-0.01,0) [-180|180] "deg" Vector__XXX
  SG_ UFO_Deviation_Heading : 16|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
  SG_ UFO_Radius_of_turn : 32|24@1- (0.01,0) [-80000|80000] "m" Vector__XXX
  SG_ Validity_12 : 56|8@1+ (1,0) [0|7] "" Vector__XXX
@@ -157,7 +157,7 @@ BO_ 533 DLB_21: 7 Vector__XXX
  SG_ Validity_VUT_Local_Pos_XY : 48|8@1+ (1,0) [0|1] "" Vector__XXX
 
 BO_ 534 DLB_22: 7 Vector__XXX
- SG_ VUT_Heading : 0|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
+ SG_ VUT_Heading : 0|16@1- (-0.01,0) [-180|180] "deg" Vector__XXX
  SG_ VUT_Pitch : 16|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
  SG_ VUT_Roll : 32|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
  SG_ Validity_VUT_HPR : 48|8@1+ (1,0) [0|7] "" Vector__XXX
@@ -191,10 +191,10 @@ BO_ 539 DLB_27: 7 Vector__XXX
 BO_ 540 DLB_28: 7 Vector__XXX
  SG_ VUT_Speed_long_mps : 0|16@1- (0.01,0) [-300|300] "m/s" Vector__XXX
  SG_ VUT_Speed_lat_mps : 16|16@1- (0.01,0) [-300|300] "m/s" Vector__XXX
- SG_ VUT_Acc_Y : 32|24@1- (0.0001,0) [-800|800] "m/ss" Vector__XXX
+ SG_ VUT_Acc_Y2 : 32|24@1- (0.0001,0) [-800|800] "m/ss" Vector__XXX
 
 BO_ 541 DLB_29: 8 Vector__XXX
- SG_ VUT_Target_Heading : 0|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
+ SG_ VUT_Target_Heading : 0|16@1- (-0.01,0) [-180|180] "deg" Vector__XXX
  SG_ VUT_Deviation_Heading : 16|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
  SG_ VUT_Radius_of_turn : 32|24@1- (0.01,0) [-80000|80000] "m" Vector__XXX
  SG_ Validity_32 : 56|8@1+ (1,0) [0|7] "" Vector__XXX
@@ -241,13 +241,13 @@ BO_ 549 DLB_37: 7 Vector__XXX
 BO_ 550 DLB_38: 7 Vector__XXX
  SG_ UFO_Speed_n_mps : 0|16@1- (0.01,0) [-300|300] "m/s" Vector__XXX
  SG_ UFO_Dir_Movement_n : 16|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
- SG_ UFO_Heading_n : 32|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
+ SG_ UFO_Heading_n : 32|16@1- (-0.01,0) [-180|180] "deg" Vector__XXX
  SG_ Validity_USDHN : 48|8@1+ (1,0) [0|15] "" Vector__XXX
 
 BO_ 551 DLB_39: 7 Vector__XXX
  SG_ VUT_Speed_n_mps : 0|16@1- (0.01,0) [-300|300] "m/s" Vector__XXX
  SG_ VUT_Dir_Movement_n : 16|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
- SG_ VUT_Heading_n : 32|16@1- (0.01,0) [-180|180] "deg" Vector__XXX
+ SG_ VUT_Heading_n : 32|16@1- (-0.01,0) [-180|180] "deg" Vector__XXX
  SG_ Validity_VSDHN : 48|8@1+ (1,0) [0|15] "" Vector__XXX
 
 BO_ 562 DLB_50: 6 Vector__XXX

二進制
core/__pycache__/optimized_processor.cpython-313.pyc


二進制
core/processors/built_in/__pycache__/lst.cpython-313.pyc


+ 491 - 50
core/processors/built_in/lst.py

@@ -567,8 +567,6 @@ class ZipCSVProcessor:
         # df_vehicle.dropna(subset=[col for col in required_pos if col in df_vehicle.columns], inplace=True)
 
         try:
-            # x = np.arange(df_raw["simTime"].tolist()[0], df_raw["simTime"].tolist()[0] + 0.01 * (len(df_vehicle)),0.01)
-            # y = f"{x: .02f}"
             df_vehicle["simTime"] = np.round(np.linspace(df_raw["simTime"].tolist()[0]+28800, df_raw["simTime"].tolist()[0]+28800 + 0.01*(len(df_vehicle)), len(df_vehicle)), 2)
             df_vehicle["simFrame"] = np.arange(1, len(df_vehicle) + 1)
             df_vehicle["playerId"] = int(player_id)
@@ -1021,11 +1019,6 @@ class FinalDataProcessor:
         self.config = config
         self.output_dir = config.output_dir
 
-    def find_closet_idx(self, time, df) -> int:
-        series = (df['simTime'] - time).abs()
-        index = (df['simTime'] - time).abs().idxmin()
-        return (df['simTime'] - time).abs().idxmin()
-
     def process(self) -> bool:
         """执行最终数据合并和处理步骤。"""
         print("--- Starting Final Data Processing ---")
@@ -1037,7 +1030,12 @@ class FinalDataProcessor:
             if not obj_state_path.exists():
                 print(f"Error: Required input file not found: {obj_state_path}")
                 return False
-
+            # 处理交通灯数据并保存
+            df_traffic = self._process_trafficlight_data()
+            if not df_traffic.empty:
+                traffic_csv_path = self.output_dir / "Traffic.csv"
+                df_traffic.to_csv(traffic_csv_path, index=False, float_format='%.6f')
+                print(f"Successfully created traffic light data file: {traffic_csv_path}")
             # Load and process data
             df_object = pd.read_csv(obj_state_path, dtype={"simTime": float}, low_memory=False)
 
@@ -1051,8 +1049,8 @@ class FinalDataProcessor:
             print(f"Successfully created final merged file: {merged_csv_path}")
 
             # Clean up intermediate files
-            if obj_state_path.exists():
-                obj_state_path.unlink()
+            # if obj_state_path.exists():
+            #     obj_state_path.unlink()
 
             print("--- Final Data Processing Finished ---")
             return True
@@ -1063,9 +1061,198 @@ class FinalDataProcessor:
             traceback.print_exc()
             return False
 
+    # def _merge_optional_data(self, df_object: pd.DataFrame) -> pd.DataFrame:
+    #     """加载和合并可选数据"""
+    #     df_merged = df_object.copy()
+        
+    #     # 检查并删除重复列的函数
+    #     def clean_duplicate_columns(df):
+    #         # 查找带有 _x 或 _y 后缀的列
+    #         duplicate_cols = []
+    #         base_cols = {}
+            
+    #         for col in df.columns:
+    #             if col.endswith('_x') or col.endswith('_y'):
+    #                 base_name = col[:-2]  # 去掉后缀
+    #                 if base_name not in base_cols:
+    #                     base_cols[base_name] = []
+    #                 base_cols[base_name].append(col)
+            
+    #         # 对于每组重复列,检查数据是否相同,如果相同则只保留一个
+    #         for base_name, cols in base_cols.items():
+    #             if len(cols) > 1:
+    #                 # 检查这些列的数据是否相同
+    #                 is_identical = True
+    #                 first_col = cols[0]
+    #                 for col in cols[1:]:
+    #                     if not df[first_col].equals(df[col]):
+    #                         is_identical = False
+    #                         break
+                    
+    #                 if is_identical:
+    #                     # 数据相同,保留第一列并重命名为基本名称
+    #                     df = df.rename(columns={first_col: base_name})
+    #                     # 删除其他重复列
+    #                     for col in cols[1:]:
+    #                         duplicate_cols.append(col)
+    #                     print(f"列 {cols} 数据相同,保留为 {base_name}")
+    #                 else:
+    #                     print(f"列 {cols} 数据不同,保留所有列")
+            
+    #         # 删除重复列
+    #         if duplicate_cols:
+    #             df = df.drop(columns=duplicate_cols)
+    #             print(f"删除了重复列: {duplicate_cols}")
+            
+    #         return df
+
+    #     # --- 合并 EgoMap ---
+    #     egomap_path = self.output_dir / OUTPUT_CSV_EGOMAP
+    #     if egomap_path.exists() and egomap_path.stat().st_size > 0:
+    #         try:
+    #             df_ego = pd.read_csv(egomap_path, dtype={"simTime": float})
+    #             # 删除 simFrame 列,因为使用主数据的 simFrame
+    #             if 'simFrame' in df_ego.columns:
+    #                 df_ego = df_ego.drop(columns=['simFrame'])
+
+    #             # 按时间和ID排序
+    #             df_ego.sort_values(['simTime', 'playerId'], inplace=True)
+    #             df_merged.sort_values(['simTime', 'playerId'], inplace=True)
+
+    #             # 使用 merge_asof 进行就近合并,不包括 simFrame
+    #             df_merged = pd.merge_asof(
+    #                 df_merged,
+    #                 df_ego,
+    #                 on='simTime',
+    #                 by='playerId',
+    #                 direction='nearest',
+    #                 tolerance=0.01  # 10ms tolerance
+    #             )
+    #             print("EgoMap data merged.")
+    #         except Exception as e:
+    #             print(f"Warning: Could not merge EgoMap data from {egomap_path}: {e}")
+
+    #     # --- Merge Function ---
+    #     function_path = self.output_dir / OUTPUT_CSV_FUNCTION
+    #     if function_path.exists() and function_path.stat().st_size > 0:
+    #         try:
+    #             df_function = pd.read_csv(function_path, dtype={"timestamp": float}, low_memory=False).drop_duplicates()
+    #             # 删除 simFrame 列
+    #             if 'simFrame' in df_function.columns:
+    #                 df_function = df_function.drop(columns=['simFrame'])
+
+    #             if 'simTime' in df_function.columns:
+    #                 df_function['simTime'] = df_function['simTime'].round(2)
+    #                 df_function['time'] = df_function['simTime'].round(2).astype(float)
+    #                 df_merged['time'] = df_merged['simTime'].round(2).astype(float)
+
+    #                 common_cols = list(set(df_merged.columns) & set(df_function.columns) - {'time'})
+    #                 df_function.drop(columns=common_cols, inplace=True, errors='ignore')
+
+    #                 df_merged = pd.merge(df_merged, df_function, on=["time"], how="left")
+    #                 df_merged.drop(columns=['time'], inplace=True)
+    #                 print("Function data merged.")
+    #             else:
+    #                 print("Warning: 'simTime' column not found in Function.csv. Cannot merge.")
+    #         except Exception as e:
+    #             print(f"Warning: Could not merge Function data from {function_path}: {e}")
+    #     else:
+    #         print("Function data not found or empty, skipping merge.")
+
+    #     # --- Merge OBU ---
+    #     obu_path = self.output_dir / OUTPUT_CSV_OBU
+    #     if obu_path.exists() and obu_path.stat().st_size > 0:
+    #         try:
+    #             df_obu = pd.read_csv(obu_path, dtype={"simTime": float}, low_memory=False).drop_duplicates()
+    #             # 删除 simFrame 列
+    #             if 'simFrame' in df_obu.columns:
+    #                 df_obu = df_obu.drop(columns=['simFrame'])
+
+    #             df_obu['time'] = df_obu['simTime'].round(2).astype(float)
+    #             df_merged['time'] = df_merged['simTime'].round(2).astype(float)
+
+    #             common_cols = list(set(df_merged.columns) & set(df_obu.columns) - {'time'})
+    #             df_obu.drop(columns=common_cols, inplace=True, errors='ignore')
+
+    #             df_merged = pd.merge(df_merged, df_obu, on=["time"], how="left")
+    #             df_merged.drop(columns=['time'], inplace=True)
+    #             print("OBU data merged.")
+    #         except Exception as e:
+    #             print(f"Warning: Could not merge OBU data from {obu_path}: {e}")
+    #     else:
+    #         print("OBU data not found or empty, skipping merge.")
+
+    #     # 在所有合并完成后,清理重复列
+    #     df_merged = clean_duplicate_columns(df_merged)
+        
+    #     return df_merged
     def _merge_optional_data(self, df_object: pd.DataFrame) -> pd.DataFrame:
         """加载和合并可选数据"""
         df_merged = df_object.copy()
+        
+        # 检查并删除重复列的函数
+        def clean_duplicate_columns(df):
+            # 查找带有 _x 或 _y 后缀的列
+            duplicate_cols = []
+            base_cols = {}
+            
+            # 打印清理前的列名
+            print(f"清理重复列前的列名: {df.columns.tolist()}")
+            
+            for col in df.columns:
+                if col.endswith('_x') or col.endswith('_y'):
+                    base_name = col[:-2]  # 去掉后缀
+                    if base_name not in base_cols:
+                        base_cols[base_name] = []
+                    base_cols[base_name].append(col)
+            
+            # 对于每组重复列,检查数据是否相同,如果相同则只保留一个
+            for base_name, cols in base_cols.items():
+                if len(cols) > 1:
+                    # 检查这些列的数据是否相同
+                    is_identical = True
+                    first_col = cols[0]
+                    for col in cols[1:]:
+                        if not df[first_col].equals(df[col]):
+                            is_identical = False
+                            break
+                    
+                    if is_identical:
+                        # 数据相同,保留第一列并重命名为基本名称
+                        df = df.rename(columns={first_col: base_name})
+                        # 删除其他重复列
+                        for col in cols[1:]:
+                            duplicate_cols.append(col)
+                        print(f"列 {cols} 数据相同,保留为 {base_name}")
+                    else:
+                        print(f"列 {cols} 数据不同,保留所有列")
+                        # 如果是 simTime 相关列,确保保留一个
+                        if base_name == 'simTime' and 'simTime' not in df.columns:
+                            df = df.rename(columns={cols[0]: 'simTime'})
+                            print(f"将 {cols[0]} 重命名为 simTime")
+                            # 删除其他 simTime 相关列
+                            for col in cols[1:]:
+                                duplicate_cols.append(col)
+            
+            # 删除重复列
+            if duplicate_cols:
+                # 确保不会删除 simTime 列
+                if 'simTime' not in df.columns and any(col.startswith('simTime_') for col in duplicate_cols):
+                    # 找到一个 simTime 相关列保留
+                    for col in duplicate_cols[:]:
+                        if col.startswith('simTime_'):
+                            df = df.rename(columns={col: 'simTime'})
+                            duplicate_cols.remove(col)
+                            print(f"将 {col} 重命名为 simTime")
+                            break
+                
+                df = df.drop(columns=duplicate_cols)
+                print(f"删除了重复列: {duplicate_cols}")
+            
+            # 打印清理后的列名
+            print(f"清理重复列后的列名: {df.columns.tolist()}")
+            
+            return df
 
         # --- 合并 EgoMap ---
         egomap_path = self.output_dir / OUTPUT_CSV_EGOMAP
@@ -1076,82 +1263,329 @@ class FinalDataProcessor:
                 if 'simFrame' in df_ego.columns:
                     df_ego = df_ego.drop(columns=['simFrame'])
 
+                # 打印合并前的列名
+                print(f"合并 EgoMap 前 df_merged 的列: {df_merged.columns.tolist()}")
+                print(f"df_ego 的列: {df_ego.columns.tolist()}")
+
                 # 按时间和ID排序
                 df_ego.sort_values(['simTime', 'playerId'], inplace=True)
                 df_merged.sort_values(['simTime', 'playerId'], inplace=True)
 
                 # 使用 merge_asof 进行就近合并,不包括 simFrame
-                # df_merged = pd.merge_asof(
-                #     df_merged,
-                #     df_ego,
-                #     on='simTime',
-                #     by='playerId',
-                #     direction='nearest',
-                #     tolerance=0.01  # 10ms tolerance
-                # )
-                df_merged = pd.merge(
+                df_merged = pd.merge_asof(
                     df_merged,
                     df_ego,
-                    on=["simTime", "playerId"],
-                    how="left",
-                    suffixes=("", "_map"),
+                    on='simTime',
+                    by='playerId',
+                    direction='nearest',
+                    tolerance=0.01  # 10ms tolerance
                 )
-                if {"posX_map", "posY_map", "posH_map"}.issubset(df_merged.columns):
-                    df_merged.drop(columns=["posX_map", "posY_map", "posH_map"], inplace=True)
+                
+                # 打印合并后的列名
+                print(f"合并 EgoMap 后 df_merged 的列: {df_merged.columns.tolist()}")
+                
+                # 确保 simTime 列存在
+                if 'simTime' not in df_merged.columns:
+                    if 'simTime_x' in df_merged.columns:
+                        df_merged.rename(columns={'simTime_x': 'simTime'}, inplace=True)
+                        print("将 simTime_x 重命名为 simTime")
+                    else:
+                        print("警告: 合并 EgoMap 后找不到 simTime 列!")
+                
                 print("EgoMap data merged.")
             except Exception as e:
                 print(f"Warning: Could not merge EgoMap data from {egomap_path}: {e}")
+                import traceback
+                traceback.print_exc()
+
+        # 先处理可能的列名重复问题
+        df_merged = clean_duplicate_columns(df_merged)
+
+        # --- 合并 Traffic ---
+        traffic_path = self.output_dir / "Traffic.csv"
+        if traffic_path.exists() and traffic_path.stat().st_size > 0:
+            try:
+                df_traffic = pd.read_csv(traffic_path, dtype={"simTime": float}, low_memory=False).drop_duplicates()
+                # 删除 simFrame 列
+                if 'simFrame' in df_traffic.columns:
+                    df_traffic = df_traffic.drop(columns=['simFrame'])
+
+                # 根据车辆航向角确定行驶方向并筛选对应的红绿灯
+                def get_direction_from_heading(heading):
+                    # 将角度归一化到 -180 到 180 度范围
+                    heading = heading % 360
+                    if heading > 180:
+                        heading -= 360
+                    
+                    # 确定方向:北(N)、东(E)、南(S)、西(W)
+                    if -45 <= heading <= 45:  # 北向
+                        return 'N'
+                    elif 45 < heading <= 135:  # 东向
+                        return 'E'
+                    elif -135 <= heading < -45:  # 西向
+                        return 'W'
+                    else:  # 南向 (135 < heading <= 180 或 -180 <= heading < -135)
+                        return 'S'
+                
+                # 检查posH列是否存在,如果不存在但posH_x存在,则使用posH_x
+                heading_col = 'posH'
+                if heading_col not in df_merged.columns:
+                    if 'posH_x' in df_merged.columns:
+                        heading_col = 'posH_x'
+                        print(f"使用 {heading_col} 替代 posH")
+                    else:
+                        print(f"警告: 找不到航向角列 posH 或 posH_x")
+                        return df_merged
+                
+                # 添加方向列
+                df_merged['vehicle_direction'] = df_merged[heading_col].apply(get_direction_from_heading)
+                
+                # 创建 phaseId 到方向的映射
+                phase_to_direction = {
+                    1: 'S',  # 南直行
+                    2: 'W',  # 西直行
+                    3: 'N',  # 北直行
+                    4: 'E',  # 东直行
+                    5: 'S',  # 南行人
+                    6: 'W',  # 西行人
+                    7: 'S',  # 南左转
+                    8: 'W',  # 西左转
+                    9: 'N',  # 北左转
+                    10: 'E', # 东左转
+                    11: 'N', # 北行人
+                    12: 'E', # 东行人
+                    13: 'S', # 南右转
+                    14: 'W', # 西右转
+                    15: 'N', # 北右转
+                    16: 'E'  # 东右转
+                }
+                
+                # 创建 trafficlight_id 到方向的映射
+                trafficlight_to_direction = {
+                    # 南向北方向的红绿灯
+                    48100017: 'S', 
+                    48100038: 'S', 
+                    48100043: 'S',
+                    48100030: 'S',
+                    # 西向东方向的红绿灯
+                    48100021: 'W', 
+                    48100039: 'W',
+                    # 东向西方向的红绿灯
+                    48100041: 'E', 
+                    48100019: 'E',
+                    # 北向南方向的红绿灯
+                    48100033: 'N', 
+                    48100018: 'N', 
+                    48100022: 'N'
+                }
+                
+                # 添加时间列用于合并
+                df_traffic['time'] = df_traffic['simTime'].round(2).astype(float)
+                
+                # 检查 df_merged 中是否有 simTime 列
+                if 'simTime' not in df_merged.columns:
+                    print("警告: 合并 Traffic 前 df_merged 中找不到 simTime 列!")
+                    # 尝试查找 simTime_x 或其他可能的列
+                    if 'simTime_x' in df_merged.columns:
+                        df_merged.rename(columns={'simTime_x': 'simTime'}, inplace=True)
+                        print("将 simTime_x 重命名为 simTime")
+                    else:
+                        print("严重错误: 无法找到任何 simTime 相关列,无法继续合并!")
+                        return df_merged
+                
+                df_merged['time'] = df_merged['simTime'].round(2).astype(float)
+                
+                # 合并 Traffic 数据
+                df_merged = pd.merge(df_merged, df_traffic, on=["time"], how="left")
+                
+                # 再次处理可能的列名重复问题
+                df_merged = clean_duplicate_columns(df_merged)
+                
+                # 检查trafficlight_id列是否存在
+                trafficlight_col = 'trafficlight_id'
+                if trafficlight_col not in df_merged.columns:
+                    if 'trafficlight_id_x' in df_merged.columns:
+                        trafficlight_col = 'trafficlight_id_x'
+                        print(f"使用 {trafficlight_col} 替代 trafficlight_id")
+                    else:
+                        print(f"警告: 找不到红绿灯ID列 trafficlight_id 或 trafficlight_id_x")
+                
+                # 筛选与车辆行驶方向相关的红绿灯
+                def filter_relevant_traffic_light(row):
+                    if 'phaseId' not in row or pd.isna(row['phaseId']):
+                        return np.nan
+                    
+                    # 获取 phaseId 对应的方向
+                    phase_id = int(row['phaseId']) if not pd.isna(row['phaseId']) else None
+                    if phase_id is None:
+                        return np.nan
+                        
+                    phase_direction = phase_to_direction.get(phase_id, None)
+                    
+                    # 如果 phaseId 方向与车辆方向匹配
+                    if phase_direction == row['vehicle_direction']:
+                        # 查找该方向的所有红绿灯 ID
+                        relevant_ids = [tid for tid, direction in trafficlight_to_direction.items() 
+                                       if direction == phase_direction]
+                        
+                        # 如果 trafficlight_id 在 EgoMap 中且方向匹配
+                        if trafficlight_col in row and not pd.isna(row[trafficlight_col]) and row[trafficlight_col] in relevant_ids:
+                            return row[trafficlight_col]
+                    
+                    return np.nan
+                
+                # 应用筛选函数
+                df_merged['filtered_trafficlight_id'] = df_merged.apply(filter_relevant_traffic_light, axis=1)
+                
+                # 清理临时列
+                print(f"删除 time 列前 df_merged 的列: {df_merged.columns.tolist()}")
+                df_merged.drop(columns=['time'], inplace=True)
+                print(f"删除 time 列后 df_merged 的列: {df_merged.columns.tolist()}")
+                
+                # 确保 simTime 列存在
+                if 'simTime' not in df_merged.columns:
+                    if 'simTime_x' in df_merged.columns:
+                        df_merged.rename(columns={'simTime_x': 'simTime'}, inplace=True)
+                        print("将 simTime_x 重命名为 simTime")
+                    else:
+                        print("警告: 处理 Traffic 数据后找不到 simTime 列!")
+                
+                print("Traffic light data merged and filtered.")
+            except Exception as e:
+                print(f"Warning: Could not merge Traffic data from {traffic_path}: {e}")
+                import traceback
+                traceback.print_exc()
+        else:
+            print("Traffic data not found or empty, skipping merge.")
 
         # --- Merge Function ---
         function_path = self.output_dir / OUTPUT_CSV_FUNCTION
         if function_path.exists() and function_path.stat().st_size > 0:
             try:
-                df_function = pd.read_csv(function_path, dtype={"timestamp": float}, low_memory=False).drop_duplicates()
+                # 添加调试信息
+                print(f"正在读取 Function 数据: {function_path}")
+                df_function = pd.read_csv(function_path, low_memory=False).drop_duplicates()
+                print(f"Function 数据列名: {df_function.columns.tolist()}")
+                
                 # 删除 simFrame 列
                 if 'simFrame' in df_function.columns:
                     df_function = df_function.drop(columns=['simFrame'])
 
+                # 确保 simTime 列存在并且是浮点型
                 if 'simTime' in df_function.columns:
-                    df_function['simTime'] = df_function['simTime'].round(2)
-                    df_function['time1'] = df_function['simTime'].apply(lambda x: self.find_closet_idx(x, df_merged))
-                    common_cols = list(set(df_merged.columns) & set(df_function.columns))
-                    df_function.drop(columns=common_cols, inplace=True, errors='ignore')
-                    df_merged = df_merged.merge(df_function, right_on='time1', left_index=True, how='left')
-
-
-                    df_merged.drop(columns=['time1'], inplace=True)
-                    df_merged.reset_index(drop=True, inplace=True)
-                    print("Function data merged.")
+                    # 安全地将 simTime 转换为浮点型
+                    try:
+                        df_function['simTime'] = pd.to_numeric(df_function['simTime'], errors='coerce')
+                        df_function = df_function.dropna(subset=['simTime'])  # 删除无法转换的行
+                        df_function['time'] = df_function['simTime'].round(2)
+                        
+                        # 安全地处理 df_merged 的 simTime 列
+                        if 'simTime' in df_merged.columns:
+                            print(f"df_merged['simTime'] 的类型: {df_merged['simTime'].dtype}")
+                            print(f"df_merged['simTime'] 的前5个值: {df_merged['simTime'].head().tolist()}")
+                            
+                            df_merged['time'] = pd.to_numeric(df_merged['simTime'], errors='coerce').round(2)
+                            # 删除 time 列中的 NaN 值
+                            nan_count = df_merged['time'].isna().sum()
+                            if nan_count > 0:
+                                print(f"警告: 转换后有 {nan_count} 个 NaN 值,将删除这些行")
+                                df_merged = df_merged.dropna(subset=['time'])
+                            
+                            # 确保两个 DataFrame 的 time 列类型一致
+                            df_function['time'] = df_function['time'].astype(float)
+                            df_merged['time'] = df_merged['time'].astype(float)
+
+                            common_cols = list(set(df_merged.columns) & set(df_function.columns) - {'time'})
+                            df_function.drop(columns=common_cols, inplace=True, errors='ignore')
+
+                            # 合并数据
+                            df_merged = pd.merge(df_merged, df_function, on=["time"], how="left")
+                            df_merged.drop(columns=['time'], inplace=True)
+                            print("Function 数据合并成功。")
+                        else:
+                            print("警告: df_merged 中找不到 'simTime' 列,无法合并 Function 数据。")
+                            # 打印所有列名以便调试
+                            print(f"df_merged 的所有列: {df_merged.columns.tolist()}")
+                    except Exception as e:
+                        print(f"警告: 处理 Function.csv 中的 simTime 列时出错: {e}")
+                        import traceback
+                        traceback.print_exc()
                 else:
-                    print("Warning: 'simTime' column not found in Function.csv. Cannot merge.")
+                    print(f"警告: Function.csv 中找不到 'simTime' 列。可用的列: {df_function.columns.tolist()}")
             except Exception as e:
-                print(f"Warning: Could not merge Function data from {function_path}: {e}")
+                print(f"警告: 无法合并 Function 数据: {e}")
+                import traceback
+                traceback.print_exc()
         else:
-            print("Function data not found or empty, skipping merge.")
+            print(f"Function 数据文件不存在或为空: {function_path}")
 
         # --- Merge OBU ---
         obu_path = self.output_dir / OUTPUT_CSV_OBU
         if obu_path.exists() and obu_path.stat().st_size > 0:
             try:
-                df_obu = pd.read_csv(obu_path, dtype={"simTime": float}, low_memory=False).drop_duplicates()
+                # 添加调试信息
+                print(f"正在读取 OBU 数据: {obu_path}")
+                df_obu = pd.read_csv(obu_path, low_memory=False).drop_duplicates()
+                print(f"OBU 数据列名: {df_obu.columns.tolist()}")
+                
                 # 删除 simFrame 列
                 if 'simFrame' in df_obu.columns:
                     df_obu = df_obu.drop(columns=['simFrame'])
 
-                df_obu['time2'] = df_obu['simTime'].apply(lambda x: self.find_closet_idx(x, df_merged))
-                common_cols = list(set(df_merged.columns) & set(df_obu.columns))
-                df_obu.drop(columns=common_cols, inplace=True, errors='ignore')
-                df_merged = df_merged.merge(df_obu, right_on = 'time2', left_index=True, how = 'left')
-
-                df_merged.drop(columns=['time2'], inplace=True)
-                print("OBU data merged.")
+                # 确保 simTime 列存在并且是浮点型
+                if 'simTime' in df_obu.columns:
+                    # 安全地将 simTime 转换为浮点型
+                    try:
+                        df_obu['simTime'] = pd.to_numeric(df_obu['simTime'], errors='coerce')
+                        df_obu = df_obu.dropna(subset=['simTime'])  # 删除无法转换的行
+                        df_obu['time'] = df_obu['simTime'].round(2)
+                        
+                        # 安全地处理 df_merged 的 simTime 列
+                        if 'simTime' in df_merged.columns:
+                            print(f"合并 OBU 前 df_merged['simTime'] 的类型: {df_merged['simTime'].dtype}")
+                            print(f"合并 OBU 前 df_merged['simTime'] 的前5个值: {df_merged['simTime'].head().tolist()}")
+                            
+                            df_merged['time'] = pd.to_numeric(df_merged['simTime'], errors='coerce').round(2)
+                            # 删除 time 列中的 NaN 值
+                            nan_count = df_merged['time'].isna().sum()
+                            if nan_count > 0:
+                                print(f"警告: 转换后有 {nan_count} 个 NaN 值,将删除这些行")
+                                df_merged = df_merged.dropna(subset=['time'])
+                            
+                            # 确保两个 DataFrame 的 time 列类型一致
+                            df_obu['time'] = df_obu['time'].astype(float)
+                            df_merged['time'] = df_merged['time'].astype(float)
+
+                            common_cols = list(set(df_merged.columns) & set(df_obu.columns) - {'time'})
+                            df_obu.drop(columns=common_cols, inplace=True, errors='ignore')
+
+                            # 合并数据
+                            df_merged = pd.merge(df_merged, df_obu, on=["time"], how="left")
+                            df_merged.drop(columns=['time'], inplace=True)
+                            print("OBU 数据合并成功。")
+                        else:
+                            print("警告: df_merged 中找不到 'simTime' 列,无法合并 OBU 数据。")
+                            # 打印所有列名以便调试
+                            print(f"df_merged 的所有列: {df_merged.columns.tolist()}")
+                    except Exception as e:
+                        print(f"警告: 处理 OBUdata.csv 中的 simTime 列时出错: {e}")
+                        import traceback
+                        traceback.print_exc()
+                else:
+                    print(f"警告: OBUdata.csv 中找不到 'simTime' 列。可用的列: {df_obu.columns.tolist()}")
             except Exception as e:
-                print(f"Warning: Could not merge OBU data from {obu_path}: {e}")
+                print(f"警告: 无法合并 OBU 数据: {e}")
+                import traceback
+                traceback.print_exc()
         else:
-            print("OBU data not found or empty, skipping merge.")
-        df_merged[['speedH', 'accelX', 'accelY']] = -df_merged[['speedH', 'accelX', 'accelY']]
+            print(f"OBU 数据文件不存在或为空: {obu_path}")
+
+        # 在所有合并完成后,再次清理重复列
+        df_merged = clean_duplicate_columns(df_merged)
+        
         return df_merged
 
+
     def _process_trafficlight_data(self) -> pd.DataFrame:
         """Processes traffic light JSON data if available."""
         # Check if json_path is provided and exists
@@ -1229,6 +1663,14 @@ class FinalDataProcessor:
             df_trafficlights.drop_duplicates(subset=['simTime', 'playerId', 'phaseId', 'stateMask'], keep='first',
                                              inplace=True)
             print(f"Processed {len(df_trafficlights)} unique traffic light state entries.")
+            # 按时间升序排序 - 修复倒序问题
+            df_trafficlights = df_trafficlights.sort_values('simTime', ascending=True)
+            
+            # 添加调试信息
+            print(f"交通灯数据时间范围: {df_trafficlights['simTime'].min()} 到 {df_trafficlights['simTime'].max()}")
+            print(f"交通灯数据前5行时间: {df_trafficlights['simTime'].head().tolist()}")
+            print(f"交通灯数据后5行时间: {df_trafficlights['simTime'].tail().tolist()}")
+            
             return df_trafficlights
 
         except json.JSONDecodeError as e:
@@ -1238,7 +1680,6 @@ class FinalDataProcessor:
             print(f"Unexpected error processing traffic light data: {e}")
             return pd.DataFrame()
 
-
 # --- Rosbag Processing ---
 class RosbagProcessor:
     """Extracts data from Rosbag files within a ZIP archive."""

+ 3 - 5
run.py

@@ -35,23 +35,21 @@ def parse_arguments():
     parser.add_argument(
         '--zip-path',
         type=Path,
-        default=Path('/home/server/桌面/XGJ/dataprocess/data_0421/V2V_CSAE53-2020_ForwardCollisionW_LST_01-01.zip'),
-        # default=Path('/home/server/桌面/XGJ/dataprocess/data_0421/V2I_CSAE53-2020_LeftTurnAssist_LST_01-04.zip'),
-        # default=Path('/home/server/桌面/XGJ/dataprocess/data_0421/V2I_CSAE53-2020_LeftTurnAssist_LST_01-01.zip'),
+        default=Path('/home/kevin/kevin/zhaoyuan/sqlite3_demo/docker_build/preprocess_run/data/V2I_CSAE53-2020_RedLightViolationW_LST_01-01.zip'),
         help='输入的ZIP数据文件路径'
     )
 
     parser.add_argument(
         '--trafficlight-json',
         type=Path,
-        default=None,
+        default='/home/kevin/kevin/zhaoyuan/sqlite3_demo/docker_build/preprocess_run/data/process_20250421_154131.json',
         help='交通信号灯JSON配置文件路径'
     )
 
     parser.add_argument(
         '--output-dir',
         type=Path,
-        default=Path('./data_zhaoyuan3/'),
+        default=Path('./output/'),
         help='输出目录的基础路径'
     )
 

+ 37 - 0
tools/WGS2UTM.py

@@ -0,0 +1,37 @@
+# Single-point conversion example (kept for reference):
+#
+# from pyproj import Transformer
+#
+# # Define the coordinate transform (WGS84 -> UTM Zone 51N)
+# transformer = Transformer.from_crs("EPSG:4326", "EPSG:32651")
+#
+# # Input coordinates (example values; replace with real ones)
+# lon, lat = 123.456, 34.567
+#
+# # Perform the conversion
+# easting, northing = transformer.transform(lat, lon)  # pyproj v3+ expects latitude first for EPSG:4326
+# print(f"UTM 51N: {easting:.3f}, {northing:.3f}")
+
+
+import pandas as pd
+from pyproj import Transformer
+
+# Read the CSV file.
+# NOTE(review): the original comment claimed simTime is kept as a *string* to
+# avoid precision loss, but the dtype below parses it as float, so textual
+# precision beyond float64 is lost -- confirm which behavior is intended.
+df = pd.read_csv(
+    "/home/kevin/kevin/zhaoyuan/sqlite3_demo/processed_results/V2I-DLSG-1_2025-02-26_16-41-09/ObjState.csv", 
+    dtype={'simTime': float}  # parsed as float64, NOT as string
+)
+
+# Debug aid: dump the simTime value of every row
+print("===== 开始打印 simTime =====")
+for index, row in df.iterrows():
+    print(f"第 {index + 1} 行: simTime = {row['simTime']}")
+print("===== 结束打印 simTime =====")
+
+# Build the coordinate transformer: WGS84 lat/lon -> UTM 51N (EPSG:32651)
+transformer = Transformer.from_crs("EPSG:4326", "EPSG:32651")
+
+# Bulk-convert coordinates. pyproj's default axis order for EPSG:4326 is
+# (lat, lon), which is what the active call below passes.
+# df["posX"], df["posY"] = transformer.transform(df["lon"].values, df["lat"].values)
+df["posX"], df["posY"] = transformer.transform(df["lat"].values, df["lon"].values)
+
+# Write the converted table to a new CSV in the current working directory
+df.to_csv("ObjState.csv", index=False)

+ 86 - 0
tools/playback_matplotlib.py

@@ -0,0 +1,86 @@
+# Playback tool: animate vehicle footprints from ObjState.csv over the lane
+# geometry in LaneMap.csv using matplotlib's FuncAnimation.
+import pandas as pd
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+from matplotlib.animation import FuncAnimation
+import os
+
+# Position offset constants (presumably the UTM origin offset used when the
+# CSVs were produced -- confirm against the data pipeline)
+X_OFFSET = 258109.4239876
+Y_OFFSET = 4149969.964821
+
+# Load the data
+csv_data_path = "/home/kevin/kevin/zhaoyuan/zhaoyuan/data/processed/data_zhaoyuan1"
+lane_map_df = pd.read_csv(os.path.join(csv_data_path, 'LaneMap.csv'))
+car_df = pd.read_csv(os.path.join(csv_data_path, 'ObjState.csv'))
+
+# Build a color mapping per vehicle.
+# Colors can be derived from playerId, or taken from a matplotlib colormap.
+unique_vehicle_ids = car_df['playerId'].unique()
+colors = ['red', 'blue', 'green', 'orange', 'purple', 'cyan', 'magenta', 'yellow', 'black', 'brown']
+# colors = plt.cm.viridis(np.linspace(0, 1, len(unique_vehicle_ids)))  # Viridis colormap alternative
+
+# Map each playerId to a fixed color
+vehicle_colors = {vehicle_id: colors[i] for i, vehicle_id in enumerate(unique_vehicle_ids)}
+
+# Group vehicle rows by simulation timestamp; one group = one animation frame
+grouped_car_data = car_df.groupby("simTime")
+
+# Set up the canvas
+fig, ax = plt.subplots()
+ax.set_aspect("equal")
+ax.set_xlim([lane_map_df["centerLine_x"].min() - 20, lane_map_df["centerLine_x"].max() + 20])
+ax.set_ylim([lane_map_df["centerLine_y"].min() - 20, lane_map_df["centerLine_y"].max() + 20])
+
+# Draw the lane lines (static background)
+ax.plot(lane_map_df["centerLine_x"], lane_map_df["centerLine_y"], color="red", linewidth=2, linestyle="--", label="Vehicle Center Line")
+ax.plot(lane_map_df["lane_left_x"], lane_map_df["lane_left_y"], color="blue", linewidth=2, label="Left Lane Line")
+ax.plot(lane_map_df["lane_right_x"], lane_map_df["lane_right_y"], color="blue", linewidth=2, label="Right Lane Line")
+
+# Animation callback: redraw all vehicles for one timestamp
+def update(frame):
+    # Remove the rectangles drawn for the previous frame
+    for patch in ax.patches:
+        patch.remove()
+    
+    # Look up the timestamp for this frame and its vehicle rows
+    current_time = list(grouped_car_data.groups.keys())[frame]  # timestamp of this frame
+    current_data = grouped_car_data.get_group(current_time)
+
+    # Draw every vehicle present at this timestamp
+    for index, row in current_data.iterrows():
+        posX = row['posX'] + X_OFFSET
+        posY = row['posY'] + Y_OFFSET
+        posH = row['posH']  # heading; assumed to already be in radians -- TODO confirm units
+        dimX = row['dimX']
+        dimY = row['dimY']
+
+        # Corner points of the vehicle rectangle, centered at the origin
+        corners = np.array([
+            [-dimX / 2, -dimY / 2],
+            [dimX / 2, -dimY / 2],
+            [dimX / 2, dimY / 2],
+            [-dimX / 2, dimY / 2],
+        ])
+        
+        # Rotate by the heading (right-multiplying by this matrix rotates the
+        # corners counter-clockwise by posH), then translate to the position
+        rotation_matrix = np.array(
+            [[np.cos(posH), np.sin(posH)], [-np.sin(posH), np.cos(posH)]]
+        )
+        rotated_corners = corners.dot(rotation_matrix) + np.array([posX, posY])
+
+        # Pick the vehicle's color
+        vehicle_color = vehicle_colors[row['playerId']]
+        
+        # Build and add the polygon patch
+        vehicle = patches.Polygon(
+            rotated_corners, closed=True, fill=True, color=vehicle_color, alpha=0.5
+        )
+        ax.add_patch(vehicle)
+
+    plt.title(f'Time: {current_time:.2f}s')
+
+# Run the animation; one frame per distinct simTime
+ani = FuncAnimation(fig, update, frames=len(grouped_car_data.groups), repeat=True, interval=50)
+
+plt.show()

+ 153 - 0
tools/playback_pygame.py

@@ -0,0 +1,153 @@
+# Playback tool: render lane lines and animated vehicle rectangles from
+# LaneMap.csv / ObjState.csv in a pygame window.
+import pygame
+import sys
+import pandas as pd
+import os
+import numpy as np
+
+# Initialize pygame
+pygame.init()
+
+# Constants
+WIDTH, HEIGHT = 1200, 1000
+BACKGROUND_COLOR = (255, 255, 255)
+LEFT_LANE_COLOR = (0, 128, 255)  # left lane line color
+RIGHT_LANE_COLOR = (255, 0, 0)    # right lane line color
+CENTER_LINE_COLOR = (0, 255, 0)    # center line color
+VEHICLE_COLOR_DEFAULT = (0, 0, 255)  # default vehicle color
+X_OFFSET = 258109.4239876
+Y_OFFSET = 4149969.964821
+
+
+# Fixed color assignments per playerId
+vehicle_colors = {
+    1: (255, 0, 0),  # red
+}
+
+# Extra colors for vehicles without a fixed assignment
+additional_colors = [
+    (0, 0, 255),     # blue
+    (0, 255, 0),     # green
+    (255, 165, 0),   # orange
+    (128, 0, 128),   # purple
+    (255, 255, 0),   # yellow
+]
+
+# Create the window
+screen = pygame.display.set_mode((WIDTH, HEIGHT))
+pygame.display.set_caption("Lane Visualization")
+font = pygame.font.Font(None, 24)  # default font
+
+# Load the data (CSVs expected in the working directory)
+csv_data_path = "./"
+csv_path = os.path.join(csv_data_path, 'LaneMap.csv')
+df = pd.read_csv(csv_path)
+
+car_csv_path = os.path.join(csv_data_path, 'ObjState.csv')
+car_df = pd.read_csv(car_csv_path)
+
+# Extract the lane line coordinate arrays
+left_lane_x = df['lane_left_x'].values
+left_lane_y = df['lane_left_y'].values
+right_lane_x = df['lane_right_x'].values
+right_lane_y = df['lane_right_y'].values
+center_line_x = df['centerLine_x'].values
+center_line_y = df['centerLine_y'].values
+
+# Split vehicle rows into one DataFrame per playerId
+car_ids = car_df['playerId'].unique()
+car_data = {player_id: car_df[car_df['playerId'] == player_id].reset_index(drop=True) for player_id in car_ids}
+
+# Assign a color to every vehicle that does not have one yet
+for idx, player_id in enumerate(car_ids):
+    print(player_id)
+    if player_id not in vehicle_colors:
+        vehicle_colors[player_id] = additional_colors[idx % len(additional_colors)]
+
+# Compute the world-coordinate bounding box of the lane data
+min_x = min(left_lane_x.min(), right_lane_x.min(), center_line_x.min())
+max_x = max(left_lane_x.max(), right_lane_x.max(), center_line_x.max())
+min_y = min(left_lane_y.min(), right_lane_y.min(), center_line_y.min())
+max_y = max(left_lane_y.max(), right_lane_y.max(), center_line_y.max())
+
+# Uniform world->screen scale that fits the whole map
+scale_x = WIDTH / (max_x - min_x)
+scale_y = HEIGHT / (max_y - min_y)
+scale = min(scale_x, scale_y)
+
+# Screen-space offsets so the map's min corner lands at the origin
+offset_x = -min_x * scale
+offset_y = -min_y * scale
+
+# Main loop
+car_indices = {player_id: 0 for player_id in car_ids}  # current frame index per vehicle
+
+while True:
+    for event in pygame.event.get():
+        if event.type == pygame.QUIT:
+            pygame.quit()
+            sys.exit()
+
+    # Clear the screen
+    screen.fill(BACKGROUND_COLOR)
+
+    # Draw the lane lines (screen Y points down, hence the HEIGHT - y flip)
+    for i in range(len(left_lane_x) - 1):
+        start_pos = (left_lane_x[i] * scale + offset_x, HEIGHT - (left_lane_y[i] * scale + offset_y))
+        end_pos = (left_lane_x[i + 1] * scale + offset_x, HEIGHT - (left_lane_y[i + 1] * scale + offset_y))
+        pygame.draw.line(screen, LEFT_LANE_COLOR, start_pos, end_pos, 2)
+
+    for i in range(len(right_lane_x) - 1):
+        start_pos = (right_lane_x[i] * scale + offset_x, HEIGHT - (right_lane_y[i] * scale + offset_y))
+        end_pos = (right_lane_x[i + 1] * scale + offset_x, HEIGHT - (right_lane_y[i + 1] * scale + offset_y))
+        pygame.draw.line(screen, RIGHT_LANE_COLOR, start_pos, end_pos, 2)
+
+    for i in range(len(center_line_x) - 1):
+        start_pos = (center_line_x[i] * scale + offset_x, HEIGHT - (center_line_y[i] * scale + offset_y))
+        end_pos = (center_line_x[i + 1] * scale + offset_x, HEIGHT - (center_line_y[i + 1] * scale + offset_y))
+        pygame.draw.line(screen, CENTER_LINE_COLOR, start_pos, end_pos, 2)
+
+    # Draw the vehicles, advancing one trajectory sample per loop iteration
+    # (note: the loop variable shadows the module-level car_df)
+    for player_id, car_df in car_data.items():
+        car_index = car_indices[player_id]
+        if car_index < len(car_df):
+            posX = (car_df['posX'][car_index] + X_OFFSET)* scale + offset_x
+            posY = HEIGHT - ((car_df['posY'][car_index] + Y_OFFSET) * scale + offset_y)  # flip Y for screen coords
+            posH = car_df['posH'][car_index]  # heading; assumed radians -- TODO confirm units
+
+            # Vehicle footprint in pixels.
+            # NOTE(review): dimX/dimY from the CSV are ignored in favor of a
+            # hard-coded 5.0 x 2.0 footprint -- confirm this is intentional.
+            # vehicle_length = car_df['dimX'][car_index] * scale  # vehicle length in pixels
+            # vehicle_width = car_df['dimY'][car_index] * scale    # vehicle width in pixels
+            vehicle_length = 5.0 * scale  # vehicle length in pixels
+            vehicle_width = 2.0 * scale    # vehicle width in pixels
+
+            corners = np.array([
+                [-vehicle_length / 2, -vehicle_width / 2],
+                [ vehicle_length / 2, -vehicle_width / 2],
+                [ vehicle_length / 2,  vehicle_width / 2],
+                [-vehicle_length / 2,  vehicle_width / 2]
+            ])
+
+            # Rotate the rectangle by the heading, then translate into place
+            rotation_matrix = np.array([
+                [np.cos(posH), -np.sin(posH)],
+                [np.sin(posH),  np.cos(posH)]
+            ])
+            rotated_corners = corners.dot(rotation_matrix) + np.array([posX, posY])
+
+            # Look up the vehicle's color
+            vehicle_color = vehicle_colors[player_id]
+
+            # Draw the vehicle polygon
+            pygame.draw.polygon(screen, vehicle_color, rotated_corners)
+
+            # Advance this vehicle's trajectory index
+            car_indices[player_id] += 1
+            # Loop back to the start once the trajectory is exhausted
+            if car_indices[player_id] >= len(car_df):
+                car_indices[player_id] = 0
+
+    # Flip the display buffer
+    pygame.display.flip()
+
+    # Cap the frame rate
+    pygame.time.Clock().tick(60)  # 60 FPS

+ 191 - 0
tools/playback_shp.py

@@ -0,0 +1,191 @@
+# Playback tool: animate vehicle footprints from ObjState.csv on top of an
+# HD map rendered from shapefiles (geopandas + matplotlib).
+import pandas as pd
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+from matplotlib.animation import FuncAnimation
+import os
+import geopandas as gpd
+
+# Position offset constants (disabled: zero offsets mean the CSV positions
+# are assumed to already be in map coordinates -- confirm)
+# X_OFFSET = 258109.4239876
+# Y_OFFSET = 4149969.964821
+X_OFFSET = 0
+Y_OFFSET = 0
+
+# Load the vehicle data
+csv_data_path = "/home/kevin/kevin/zhaoyuan/sqlite3_demo/docker_build/preprocess_run/dataPreProcess/output/V2I_CSAE53-2020_RedLightViolationW_LST_01-01/"
+# lane_map_df = pd.read_csv(os.path.join(csv_data_path, 'LaneMap.csv'))
+car_df = pd.read_csv(os.path.join(csv_data_path, "ObjState.csv"))
+
+# Collect the vehicle ids (fall back to a single id of 1 when the column is
+# missing).
+# NOTE(review): the fallback does not add 'playerId' to car_df itself, so
+# row["playerId"] inside update() would still raise for such files -- confirm.
+# unique_vehicle_ids = car_df['playerId'].unique()
+unique_vehicle_ids = (
+    car_df["playerId"].unique()
+    if "playerId" in car_df.columns
+    else car_df.assign(playerId=1)["playerId"].unique()
+)
+
+colors = [
+    "red",
+    "blue",
+    "green",
+    "orange",
+    "purple",
+    "cyan",
+    "magenta",
+    "yellow",
+    "black",
+    "brown",
+]
+vehicle_colors = {
+    vehicle_id: colors[i] for i, vehicle_id in enumerate(unique_vehicle_ids)
+}
+
+# Group vehicle rows by simulation timestamp; one group = one animation frame
+grouped_car_data = car_df.groupby("simTime")
+
+# Load the HD-map layers from shapefiles
+gdfs = []
+folder_path = (
+    "/home/kevin/kevin/zhaoyuan/zhaoyuan_0227_v0.3/zhaoyuan/modules/map/data/shp"
+)
+file_names = [
+    "hd_arrow.shp",
+    "hd_lane_change.shp",
+    "hd_link_change.shp",
+    "hd_parkingspace.shp",
+    "hd_boundary_change.shp",
+    "hd_crosswalk.shp",
+    "hd_intersection.shp",
+    "hd_link_boundary.shp",
+    "hd_noparkingarea.shp",
+    "hd_trafficlight.shp",
+    "hd_stopline.shp",
+    "hd_lane.shp",
+]
+
+for file_name in file_names:
+    gdf = gpd.read_file(f"{folder_path}/{file_name}")
+    gdfs.append(gdf)
+
+# Drop empty layers and merge everything into one GeoDataFrame
+gdfs = [gdf for gdf in gdfs if not gdf.empty]
+all_gdf = gpd.GeoDataFrame(pd.concat(gdfs, ignore_index=True))
+
+# NOTE(review): gdfs was filtered above, so indexing it with the position of
+# "hd_lane.shp" in file_names is only correct if no earlier layer was empty
+# -- confirm.
+lane_gdf = gdfs[file_names.index("hd_lane.shp")]
+# Expand lane centerlines to polygons of half the lane width on each side
+lane_gdf["geometry"] = lane_gdf.apply(
+    lambda row: row["geometry"].buffer(row["lane_width"] / 2), axis=1
+)
+
+# Set up the canvas and draw the static map layers
+fig, ax = plt.subplots(figsize=(12, 8))
+lane_gdf.plot(ax=ax, color="gray", edgecolor="black", alpha=0.7)
+all_gdf.plot(ax=ax, edgecolor="black", alpha=0.5, linestyle="--")
+
+
+
+# Find the extremes of the vehicle positions
+min_x = car_df["posX"].min() + X_OFFSET
+max_x = car_df["posX"].max() + X_OFFSET
+min_y = car_df["posY"].min() + Y_OFFSET
+max_y = car_df["posY"].max() + Y_OFFSET
+
+# Margin around the trajectory
+buffer_x = 50  # adjust as needed
+buffer_y = 50  # adjust as needed
+
+# Set suitable x and y axis limits
+x_min, x_max = min_x - buffer_x, max_x + buffer_x
+y_min, y_max = min_y - buffer_y, max_y + buffer_y
+
+ax.set_xlim(x_min, x_max)
+ax.set_ylim(y_min, y_max)
+
+
+# Animation callback: redraw all vehicles for one timestamp
+def update(frame):
+    # Remove patches and labels from the previous frame
+    for patch in ax.patches:
+        patch.remove()
+    for text in ax.texts:
+        text.remove()
+
+    current_time = list(grouped_car_data.groups.keys())[frame]
+    current_data = grouped_car_data.get_group(current_time)
+
+    for index, row in current_data.iterrows():
+        posX = row["posX"] + X_OFFSET
+        posY = row["posY"] + Y_OFFSET
+        print(f"row[posH]: {row['posH']}")
+        print(f"row['speedH']: {row['speedH']}")
+        # Heading conversion: treats posH as a compass heading in degrees and
+        # maps it to a math angle in radians -- TODO confirm posH units.
+        # posH =np.pi/ 2 -  math.radians(row["posH"]) 
+        posH =np.pi/ 2 -np.deg2rad(row["posH"]) 
+        # posH = np.pi/ 2
+        print(f"posH: {posH}")
+        # NOTE(review): footprint is hard-coded instead of reading the CSV's
+        # dimX/dimY columns -- confirm this is intentional.
+        # dimX = row['dimX']np.deg2rad(degrees)
+        # dimY = row['dimY']
+        dimX = 5.99
+        dimY = 2.065
+
+        # Corner points of the vehicle rectangle, centered at the origin
+        corners = np.array(
+            [
+                [-dimX / 2, -dimY / 2],
+                [dimX / 2, -dimY / 2],
+                [dimX / 2, dimY / 2],
+                [-dimX / 2, dimY / 2],
+            ]
+        )
+
+        
+        # Earlier rotation attempts, kept for reference:
+        # rotation_matrix = np.array(
+        #     [[np.cos(posH), -np.sin(posH)], [np.sin(posH), np.cos(posH)]]
+        # )
+        # rotated_corners = corners.dot(rotation_matrix) + np.array([posX, posY])
+        # rotated_corners = np.dot(corners, rotation_matrix) + np.array([[posX], [posY]])
+
+        # Right-multiplying by this matrix rotates the corners
+        # counter-clockwise by posH
+        rotation_matrix = np.array(
+            [[np.cos(posH), np.sin(posH)], [-np.sin(posH), np.cos(posH)]]
+        )
+        
+
+        # Broadcasting aligns the (4, 2) corners with the (2,) translation
+        rotated_corners = np.dot(corners, rotation_matrix) + [posX, posY]
+
+
+        vehicle_color = vehicle_colors[row["playerId"]]
+
+        vehicle = patches.Polygon(
+            rotated_corners,
+            closed=True,
+            fill=True,
+            color=vehicle_color,
+            alpha=0.5,
+            edgecolor="black",
+        )
+        ax.add_patch(vehicle)
+        # Text label showing the vehicle's coordinates
+        ax.text(
+            posX,
+            posY,
+            f"({posX:.1f}, {posY:.1f})",
+            fontsize=8,
+            ha="center",
+            va="center",
+            color="black",
+        )
+        # (disabled) black dot at the vehicle position; markersize sets its size
+        # ax.dot(posX, posY, 'o', color='black', markersize=1)
+    plt.title(f"Time: {current_time:.5f}s")
+
+
+# Run the animation; one frame per distinct simTime
+ani = FuncAnimation(
+    fig, update, frames=len(grouped_car_data.groups), repeat=True, interval=0.1
+)
+
+# Add a title and show the map
+plt.title("Map Visualization with Vehicle Animation", fontsize=15)
+plt.show()

File diff suppressed because it is too large
+ 0 - 1
trafficlights.json


Some files were not shown because too many files changed in this diff