@@ -10,11 +10,12 @@ import subprocess
 import numpy as np
 import pandas as pd
 from collections import Counter
-from datetime import datetime
+# from datetime import datetime
 import argparse
 import sys
 from pyproj import Proj
-from bagpy.bagreader import bagreader
+from scipy.spatial import cKDTree
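+# Assumption: cKDTree is brought in for the nearest-timestamp Traffic merge added further below.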
+
 import shutil
 import json
 from dataclasses import dataclass, field
@@ -22,7 +23,9 @@ from dataclasses import dataclass, field
 # --- Constants ---
 PLAYER_ID_EGO = int(1)
 PLAYER_ID_OBJ = int(2)
+PLAYER_ID_PEDESTRIAN = int(5)
 DEFAULT_TYPE = int(1)
+PEDESTRIAN_TYPE = int(5)
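+# Assumption: object-type code 5 denotes a pedestrian, mirroring PLAYER_ID_PEDESTRIAN above.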
 OUTPUT_CSV_OBJSTATE = "ObjState.csv"
 OUTPUT_CSV_TEMP_OBJSTATE = "ObjState_temp_intermediate.csv"  # Should be eliminated
 OUTPUT_CSV_EGOSTATE = "EgoState.csv"  # Not used in final merge? Check logic if needed.
@@ -30,6 +33,7 @@ OUTPUT_CSV_MERGED = "merged_ObjState.csv"
 OUTPUT_CSV_OBU = "OBUdata.csv"
 OUTPUT_CSV_LANEMAP = "LaneMap.csv"
 OUTPUT_CSV_EGOMAP = "EgoMap.csv"
+MERGED_CSV_EGOMAP = "merged_egomap.csv"
 OUTPUT_CSV_FUNCTION = "Function.csv"
 ROADMARK_CSV = "RoadMark.csv"
 HD_LANE_CSV = "hd_lane.csv"
@@ -70,7 +74,8 @@ class ZipCSVProcessor:
         "travelDist", "composite_v", "relative_dist", "x_relative_dist", "y_relative_dist", "type"  # Added type
     ]
     OBJ_COLS_OLD_SUFFIXED = [
-        "v_obj", "speedX_obj", "speedY_obj", "speedZ_obj", "posH_obj", "pitch_obj", "roll_obj", "roll_rate_obj", "pitch_rate_obj", "speedH_obj",
+        "v_obj", "speedX_obj", "speedY_obj", "speedZ_obj", "posH_obj", "pitch_obj", "roll_obj", "roll_rate_obj",
+        "pitch_rate_obj", "speedH_obj",
         "posX_obj", "posY_obj", "accelX_obj", "accelY_obj", "accelZ_obj", "travelDist_obj"
     ]
     OBJ_COLS_MAPPING = {old: new for old, new in
@@ -84,6 +89,7 @@ class ZipCSVProcessor:
         self._init_keyword_mapping()

     def _load_dbc(self, dbc_path: Optional[Path]) -> Optional[cantools.db.Database]:
+        dbc_path = Path(dbc_path) if dbc_path else None  # Coerce str paths to Path, but keep None so the guard below still works
         if not dbc_path or not dbc_path.exists():
             print("DBC path not provided or file not found.")
             return None
@@ -114,8 +120,6 @@ class ZipCSVProcessor:
                 "posH": "yaw",
                 "pitch": "tilt",
                 "roll": "roll",
-                "roll_rate": "roll_rate",
-                "pitch_rate": "tilt_rate",
                 "speedH": "yaw_rate",
                 "posX": "latitude_dd",  # Source before projection
                 "posY": "longitude_dd",  # Source before projection
@@ -130,9 +134,10 @@ class ZipCSVProcessor:
                 "y_relative_dist": "y_distance",
                 "type": None  # Will be set later
             },
-            "db_columns": ["ID", "second", "usecond", "speed", "y_speed", "x_speed", "z_speed", "tilt_rate", "z_acceleration",
-                "yaw", "tilt", "roll", "yaw_rate", "latitude_dd", "longitude_dd", "roll_rate", "total_distance",
-                "x_acceleration", "y_acceleration", "total_distance", "distance", "x_distance", "y_distance"] # Actual cols to SELECT
+            "db_columns": ["ID", "second", "usecond", "speed", "y_speed", "x_speed", "z_speed", "z_acceleration",
+                           "yaw", "tilt", "roll", "yaw_rate", "latitude_dd", "longitude_dd", "total_distance",
+                           "x_acceleration", "y_acceleration", "total_distance", "distance", "x_distance", "y_distance"]
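+            # Note: "total_distance" appears twice in this list; harmless in a SELECT, but worth deduplicating.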
+            # Actual cols to SELECT
         }

     def _get_can_config(self):
@@ -141,8 +146,8 @@ class ZipCSVProcessor:
             "mapping": {  # Map unified output columns to CAN signals or direct fields
                 # EGO mappings (VUT = Vehicle Under Test)
                 "v": "VUT_Speed_mps",
-                "speedX": "VUT_Speed_x_mps",
-                "speedY": "VUT_Speed_y_mps",
+                "speedX": "VUT_Speed_long_mps",
+                "speedY": "VUT_Speed_lat_mps",
                 "speedZ": "VUT_Speed_z_mps",
                 "speedH": "VUT_Yaw_Rate",
                 "posX": "VUT_GPS_Latitude",  # Source before projection
@@ -153,12 +158,12 @@ class ZipCSVProcessor:
                 "pitch_rate": None,
                 "roll_rate": None,
                 "accelX": "VUT_Acc_X",
-                "accelY": "VUT_Acc_Y",
+                "accelY": "VUT_Acc_Y2",
                 "accelZ": "VUT_Acc_Z",
                 # OBJ mappings (UFO = Unidentified Flying Object / Other Vehicle)
                 "v_obj": "Speed_mps",
-                "speedX_obj": "UFO_Speed_x_mps",
-                "speedY_obj": "UFO_Speed_y_mps",
+                "speedX_obj": "UFO_Speed_long_mps",
+                "speedY_obj": "UFO_Speed_lat_mps",
                 "speedZ_obj": "UFO_Speed_z_mps",
                 "speedH_obj": "Yaw_Rate",
                 "posX_obj": "GPS_Latitude",  # Source before projection
@@ -174,8 +179,8 @@ class ZipCSVProcessor:
                 # Relative Mappings
                 "composite_v": "VUT_Rel_speed_long_mps",
                 "relative_dist": "VUT_Dist_MRP_Abs",
-                "x_relative_dist": "VUT_Dist_MRP_X",
-                "y_relative_dist": "VUT_Dist_MRP_Y",
+                "x_relative_dist": "VUT_Dist_MRP_long",
+                "y_relative_dist": "VUT_Dist_MRP_lat",
                 # travelDist often calculated, not direct CAN signal
                 "travelDist": None,  # Placeholder
                 "travelDist_obj": None  # Placeholder
@@ -188,7 +193,7 @@ class ZipCSVProcessor:
         self.keyword_mapping = {
             "gnss": ("gnss_table", OUTPUT_CSV_OBJSTATE),
             # GNSS likely represents ego, writing to ObjState first? Revisit logic if needed.
-            "can2": ("can_table", OUTPUT_CSV_OBJSTATE),  # Process CAN data into the combined ObjState file
+            "can4": ("can_table", OUTPUT_CSV_OBJSTATE),  # Process CAN data into the combined ObjState file
         }

     def process_zip(self) -> None:
@@ -224,7 +229,7 @@ class ZipCSVProcessor:
                             shutil.copyfileobj(source, target)

                         # Process the extracted DB file
-                        self._process_db_file(extracted_path, output_dir, table_type, csv_name)
+                        self._process_db_file(file_info.filename, extracted_path, output_dir, table_type, csv_name)

             except (sqlite3.Error, pd.errors.EmptyDataError, FileNotFoundError, KeyError) as e:
                 print(f"Error processing DB file {file_info.filename}: {e}")
@@ -249,7 +254,7 @@ class ZipCSVProcessor:
         return None

     def _process_db_file(
-            self, db_path: Path, output_dir: Path, table_type: str, csv_name: str
+            self, filename: str, db_path: Path, output_dir: Path, table_type: str, csv_name: str
     ) -> None:
         """Connects to SQLite DB and processes the specified table type."""
         output_csv_path = output_dir / csv_name
@@ -267,7 +272,7 @@ class ZipCSVProcessor:

             print(f"Exporting data from table '{table_type}' to {output_csv_path}")
             if table_type == "can_table":
-                self._process_can_table_optimized(cursor, output_csv_path)
+                self._process_can_table_optimized(filename, cursor, output_csv_path)
             elif table_type == "gnss_table":
                 # Pass output_path directly, avoid intermediate steps
                 self._process_gnss_table(cursor, output_csv_path)
@@ -376,9 +381,29 @@ class ZipCSVProcessor:
                     processed_data.append(record)

             if processed_data:
-                df_final = pd.DataFrame(processed_data)[output_columns].iloc[::4].reset_index(drop=True)  # Ensure column order
+                df_final = pd.DataFrame(processed_data)[output_columns].iloc[::4].reset_index(
+                    drop=True)  # Ensure column order
+                # df_final.to_csv("/home/output/V2I_CSAE53-2020_LeftTurnAssist_LST_01/ObjState_old.csv", index=False, encoding="utf-8")
+                # print(df_final)
+                # df_final['speedY'] = -df_final['speedY']
+                # df_final['accelY'] = -df_final['accelY']
+
+                # df_final['speedZ'] = -df_final['speedZ']
+                # df_final['accelZ'] = -df_final['accelZ']
                 df_final['simFrame'] = np.arange(1, len(df_final) + 1)
-                df_final.to_csv(output_path, index=False, encoding="utf-8")
+                df_final["pitch_rate"] = df_final["pitch"].diff() / df_final["simTime"].diff()
+                df_final["roll_rate"] = df_final["roll"].diff() / df_final["simTime"].diff()
+                # print("df_final[\"posH\"] is", df_final["posH"])
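+                # Convert compass heading (0° = north, clockwise) to ENU yaw (0° = east, counter-clockwise) —
+                # assumption inferred from the ENU transform applied downstream.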
+                df_final["posH"] = (90 - df_final["posH"])
+                stopcar_flag = self.is_valid_interval(df_final)
+                # print("stopcar_flag is", stopcar_flag)
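+                # Trim standstill padding: keep rows from the first sample with v > 1 m/s through the
+                # last sample with v > 0.15 m/s (thresholds appear empirical).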
+                if stopcar_flag:
+                    first_gt_1 = df_final['v'].gt(1).idxmax()
+                    last_gt_1 = df_final['v'].gt(0.15)[::-1].idxmax()
+                    result_df = df_final.loc[first_gt_1:last_gt_1].copy()
+                    result_df.to_csv(output_path, index=False, encoding="utf-8")
+                else:
+                    df_final.to_csv(output_path, index=False, encoding="utf-8")
                 print(f"Successfully wrote GNSS data to {output_path}")
             else:
                 print("No processable records found in gnss_table.")
@@ -388,7 +413,7 @@ class ZipCSVProcessor:
         except Exception as e:
             print(f"Unexpected error during GNSS processing: {e}")

-    def _process_can_table_optimized(self, cursor, output_path: Path) -> None:
+    def _process_can_table_optimized(self, filename, cursor, output_path: Path) -> None:
         """Processes CAN data directly into the final merged DataFrame format."""
         config = self.table_config["can_table"]
         db_columns = config["db_columns"]
@@ -420,15 +445,24 @@ class ZipCSVProcessor:

             # Separate EGO and OBJ data based on available columns
             df_ego = self._extract_vehicle_data(df_raw, PLAYER_ID_EGO)
+            # if 'pedestrian' in filename.lower():
+            #     df_obj = self._extract_vehicle_data(df_raw, PLAYER_ID_PEDESTRIAN)
+            # else:
             df_obj = self._extract_vehicle_data(df_raw, PLAYER_ID_OBJ)

             # Project coordinates
             df_ego = self._project_coordinates(df_ego, 'posX', 'posY')
+            # df_ego = self._project_coordinates(df_ego, 'posX', 'posY', 'speedX', 'speedY', 'speedZ', 'accelX', 'accelY', 'accelZ', 'posH', 'pitch', 'roll')
+            print("df_ego is", df_ego.columns)
             df_obj = self._project_coordinates(df_obj, 'posX', 'posY')  # Use same column names after extraction
+            # df_obj = self._project_coordinates(df_obj, 'posX', 'posY', 'speedX', 'speedY', 'speedZ', 'accelX', 'accelY', 'accelZ', 'posH', 'pitch', 'roll')  # Use same column names after extraction

             # Add calculated/default columns
             df_ego['type'] = DEFAULT_TYPE
-            df_obj['type'] = DEFAULT_TYPE
+            if 'pedestrian' in filename.lower():
+                df_obj['type'] = PEDESTRIAN_TYPE
+            else:
+                df_obj['type'] = DEFAULT_TYPE
             # Note: travelDist is often calculated later or not available directly

             # Ensure both have the same columns before merging
@@ -437,23 +471,31 @@ class ZipCSVProcessor:
             df_obj = df_obj.reindex(columns=final_columns).iloc[::4]

             # Reindex simFrame of ego and obj
-            df_ego['simFrame'] = np.arange(1, len(df_ego)+1)
-            df_obj['simFrame'] = np.arange(1, len(df_obj)+1)
+            df_ego['simFrame'] = np.arange(1, len(df_ego) + 1)
+            df_obj['simFrame'] = np.arange(1, len(df_obj) + 1)

             # Merge EGO and OBJ dataframes
             df_merged = pd.concat([df_ego, df_obj], ignore_index=True)

             # Sort and clean up
             df_merged.sort_values(by=["simTime", "simFrame", "playerId"], inplace=True)
-            df_merged.fillna(0, inplace = True)
+            df_merged.fillna(0, inplace=True)
             df_merged.reset_index(drop=True, inplace=True)

             # Fill potential NaNs introduced by reindexing or missing data
             # Choose appropriate fill strategy (e.g., 0, forward fill, or leave as NaN)
             # df_merged.fillna(0.0, inplace=True)  # Example: fill with 0.0
-
-            # Save the final merged DataFrame
-            df_merged.to_csv(output_path, index=False, encoding="utf-8")
+            stopcar_flag = self.is_valid_interval(df_merged)
+            print("stopcar_flag is", stopcar_flag)
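+            # The trim indices come from the ego (playerId == 1) subset but keep df_merged's labels,
+            # so the .loc slice below also captures interleaved object rows — assumed intentional.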
+            if stopcar_flag:
+                print("Filtering out stationary-vehicle frames!")
+                first_gt_01 = df_merged[df_merged['playerId'] == 1]['v'].gt(1).idxmax()
+                last_gt_01 = df_merged[df_merged['playerId'] == 1]['v'].gt(0.15)[::-1].idxmax()
+                result_df = df_merged.loc[first_gt_01:last_gt_01 - 1].copy()
+                # Save the final merged DataFrame
+                result_df.to_csv(output_path, index=False, encoding="utf-8")
+            else:
+                df_merged.to_csv(output_path, index=False, encoding="utf-8")
             print(f"Successfully processed CAN data and wrote merged output to {output_path}")

         except sqlite3.Error as e:
@@ -540,6 +582,7 @@ class ZipCSVProcessor:
             # Select EGO columns (not ending in _obj) + relative columns
             ego_cols = {target: source for target, source in self.table_config['can_table']['mapping'].items()
                         if source and not isinstance(source, tuple) and not target.endswith('_obj')}
+            print("ego_cols is", ego_cols)
             rename_map = {}
             select_cols_raw = []
             for target_col, source_info in ego_cols.items():
@@ -559,8 +602,7 @@ class ZipCSVProcessor:
                 df_vehicle_temps_ego[col] = df_vehicle_temp[col].dropna().reset_index(drop=True)
             df_vehicle = pd.concat([df_vehicle, df_vehicle_temps_ego], axis=1)

-
-        elif player_id == PLAYER_ID_OBJ:
+        elif (player_id == PLAYER_ID_OBJ) or (player_id == PLAYER_ID_PEDESTRIAN):
             # Select OBJ columns (ending in _obj)
             obj_cols = {target: source for target, source in self.table_config['can_table']['mapping'].items()
                         if source and not isinstance(source, tuple) and target.endswith('_obj')}
@@ -592,12 +634,15 @@ class ZipCSVProcessor:
         # df_vehicle.dropna(subset=[col for col in required_pos if col in df_vehicle.columns], inplace=True)

         try:
-            df_vehicle["simTime"] = np.round(np.linspace(df_raw["simTime"].tolist()[0]+28800, df_raw["simTime"].tolist()[0]+28800 + 0.01*(len(df_vehicle)), len(df_vehicle)), 2)
+            df_vehicle["simTime"] = np.round(
+                np.linspace(df_raw["simTime"].tolist()[0], df_raw["simTime"].tolist()[0] + 0.01 * (len(df_vehicle)),
+                            len(df_vehicle)), 2)
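+            # simTime is synthesized on a fixed 10 ms grid from the first raw timestamp;
+            # the previous +28800 s (UTC+8) epoch offset has been dropped.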
             df_vehicle["simFrame"] = np.arange(1, len(df_vehicle) + 1)
             df_vehicle["playerId"] = int(player_id)
             df_vehicle['playerId'] = pd.to_numeric(df_vehicle['playerId']).astype(int)
             df_vehicle["pitch_rate"] = df_vehicle["pitch"].diff() / df_vehicle["simTime"].diff()
             df_vehicle["roll_rate"] = df_vehicle["roll"].diff() / df_vehicle["simTime"].diff()
+            # print("df_vehicle is", df_vehicle)
         except ValueError as ve:
             print(f"{ve}")
         except TypeError as te:
@@ -613,6 +658,15 @@ class ZipCSVProcessor:
         # Ensure data is numeric and handle potential errors/missing values
         lat = pd.to_numeric(df[lat_col], errors='coerce')
         lon = pd.to_numeric(df[lon_col], errors='coerce')
+        # speedX = pd.to_numeric(df[speedX_col], errors='coerce')
+        # speedY = pd.to_numeric(df[speedY_col], errors='coerce')
+        # speedZ = pd.to_numeric(0, errors='coerce')
+        # accelX = pd.to_numeric(df[accelX_col], errors='coerce')
+        # accelY = pd.to_numeric(df[accelY_col], errors='coerce')
+        # accelZ = pd.to_numeric(0, errors='coerce')
+        # posh = pd.to_numeric(df[posh_col], errors='coerce')
+        # pitch = pd.to_numeric(df[pitch_col], errors='coerce')
+        # roll = pd.to_numeric(df[roll_col], errors='coerce')
         valid_coords = lat.notna() & lon.notna()

         if valid_coords.any():
@@ -628,6 +682,7 @@ class ZipCSVProcessor:

             # Rename columns AFTER projection for clarity
             df.rename(columns={lat_col: 'posX', lon_col: 'posY'}, inplace=True)
+
         else:
             # Ensure columns exist even if projection didn't happen
             if 'posX' not in df.columns: df['posX'] = np.nan
@@ -636,13 +691,34 @@ class ZipCSVProcessor:

         return df

+    def is_valid_interval(self, df, threshold=1, window_sec=0.1):
+        '''
+        Check that the vehicle is effectively stationary at both ends of the log:
+        speed must stay below `threshold` within the first and last `window_sec` seconds.
+        '''
+        # Get the time range
+        print("Getting time range...")
+        start_time = df['simTime'].tolist()[0]
+        end_time = df['simTime'].tolist()[-1]
+
+        # Data within the first window_sec seconds
+        # print("First-window data...")
+        mask_before = (df['simTime'] >= start_time) & \
+                      (df['simTime'] < start_time + window_sec)
+        # Data within the last window_sec seconds
+        mask_after = (df['simTime'] < end_time) & \
+                     (df['simTime'] >= end_time - window_sec)

+        # Both the head window and the tail window must stay below threshold
+        return (df.loc[mask_before, 'v'].max() < threshold) and \
+               (df.loc[mask_after, 'v'].max() < threshold)
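+        # Example: with the defaults (threshold=1 m/s, window_sec=0.1 s), a log that both starts
+        # and ends at standstill returns True and triggers the trimming above.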
+

 # --- Polynomial Fitting (Largely unchanged, minor cleanup) ---
 class PolynomialCurvatureFitting:
     """Calculates curvature and its derivative using polynomial fitting."""

     def __init__(self, lane_map_path: Path, degree: int = 3):
-        self.lane_map_path = lane_map_path
+        self.lane_map_path = Path(lane_map_path)
         self.degree = degree
         self.data = self._load_data()
         if self.data is not None:
@@ -654,6 +730,7 @@ class PolynomialCurvatureFitting:

     def _load_data(self) -> Optional[pd.DataFrame]:
         """Loads lane map data safely."""
+
         if not self.lane_map_path.exists() or self.lane_map_path.stat().st_size == 0:
             print(f"Warning: LaneMap file not found or empty: {self.lane_map_path}")
             return None
@@ -1049,6 +1126,38 @@ class FinalDataProcessor:
         self.config = config
         self.output_dir = config.output_dir

+    def _axis_to_ENU(self, speedX, speedY, speedZ, accelX, accelY, accelZ, posH, pitch, roll):
+        posh_ENU = posH % 360
+        posh_ENU = posh_ENU * np.pi / 180
+        pitch = pitch * np.pi / 180
+        roll = roll * np.pi / 180
+        east_speedX, north_speedY, north_speedZ = [], [], []
+        east_accelX, north_accelY, north_accelZ = [], [], []
+        for i in range(len(posH)):
+            sy = np.sin(posh_ENU[i])
+            cy = np.cos(posh_ENU[i])
+            cp = np.cos(pitch[i])
+            sp = np.sin(pitch[i])
+            cr = np.cos(roll[i])
+            sr = np.sin(roll[i])
+            trametrix = np.array([[sy * cp, sy * sp * sr - cy * cr, sy * sp * cr + cy * sr],
+                                  [cy * cp, cy * sp * sr + sy * cr, cy * sp * cr - sy * sr],
+                                  [-sp, cp * sr, cp * cr]])
+            # trametrix = np.array([[sy, cy], [-cy, sy]])
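+            # ZYX (yaw-pitch-roll) rotation matrix; for a pure rotation, pinv() equals the
+            # transpose, i.e. the inverse rotation back into the ENU frame.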
+            east_speedX_i, north_speedY_i, north_speedZ_i = np.linalg.pinv(trametrix) @ np.array(
+                [speedX[i], speedY[i], speedZ[i]])
+            # east_speedX_i, north_speedY_i = np.linalg.pinv(trametrix) @ np.array([speedX[i], speedY[i]])
+            east_accelX_i, north_accelY_i, north_accelZ_i = np.linalg.pinv(trametrix) @ np.array(
+                [accelX[i], accelY[i], accelZ[i]])
+            # east_accelX_i, north_accelY_i = np.linalg.pinv(trametrix) @ np.array([accelX[i], accelY[i]])
+            east_speedX.append(east_speedX_i)
+            north_speedY.append(north_speedY_i)
+            north_speedZ.append(north_speedZ_i)
+            east_accelX.append(east_accelX_i)
+            north_accelY.append(north_accelY_i)
+            north_accelZ.append(north_accelZ_i)
+        return east_speedX, north_speedY, speedZ, east_accelX, north_accelY, accelZ
+        # return east_speedX, north_speedY, east_accelX, north_accelY
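+        # Note: the original speedZ/accelZ are returned unrotated; north_speedZ/north_accelZ are
+        # computed but unused — kept as-is, presumably deliberate since Z is near-vertical.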
+
     def process(self) -> bool:
         """Perform the final data merging and processing steps."""
         print("--- Starting Final Data Processing ---")
@@ -1069,6 +1178,33 @@ class FinalDataProcessor:
             print(f"Successfully created traffic light data file: {traffic_csv_path}")
         # Load and process data
         df_object = pd.read_csv(obj_state_path, dtype={"simTime": float}, low_memory=False)
+        # Coordinate transform: rotate body-frame speeds/accelerations into ENU
+        speedX = df_object['speedX']
+        speedY = df_object['speedY']
+        speedZ = df_object['speedZ']
+        accelX = df_object['accelX']
+        accelY = df_object['accelY']
+        accelZ = df_object['accelZ']
+        posH = df_object['posH']
+        pitch = df_object['pitch']
+        roll = df_object['roll']
+        (east_speedX, north_speedY, north_speedZ,
+         east_accelX, north_accelY, north_accelZ) = self._axis_to_ENU(
+            speedX, speedY, speedZ, accelX, accelY, accelZ, posH, pitch, roll)
+        # east_speedX, north_speedY, east_accelX, north_accelY = self._axis_to_ENU(speedX, speedY, speedZ, accelX, accelY, accelZ, posH, pitch, roll)
+        df_object['speedX'] = east_speedX
+        df_object['speedY'] = north_speedY
+        df_object['speedZ'] = north_speedZ
+        df_object['accelX'] = east_accelX
+        df_object['accelY'] = north_accelY
+        df_object['accelZ'] = north_accelZ
+
         df_ego = df_object[df_object["playerId"] == 1]
         points = df_ego[["posX", "posY"]].values
         window_size = 4
@@ -1089,7 +1225,7 @@ class FinalDataProcessor:
             print("Length of computed values does not match the number of playerId == 1 rows!")
         # Process and merge data
         df_merged = self._merge_optional_data(df_object)
-        df_merged[['speedH', 'accelX', 'accelY']] = -df_merged[['speedH', 'accelX', 'accelY']]
+        # df_merged[['speedH', 'accelX']] = -df_merged[['speedH', 'accelX']]

         # Save final merged file directly to output directory
         merged_csv_path = self.output_dir / OUTPUT_CSV_MERGED
@@ -1180,13 +1316,14 @@ class FinalDataProcessor:

         # --- Merge EgoMap ---
         egomap_path = self.output_dir / OUTPUT_CSV_EGOMAP
+        merged_egomap_path = self.output_dir / MERGED_CSV_EGOMAP
         if egomap_path.exists() and egomap_path.stat().st_size > 0:
             try:
                 df_ego = pd.read_csv(egomap_path, dtype={"simTime": float})
                 ego_column = ['posX', 'posY', 'posH']
                 ego_new_column = ['posX_map', 'posY_map', 'posH_map']
-                df_ego = df_ego.rename(columns = dict(zip(ego_column, ego_new_column)))
-                # Drop the simFrame column; the main data's simFrame is used instead
+                df_ego = df_ego.rename(columns=dict(zip(ego_column, ego_new_column)))
+                # Drop the simFrame column; the main data's simFrame is used instead
                 if 'simFrame' in df_ego.columns:
                     df_ego = df_ego.drop(columns=['simFrame'])

@@ -1199,13 +1336,19 @@ class FinalDataProcessor:
                 df_merged.sort_values(['simTime', 'playerId'], inplace=True)

                 # Nearest-neighbour merge with merge_asof (replaced below by an exact left merge on simTime)
-                df_merged = pd.merge_asof(
+                # df_merged = pd.merge_asof(
+                #     df_merged,
+                #     df_ego,
+                #     on='simTime',
+                #     by='playerId',
+                #     direction='nearest',
+                #     tolerance=0.01  # 10ms tolerance
+                # )
+                df_merged = pd.merge(
                     df_merged,
                     df_ego,
-                    on='simTime',
-                    by='playerId',
-                    direction='nearest',
-                    tolerance=0.01  # 10ms tolerance
+                    how='left',
+                    on='simTime'
                 )

                 # Print the merged column names
@@ -1219,7 +1362,8 @@ class FinalDataProcessor:
                 else:
                     print("Warning: simTime column not found after merging EgoMap!")

-                df_merged = df_merged.drop(columns = ['posX_map', 'posY_map', 'posH_map'])
+                df_merged = df_merged.drop(columns=['posX_map', 'posY_map', 'posH_map', 'stateMask'])
+                df_merged.to_csv(merged_egomap_path, index=False, float_format='%.6f')
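+                # Assumption: merged_egomap.csv is an intermediate dump for inspecting the EgoMap merge result.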

                 print("EgoMap data merged.")
             except Exception as e:
@@ -1232,34 +1376,6 @@ class FinalDataProcessor:

         # --- Merge hd_lane.csv / hd_road.csv ---
         current_file_path = os.path.abspath(__file__)
-        # root_lane_csv_path1 = os.path.dirname(current_file_path)
-        # root_lane_csv_path2 = os.path.dirname(root_lane_csv_path1)
-        # root_lane_csv_path3 = os.path.dirname(root_lane_csv_path2)
-        # root_lane_csv_path4 = os.path.dirname(root_lane_csv_path3)
-        # lane_path = os.path.join(root_lane_csv_path4, "_internal")
-        # data_path = os.path.join(lane_path, "data_map")
-        # lane_csv_path = os.path.join(data_path, "hd_lane.csv")
-        # road_csv_path = os.path.join(data_path, "hd_link.csv")
-        # df_lane = pd.read_csv(lane_csv_path)
-        # column_to_read = ['link_type', 'link_coords']
-
-        # df_road = pd.read_csv(road_csv_path, usecols = column_to_read)
-        # df_road["simFrame"] = np.arange(1, len(df_road) + 1, 1)
-        # df_road = df_road.rename(columns={'link_type': 'road_type'})
-
-
-        # df_merged = pd.merge(
-        #     df_merged,
-        #     df_lane,
-        #     on='lane_id',
-        #     how = 'left'
-        # )
-        # df_merged = pd.merge(
-        #     df_merged,
-        #     df_road,
-        #     on='simFrame',
-        #     how = 'left'
-        # )

         # --- Merge Traffic ---
         traffic_path = self.output_dir / "Traffic.csv"
@@ -1311,32 +1427,43 @@ class FinalDataProcessor:
             7: 'S',  # south left turn
             8: 'W',  # west left turn
             9: 'N',  # north left turn
-            10: 'E', # east left turn
-            11: 'N', # north pedestrian
-            12: 'E', # east pedestrian
-            13: 'S', # south right turn
-            14: 'W', # west right turn
-            15: 'N', # north right turn
+            10: 'E',  # east left turn
+            11: 'N',  # north pedestrian
+            12: 'E',  # east pedestrian
+            13: 'S',  # south right turn
+            14: 'W',  # west right turn
+            15: 'N',  # north right turn
             16: 'E'  # east right turn
         }

         # Map each trafficlight_id to its direction
         trafficlight_to_direction = {
             # Traffic lights for the south-to-north approach
-            48100017: 'S',
-            48100038: 'S',
-            48100043: 'S',
-            48100030: 'S',
+            # 48100017: 'S',
+            # 48100038: 'S',
+            # 48100043: 'S',
+            # 48100030: 'S',
+            48100017: 'N',
+            48100038: 'N',
+            48100043: 'N',
+            48100030: 'N',
             # Traffic lights for the west-to-east approach
-            48100021: 'W',
-            48100039: 'W',
+            # 48100021: 'W',
+            # 48100039: 'W',
+            48100021: 'E',
+            48100039: 'E',
             # Traffic lights for the east-to-west approach
-            48100041: 'E',
-            48100019: 'E',
+            # 48100041: 'E',
+            # 48100019: 'E',
+            48100041: 'W',
+            48100019: 'W',
             # Traffic lights for the north-to-south approach
-            48100033: 'N',
-            48100018: 'N',
-            48100022: 'N'
+            # 48100033: 'N',
+            # 48100018: 'N',
+            # 48100022: 'N'
+            48100033: 'S',
+            48100018: 'S',
+            48100022: 'S'
         }

         # Add a time column for merging
@@ -1354,12 +1481,23 @@ class FinalDataProcessor:
             return df_merged

         df_merged['time'] = df_merged['simTime'].round(2).astype(float)
+        tree = cKDTree(df_traffic[['simTime']])

+        # Query the nearest Traffic timestamp for each row of df_merged
+        distances, indices = tree.query(df_merged[['simTime']], k=1)
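+        # Works like pd.merge_asof(direction='nearest') but with no tolerance bound — every row
+        # gets matched to some Traffic timestamp (assumption: Traffic spans the whole log).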
+        # df_merged['time1'] = df_merged['simTime'].round(0).astype(float)
+        # df_traffic1 = df_traffic.rename(columns={'simTime': 'simTime1'})
+        # df_traffic['time1'] = df_traffic['time'].round(0).astype(float)
         # Merge Traffic data
-        df_merged = pd.merge(df_merged, df_traffic, on=["time"], how="left")
+        df_merged['matched_time'] = df_traffic.iloc[indices.flatten()]['simTime'].values

+        # Merge the DataFrames on the matched timestamps
+        df_merged = pd.merge(df_merged, df_traffic, left_on='matched_time', right_on='simTime', how='left')
+        # df_merged = pd.merge(df_merged, df_traffic, on=["time1"], how="left")
+        # df_merged = df_merged.drop(columns = ['time1'])
         # Handle possible duplicate column names again
         df_merged = clean_duplicate_columns(df_merged)
+        df_merged = df_merged.drop(columns=['time_x', 'time_y', 'matched_time'])

         # Check whether the trafficlight_id column exists
         trafficlight_col = 'trafficlight_id'
@@ -1386,11 +1524,14 @@ class FinalDataProcessor:
             if phase_direction == row['vehicle_direction']:
                 # Find all traffic-light IDs for this direction
                 relevant_ids = [tid for tid, direction in trafficlight_to_direction.items()
-                                if direction == phase_direction]
+                            if direction == phase_direction]

                 # If trafficlight_id is present in EgoMap and the direction matches
-            if trafficlight_col in row and not pd.isna(row[trafficlight_col]) and row[trafficlight_col] in relevant_ids:
-                return row[trafficlight_col]
+            # if trafficlight_col in row and not pd.isna(row[trafficlight_col]) and row[trafficlight_col] in relevant_ids:
+            if trafficlight_col in row:
+                if not pd.isna(row[trafficlight_col]):
+                    if row[trafficlight_col] in relevant_ids:
+                        return row[trafficlight_col]

             return np.nan

@@ -1398,9 +1539,9 @@ class FinalDataProcessor:
         df_merged['filtered_trafficlight_id'] = df_merged.apply(filter_relevant_traffic_light, axis=1)

         # Clean up temporary columns
-        print(f"Columns of df_merged before dropping 'time': {df_merged.columns.tolist()}")
-        df_merged.drop(columns=['time'], inplace=True)
-        print(f"Columns of df_merged after dropping 'time': {df_merged.columns.tolist()}")
+        # print(f"Columns of df_merged before dropping 'time': {df_merged.columns.tolist()}")
+        # df_merged.drop(columns=['time'], inplace=True)
+        # print(f"Columns of df_merged after dropping 'time': {df_merged.columns.tolist()}")

         # Ensure the simTime column exists
         if 'simTime' not in df_merged.columns:
@@ -1545,7 +1686,6 @@ class FinalDataProcessor:

         return df_merged

-
     def _process_trafficlight_data(self) -> pd.DataFrame:
         """Processes traffic light JSON data if available."""
         # Check if json_path is provided and exists
@@ -1640,6 +1780,7 @@ class FinalDataProcessor:
             print(f"Unexpected error processing traffic light data: {e}")
             return pd.DataFrame()

+
 # --- Rosbag Processing ---
 class RosbagProcessor:
     """Extracts data from HMIdata files within a ZIP archive."""
@@ -1652,7 +1793,6 @@ class RosbagProcessor:
         """Finds, extracts, and processes rosbags from the ZIP file."""
         print(f"--- Processing HMIdata in {self.config.zip_path} ---")

-
         with tempfile.TemporaryDirectory() as tmp_dir_str:
             try:
                 with zipfile.ZipFile(self.config.zip_path, 'r') as zip_ref:
@@ -1712,7 +1852,7 @@ def run_cpp_engine(config: Config):
             check=True,  # Raise exception on non-zero exit code
             capture_output=True,  # Capture stdout/stderr
             text=True,  # Decode output as text
-            cwd=config.engine_path.parent  # Run from the engine's directory? Or script's? Adjust if needed.
+            # cwd=config.engine_path.parent  # Run from the engine's directory? Or script's? Adjust if needed.
         )
         print("C++ Engine Output:")
         print(result.stdout)
@@ -1737,4 +1877,4 @@ def run_cpp_engine(config: Config):


 if __name__ == "__main__":
-    pass
+    pass