data_process.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
##################################################################
#
# Copyright (c) 2023 CICV, Inc. All Rights Reserved
#
##################################################################
"""
@Authors: yangzihao(yangzihao@china-icv.cn)
@Date: 2023/11/27
@Last Modified: 2023/11/27
@Summary: Csv data process functions
"""

import os
import sys

import numpy as np
import pandas as pd

from status_mapping import *
# from status_mapping import acc_status_mapping, lka_status_mapping, ldw_status_mapping
from data_quality import DataQuality, get_all_files, frame_loss_statistic
from common import cal_velocity
from data_info import CsvData
import log


class DataProcess(object):
  25. """
  26. The data process class. It is a template to get evaluation raw data and process the raw data.
  27. Attributes:
  28. """

    def __init__(self, data_path, config, case_name):
        # base info
        self.data_path = data_path
        self.case_name = case_name
        self.config = config

        # drive data
        self.ego_df = pd.DataFrame()
        self.object_df = pd.DataFrame()
        self.driver_ctrl_df = pd.DataFrame()
        self.vehicle_sys_df = pd.DataFrame()
        self.status_df = pd.DataFrame()

        # environment data
        self.lane_info_df = pd.DataFrame()
        self.road_mark_df = pd.DataFrame()
        self.road_pos_df = pd.DataFrame()
        self.traffic_light_df = pd.DataFrame()
        self.traffic_signal_df = pd.DataFrame()

        self.frame_rate = float()

        self.obj_data = {}
        self.ego_data = {}
        self.obj_id_list = []  # filled with the playerId keys in _object_df_process()
        self.car_info = {}
        self.report_info = {}
        self.driver_ctrl_data = {}

        self._process()

    def _process(self):
        # self._signal_mapping()
        self._merge_csv()
        self._read_csv()
        self._invalid_detect()
        self._fill_missing_columns()
        # self._cal_frame_rate()
        # self._time_alignment()
        self.car_info = self._get_car_info(self.object_df)
        # self._compact_data()
        # self._abnormal_detect()
        # self._status_map(self.object_df)
        self._object_accel_get_from_egostate()
        self._object_df_process()
        self.report_info = self._get_report_info(self.obj_data[1])
        self.driver_ctrl_data = self._get_driver_ctrl_data(self.driver_ctrl_df)

    def _invalid_column_detect(self, df, csv_name):
        """
        Detect columns of the given csv whose value never changes and log a warning for each.

        Returns:
            No returns; a warning is written to the logger for every constant column.
        """
        logger = log.get_logger()
        for column in df.columns:
            if df[column].nunique() == 1:
                # if 9999.00 in df[column].values or "9999.00" in df[column].values:
                logger.warning(
                    f"[case:{self.case_name}] SINGLE_CASE_EVAL: [{csv_name}] data '{column}' invalid WARNING!")

    def _csv_interpolate_by_frame(self, df):
        # df = pd.read_csv(input)  # read the CSV file
        df['simFrame'] = pd.to_numeric(df['simFrame'], errors='coerce')  # coerce the simFrame column to numeric
        df = df.sort_values(by='simFrame')  # sort by simFrame
        full_simFrame_series = pd.Series(range(df['simFrame'].min(), df['simFrame'].max() + 1))  # build a complete, consecutive simFrame series
        df = df.merge(full_simFrame_series.rename('simFrame'), how='right')  # right-merge onto the complete series so missing simFrame rows appear
        df = df.interpolate(method='linear')  # linearly interpolate the remaining columns
        df['simFrame'] = df['simFrame'].astype(int)  # restore simFrame to integer dtype
        # df.to_csv(output, index=False)  # save the processed data to a new CSV file
        result = df.copy()
        return result

    def _cal_frame_rate(self):
        object_df = self.object_df.copy()
        ego_df = object_df[object_df['playerId'] == 1]
        df_filtered = ego_df[(ego_df['simTime'] > 0) & (ego_df['simTime'] <= 1)]
        self.frame_rate = df_filtered.shape[0]
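    # Worked example (illustrative): if the ego vehicle has 100 rows with simTime in (0, 1],
    # the estimate above yields frame_rate = 100, i.e. 100 Hz, and _data_time_align() will
    # then use a time step of 1.0 / 100 = 0.01 s.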

    def _data_time_align(self, base_time, df):
        # Special case: if the input dataframe is empty, return it unchanged
        if df.empty:
            return df
        # FRAME_RATE = self.frame_rate
        time_diff = 1.0 / self.frame_rate
        # Build a new increasing simTime series, starting at 0 with step time_diff (e.g. 0.01)
        new_sim_time_values = np.arange(0, base_time.max() + time_diff, time_diff)
        # Build a mapping dict from the original simTime values to the new simTime values
        original_to_new_sim_time = {original: round(new_sim_time_values[i], 2) for i, original in enumerate(base_time)}
        # Use isin to keep only rows whose simTime appears in base_time, then keep rows with simFrame > 0
        filtered_df1 = df[df['simTime'].isin(base_time)]
        filtered_df2 = filtered_df1[filtered_df1['simFrame'] > 0]
        filtered_df = filtered_df2.reset_index(drop=True)
        # Replace the simTime column of filtered_df through the mapping dict
        filtered_df['simTime'] = filtered_df['simTime'].map(original_to_new_sim_time)
        # Update simFrame to match the new simTime
        filtered_df['simFrame'] = (filtered_df['simTime'] * self.frame_rate + 1).round().astype(int)
        return filtered_df
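    # Worked example (illustrative): with frame_rate = 100 and
    # base_time = [0.00, 0.02, 0.03, ...] (a dropped 0.01 frame), the mapping becomes
    # {0.00: 0.00, 0.02: 0.01, 0.03: 0.02, ...}, so the surviving rows are re-stamped onto a
    # gap-free 0.01 s grid and simFrame is recomputed as simTime * 100 + 1.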

    @staticmethod
    def _speed_mps2kmph(df):
        df['speedX'] = df['speedX'] * 3.6  # m/s to km/h
        df['speedY'] = df['speedY'] * 3.6  # m/s to km/h
        df['speedZ'] = df['speedZ'] * 3.6  # m/s to km/h
        return df

    def _merge_csv(self):
        # read csv files
        df_ego = pd.read_csv(os.path.join(self.data_path, 'EgoState.csv')).drop_duplicates()
        df_object = pd.read_csv(os.path.join(self.data_path, 'ObjState.csv')).drop_duplicates()  # vehicle motion info
        df_laneinfo = pd.read_csv(os.path.join(self.data_path, 'LaneInfo.csv')).drop_duplicates()  # curvature and curvature-rate info
        df_roadPos = pd.read_csv(os.path.join(self.data_path, 'RoadPos.csv')).drop_duplicates()
        df_status = pd.read_csv(os.path.join(self.data_path, 'VehState.csv'), index_col=False).drop_duplicates()  # status machine
        df_vehicleSys = pd.read_csv(os.path.join(self.data_path, 'VehicleSystems.csv')).drop_duplicates()  # vehicle light info

        self.lane_info_df = df_laneinfo
        self.vehicle_sys_df = df_vehicleSys
        self.status_df = df_status

        self._invalid_detect_before_merge()  # invalid detect
        # self._invalid_column_detect(df_laneinfo, 'LaneInfo.csv')
        # self._invalid_column_detect(df_object, 'ObjState.csv')
        # self._invalid_column_detect(df_vehicleSys, 'VehicleSystems.csv')
        # self._invalid_column_detect(df_status, 'VehState.csv')

        df_ego['simTime'] = df_ego['simTime'].round(2)  # EGO: km/h
        df_object['simTime'] = df_object['simTime'].round(2)  # OBJ: m/s, needs unit conversion
        df_object = self._speed_mps2kmph(df_object)  # m/s to km/h

        # base_time = df_ego['simTime'].unique()
        # df_ego = self._data_time_align(base_time, df_ego)
        # df_object = self._data_time_align(base_time, df_object)
        # df_laneinfo = self._data_time_align(base_time, df_laneinfo)
        # df_roadPos = self._data_time_align(base_time, df_roadPos)
        # df_status = self._data_time_align(base_time, df_status)
        # df_vehicleSys = self._data_time_align(base_time, df_vehicleSys)

        EGO_PLAYER_ID = 1
        # merge ego_df into obj_df
        df_ego['playerId'] = EGO_PLAYER_ID
        combined_df = pd.concat([df_object, df_ego]).drop_duplicates(subset=['simTime', 'simFrame', 'playerId'])
        df_object = combined_df.sort_values(
            by=['simTime', 'simFrame', 'playerId']).copy()  # sort by simTime/simFrame/playerId

        df_laneinfo['curvHor'] = df_laneinfo['curvHor'].round(3)
        df_laneinfo.rename(columns={"id": 'laneId'}, inplace=True)
        result = pd.merge(df_roadPos, df_laneinfo, how='inner', on=["simTime", "simFrame", "playerId", "laneId"])
        df_laneinfo_new = result[["simTime", "simFrame", "playerId", "curvHor", "curvHorDot"]].copy().drop_duplicates()

        # status mapping
        df_status = self._status_mapping(df_status)
        df_status = df_status[['simTime', 'ACC_status', 'Aeb_status', 'LKA_status', 'ICA_status', 'LDW_status']].copy()

        df_roadPos = df_roadPos[["simTime", "simFrame", "playerId", "laneOffset", "rollRel", "pitchRel"]].copy()

        # df merge
        df_vehicleSys = df_vehicleSys[['simTime', 'simFrame', 'lightMask', 'steering']].copy()
        merged_df = pd.merge(df_object, df_vehicleSys, on=["simTime", "simFrame"], how="left")
        merged_df1 = pd.merge(merged_df, df_laneinfo_new, on=["simTime", "simFrame", "playerId"], how="left")
        merged_df1 = pd.merge(merged_df1, df_roadPos, on=["simTime", "simFrame", "playerId"], how="left")
        merged_df2 = pd.merge_asof(merged_df1, df_status, on="simTime", direction='nearest')

        mg_df = merged_df2.drop_duplicates()  # de-duplicate
        mg_df = mg_df[mg_df.simFrame > 0].copy()
        mg_df.to_csv(os.path.join(self.data_path, 'merged_ObjState.csv'), index=False)

        print('The files are merged.')

    def _read_csv(self):
        """
        Read the csv files under self.data_path into dataframes.

        Returns:
            No returns.
        """
        self.driver_ctrl_df = pd.read_csv(os.path.join(self.data_path, 'DriverCtrl.csv')).drop_duplicates()
        self.ego_df = pd.read_csv(os.path.join(self.data_path, 'EgoState.csv')).drop_duplicates()
        # self.object_df = pd.read_csv(os.path.join(self.data_path, 'ObjState.csv'))
        self.object_df = pd.read_csv(os.path.join(self.data_path, 'merged_ObjState.csv')).drop_duplicates(
            subset=['simTime', 'simFrame', 'playerId'])
        self.road_mark_df = pd.read_csv(os.path.join(self.data_path, 'RoadMark.csv')).drop_duplicates()
        self.road_pos_df = pd.read_csv(os.path.join(self.data_path, 'RoadPos.csv')).drop_duplicates()
        self.traffic_light_df = pd.read_csv(os.path.join(self.data_path, 'TrafficLight.csv')).drop_duplicates()
        self.traffic_signal_df = pd.read_csv(os.path.join(self.data_path, 'TrafficSign.csv')).drop_duplicates()

    def _invalid_detect_before_merge(self):
        # invalid detect
        self._invalid_column_detect(self.lane_info_df, 'LaneInfo.csv')
        # self._invalid_column_detect(self.object_df, 'ObjState.csv')
        self._invalid_column_detect(self.vehicle_sys_df, 'VehicleSystems.csv')
        self._invalid_column_detect(self.status_df, 'VehState.csv')

    def _invalid_detect(self):
        # invalid detect
        self._invalid_column_detect(self.ego_df, 'EgoState.csv')
        self._invalid_column_detect(self.object_df, 'ObjState.csv')
        self._invalid_column_detect(self.driver_ctrl_df, 'DriverCtrl.csv')
        self._invalid_column_detect(self.road_mark_df, 'RoadMark.csv')
        self._invalid_column_detect(self.road_pos_df, 'RoadPos.csv')
        self._invalid_column_detect(self.traffic_light_df, 'TrafficLight.csv')
        self._invalid_column_detect(self.traffic_signal_df, 'TrafficSign.csv')

    def _fill_missing_columns(self):
        pass

    def _time_alignment(self):
        base_time = self.ego_df['simTime'].unique()
        self.driver_ctrl_df = self._data_time_align(base_time, self.driver_ctrl_df)
        self.ego_df = self._data_time_align(base_time, self.ego_df)
        self.object_df = self._data_time_align(base_time, self.object_df)
        self.road_mark_df = self._data_time_align(base_time, self.road_mark_df)
        self.road_pos_df = self._data_time_align(base_time, self.road_pos_df)
        self.traffic_light_df = self._data_time_align(base_time, self.traffic_light_df)
        self.traffic_signal_df = self._data_time_align(base_time, self.traffic_signal_df)
        print("The data is aligned.")

        # interpolate data
        # self.driver_ctrl_df = self._csv_interpolate_by_frame(self.driver_ctrl_df)
        # self.ego_df = self._csv_interpolate_by_frame(self.ego_df)
        # self.object_df = self._csv_interpolate_by_frame(self.object_df)
        # self.road_mark_df = self._csv_interpolate_by_frame(self.road_mark_df)
        # self.road_pos_df = self._csv_interpolate_by_frame(self.road_pos_df)
        # self.traffic_light_df = self._csv_interpolate_by_frame(self.traffic_light_df)
        # self.traffic_signal_df = self._csv_interpolate_by_frame(self.traffic_signal_df)

    def _signal_mapping(self):
        pass
        # signal mapping
        # signal_json = r'./signal.json'
        # signal_dict = json2dict(signal_json)
        # df_objectstate = signal_name_map(df_objectstate, signal_dict, 'objectState')
        # df_roadmark = signal_name_map(df_roadmark, signal_dict, 'roadMark')
        # df_roadpos = signal_name_map(df_roadpos, signal_dict, 'roadPos')
        # df_trafficlight = signal_name_map(df_trafficlight, signal_dict, 'trafficLight')
        # df_trafficsignal = signal_name_map(df_trafficsignal, signal_dict, 'trafficSignal')
        # df_drivectrl = signal_name_map(df_drivectrl, signal_dict, 'driverCtrl')
        # df_laneinfo = signal_name_map(df_laneinfo, signal_dict, 'laneInfo')
        # df_status = signal_name_map(df_status, signal_dict, 'statusMachine')
        # df_vehiclesys = signal_name_map(df_vehiclesys, signal_dict, 'vehicleSys')

    def _get_car_info(self, df):
        """
        Get the ego vehicle dimensions from its first row in the object dataframe.

        Args:
            df: A dataframe of merged object state data.

        Returns:
            car_info: A dict with the ego length, width, height and offset.
        """
        EGO_PLAYER_ID = 1
        first_row = df[df['playerId'] == EGO_PLAYER_ID].iloc[0].to_dict()
        length = first_row['dimX']
        width = first_row['dimY']
        height = first_row['dimZ']
        offset = first_row['offX']

        car_info = {
            "length": length,
            "width": width,
            "height": height,
            "offset": offset
        }
        return car_info

    def _compact_data(self):
        """
        Extract the necessary columns from the object dataframe.

        Returns:
            No returns.
        """
        self.object_df = self.object_df[CsvData.OBJECT_INFO].copy()

    def _abnormal_detect(self):  # head and tail detect
        """
        Detect whether the head of the csv begins with 0 or not.

        Returns:
            A dataframe whose 'time' column begins with 0.
        """
        pass

    def _unit_unified(self):
        pass

    def _object_accel_get_from_egostate(self):
        # Merge the two DataFrames on the simTime and playerId columns.
        # how='left' keeps every row of self.object_df, and where a match is found in
        # self.ego_df the accel values from the ego data are used.
        merged = pd.merge(self.object_df, self.ego_df[['simTime', 'playerId', 'accelX']],
                          on=['simTime', 'playerId'], how='left', suffixes=('', '_y'))
        merged = pd.merge(merged, self.ego_df[['simTime', 'playerId', 'accelY']],
                          on=['simTime', 'playerId'], how='left', suffixes=('', '_y'))
        # The suffixes argument avoids column-name clashes, so the merged frame carries the ego
        # values in 'accelX_y' / 'accelY_y'; those columns are copied back into object_df.
        # Without suffixes (and without an actual clash) the plain 'accelX' / 'accelY' columns
        # could be used directly instead.
        self.object_df['accelX'] = merged['accelX_y']  # ego accelX when suffixes are used
        self.object_df['accelY'] = merged['accelY_y']  # ego accelY when suffixes are used

    def _object_df_process(self):
        """
        Process the data of the object dataframe. Save the data grouped by playerId.

        Returns:
            No returns.
        """
        EGO_PLAYER_ID = 1
        data = self.object_df.copy()

        # calculate common parameters
        data['lat_v'] = data['speedY'] * 1
        data['lon_v'] = data['speedX'] * 1
        data['v'] = data.apply(lambda row: cal_velocity(row['lat_v'], row['lon_v']), axis=1)  # km/h

        # calculate acceleration components
        data['lat_acc'] = data['accelY'] * 1
        data['lon_acc'] = data['accelX'] * 1
        data['accel'] = data.apply(lambda row: cal_velocity(row['lat_acc'], row['lon_acc']), axis=1)

        self.object_df = data.copy()

        # calculate per-object parameters
        for obj_id, obj_data in data.groupby("playerId"):
            self.obj_data[obj_id] = obj_data
            self.obj_data[obj_id]['time_diff'] = self.obj_data[obj_id]['simTime'].diff()
            self.obj_data[obj_id]['lat_acc_diff'] = self.obj_data[obj_id]['lat_acc'].diff()
            self.obj_data[obj_id]['lon_acc_diff'] = self.obj_data[obj_id]['lon_acc'].diff()
            self.obj_data[obj_id]['yawrate_diff'] = self.obj_data[obj_id]['speedH'].diff()
            self.obj_data[obj_id]['lat_acc_roc'] = self.obj_data[obj_id]['lat_acc_diff'] / self.obj_data[obj_id]['time_diff']
            self.obj_data[obj_id]['lon_acc_roc'] = self.obj_data[obj_id]['lon_acc_diff'] / self.obj_data[obj_id]['time_diff']
            self.obj_data[obj_id]['accelH'] = self.obj_data[obj_id]['yawrate_diff'] / self.obj_data[obj_id]['time_diff']
            self.obj_data[obj_id]['lat_acc_roc'] = self.obj_data[obj_id]['lat_acc_roc'].replace([np.inf, -np.inf], [9999, -9999])
            self.obj_data[obj_id]['lon_acc_roc'] = self.obj_data[obj_id]['lon_acc_roc'].replace([np.inf, -np.inf], [9999, -9999])
            self.obj_data[obj_id]['accelH'] = self.obj_data[obj_id]['accelH'].replace([np.inf, -np.inf], [9999, -9999])

        # get object id list
        self.obj_id_list = list(self.obj_data.keys())
        self.ego_data = self.obj_data[EGO_PLAYER_ID]
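    # Note on the derived columns above: lat_acc_roc / lon_acc_roc are finite-difference
    # estimates of jerk (change in acceleration per second) and accelH is a finite-difference
    # yaw-acceleration estimate; a division by a zero time_diff produces +/-inf, which is
    # clamped to the sentinel values +/-9999 used in this pipeline.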

    def _mileage_cal(self, df1):
        """
        Calculate the mileage of the given df.

        Args:
            df1: A dataframe of driving data.

        Returns:
            mileage: A float of the mileage (meters) of the driving data.
        """
        df = df1.copy()
        # if 9999.00 in df['travelDist'].values or "9999.00" in df['travelDist'].values:
        if df['travelDist'].nunique() == 1:
            df['time_diff'] = df['simTime'].diff()  # time step between samples
            df['avg_speed'] = (df['v'] + df['v'].shift()) / 2  # average speed over each time step
            df['distance_increment'] = df['avg_speed'] * df['time_diff'] / 3.6  # distance increment per step (km/h * s / 3.6 = m)
            # accumulate the travelled distance
            df['travelDist'] = df['distance_increment'].cumsum()
            df['travelDist'] = df['travelDist'].fillna(0)

        mile_start = df['travelDist'].iloc[0]
        mile_end = df['travelDist'].iloc[-1]
        mileage = round(mile_end - mile_start, 2)
        return mileage
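    # Worked example (illustrative): with v in km/h and time_diff in seconds, a constant
    # 36 km/h over a 0.01 s step contributes 36 * 0.01 / 3.6 = 0.1 m, so the cumulative sum
    # of the increments reconstructs travelDist in meters when the recorded column is unusable.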

    def _duration_cal(self, df):
        """
        Calculate the duration of the given df.

        Args:
            df: A dataframe of driving data.

        Returns:
            duration: A float of the duration (seconds) of the driving data.
        """
        time_start = df['simTime'].iloc[0]
        time_end = df['simTime'].iloc[-1]
        duration = time_end - time_start
        return duration

    def _get_report_info(self, df):
        """
        Get report information from the dataframe.

        Args:
            df: A dataframe of driving data.

        Returns:
            report_info: A dict of report information (mileage and duration).
        """
        mileage = self._mileage_cal(df)
        duration = self._duration_cal(df)
        report_info = {
            "mileage": mileage,
            "duration": duration
        }
        return report_info

    def _status_mapping(self, df):
        # df['Abpb_status'] = df['Abpb_status'].apply(lambda x: abpb_status_mapping(x))
        df['ACC_status'] = df['ACC_status'].apply(lambda x: acc_status_mapping(x))
        df['Aeb_status'] = df['Aeb_status'].apply(lambda x: aeb_status_mapping(x))
        # df['Awb_status'] = df['Awb_status'].apply(lambda x: ldw_status_mapping(x))
        # df['DOW_status'] = df['DOW_status'].apply(lambda x: ldw_status_mapping(x))
        # df['Eba_status'] = df['Eba_status'].apply(lambda x: ldw_status_mapping(x))
        # df['ELK_status'] = df['ELK_status'].apply(lambda x: ldw_status_mapping(x))
        # df['ESA_status'] = df['ESA_status'].apply(lambda x: ldw_status_mapping(x))
        # df['Fcw_status'] = df['Fcw_status'].apply(lambda x: ldw_status_mapping(x))
        df['ICA_status'] = df['ICA_status'].apply(lambda x: ica_status_mapping(x))
        # df['ISLC_status'] = df['ISLC_status'].apply(lambda x: ldw_status_mapping(x))
        # df['JA_status'] = df['JA_status'].apply(lambda x: ldw_status_mapping(x))
        df['LKA_status'] = df['LKA_status'].apply(lambda x: lka_status_mapping(x))
        df['LDW_status'] = df['LDW_status'].apply(lambda x: ldw_status_mapping(x))
        # df['NOA_status'] = df['NOA_status'].apply(lambda x: ldw_status_mapping(x))
        # df['RCW_status'] = df['RCW_status'].apply(lambda x: ldw_status_mapping(x))
        # df['TLC_status'] = df['TLC_status'].apply(lambda x: ldw_status_mapping(x))
        # df['FVSR_status'] = df['FVSR_status'].apply(lambda x: ldw_status_mapping(x))
        # df['BSD_status'] = df['BSD_status'].apply(lambda x: ldw_status_mapping(x))
        # df['RCTA_status'] = df['RCTA_status'].apply(lambda x: ldw_status_mapping(x))
        # df['FCTA_status'] = df['FCTA_status'].apply(lambda x: ldw_status_mapping(x))
        # df['ISA_status'] = df['ISA_status'].apply(lambda x: ldw_status_mapping(x))
        # df['TSR_status'] = df['TSR_status'].apply(lambda x: ldw_status_mapping(x))
        # df['AVM_status'] = df['AVM_status'].apply(lambda x: ldw_status_mapping(x))
        # df['PDC_status'] = df['PDC_status'].apply(lambda x: ldw_status_mapping(x))
        # df['APA_status'] = df['APA_status'].apply(lambda x: ldw_status_mapping(x))
        # df['MEB_status'] = df['MEB_status'].apply(lambda x: ldw_status_mapping(x))
        # df['RDA_status'] = df['RDA_status'].apply(lambda x: ldw_status_mapping(x))
        return df

    def _get_driver_ctrl_data(self, df):
        """
        Process and get driver ctrl information, such as brake pedal, throttle pedal and steering wheel.

        Args:
            df: A dataframe of driver ctrl data.

        Returns:
            driver_ctrl_data: A dict of driver ctrl info.
        """
        time_list = df['simTime'].round(2).values.tolist()
        frame_list = df['simFrame'].values.tolist()

        # pedal signals recorded as fractions (0-1) are rescaled to percent (0-100)
        max_brakePedal = df['brakePedal'].max()
        if max_brakePedal < 1:
            df['brakePedal'] = df['brakePedal'] * 100
        brakePedal_list = df['brakePedal'].values.tolist()

        max_throttlePedal = df['throttlePedal'].max()
        if max_throttlePedal < 1:
            df['throttlePedal'] = df['throttlePedal'] * 100
        throttlePedal_list = df['throttlePedal'].values.tolist()

        steeringWheel_list = df['steeringWheel'].values.tolist()

        driver_ctrl_data = {
            "time_list": time_list,
            "frame_list": frame_list,
            "brakePedal_list": brakePedal_list,
            "throttlePedal_list": throttlePedal_list,
            "steeringWheel_list": steeringWheel_list
        }
        return driver_ctrl_data


class StatusTime(object):
    """
    Usage: use isin() to filter the rows of a DataFrame whose simTime values appear in a status time list, e.g.
        filtered_df = df[df['simTime'].isin(self.status_time_dict['ACC_status'])]
    """

    def __init__(self, status_df):
        self.status_df = status_df
        self.status_list = []
        self.no_status_time_list = []
        self.status_time_dict = {}
        self._run()

    def _get_status_list(self):
        status_columns_list = self.status_df.columns.tolist()
        self.status_list = [x for x in status_columns_list if x not in ['simTime', 'simFrame']]

    def _get_no_status_time_list(self):
        # select the rows in which every status-machine column equals 0
        filtered_rows = self.status_df[self.status_df[self.status_list].eq(0).all(axis=1)]
        # output the simTime values of the filtered rows as a list
        self.no_status_time_list = filtered_rows['simTime'].tolist()

    def _get_status_time_dict(self):
        for status in self.status_list:
            status_time = self.status_df[self.status_df[status] != 0]['simTime'].values.tolist()
            self.status_time_dict[status] = status_time
        self.status_time_dict['no_status'] = self.no_status_time_list

    def _run(self):
        self._get_status_list()
        self._get_no_status_time_list()
        self._get_status_time_dict()
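

# A minimal usage sketch (illustrative, not part of the original module). The data directory
# and case name below are placeholders; config is only stored by DataProcess in this file,
# so None is passed here, while the real pipeline presumably supplies a proper config object.
if __name__ == '__main__':
    demo_data_path = './data/case_0001'  # placeholder directory containing the csv files read above
    processor = DataProcess(data_path=demo_data_path, config=None, case_name='demo_case')
    print(processor.car_info)     # ego dimensions taken from the merged object data
    print(processor.report_info)  # mileage (m) and duration (s) of the ego track
    status_time = StatusTime(processor.status_df)
    print(list(status_time.status_time_dict.keys()))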