#!/usr/bin/env python
# -*- coding: utf-8 -*-
##################################################################
#
# Copyright (c) 2023 CICV, Inc. All Rights Reserved
#
##################################################################
"""
@Authors: zhanghaiwen(zhanghaiwen@china-icv.cn), yangzihao(yangzihao@china-icv.cn)
@Date: 2023/06/25
@Last Modified: 2023/06/25
@Summary: Comfort metrics
"""

import os
import sys
import math

import pandas as pd
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt

sys.path.append('../common')
sys.path.append('../modules')
sys.path.append('../results')

from data_info import DataInfoList
from score_weight import cal_score_with_priority, cal_weight_from_80
from common import get_interpolation, score_grade, string_concatenate, replace_key_with_value, get_frame_with_time

def peak_valley_decorator(method):
    def wrapper(self, *args, **kwargs):
        peak_valley = self._peak_valley_determination(self.df)
        pv_list = self.df.loc[peak_valley, ['simTime', 'speedH']].values.tolist()
        if len(pv_list) != 0:
            flag = True
            p_last = pv_list[0]
            for i in range(1, len(pv_list)):
                p_curr = pv_list[i]
                if self._peak_valley_judgment(p_last, p_curr):
                    # method(self, p_curr, p_last)
                    method(self, p_curr, p_last, flag, *args, **kwargs)
                else:
                    p_last = p_curr
            return method
        else:
            flag = False
            p_curr = [0, 0]
            p_last = [0, 0]
            method(self, p_curr, p_last, flag, *args, **kwargs)
            return method

    return wrapper
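
# How the decorator is used (a sketch, not executable on its own): methods such
# as zigzag_count_func below are written against a single (p_curr, p_last, flag)
# pair; the wrapper finds all yaw-rate peaks and valleys, walks consecutive
# pairs, and invokes the wrapped method once for every pair that passes
# _peak_valley_judgment. So e.g. `self.zigzag_count_func()` replays the whole
# recording and leaves the tally in self.zigzag_count.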

class Comfort(object):
    """
    Class for achieving comfort metrics for autonomous driving.

    Attributes:
        dataframe: Vehicle driving data, stored in dataframe format.
    """

    def __init__(self, data_processed, custom_data, scoreModel, resultPath):
        self.eval_data = pd.DataFrame()
        self.data_processed = data_processed
        self.scoreModel = scoreModel
        self.resultPath = resultPath

        self.data = data_processed.obj_data[1]
        self.mileage = data_processed.report_info['mileage']
        self.ego_df = pd.DataFrame()
        self.discomfort_df = pd.DataFrame(columns=['start_time', 'end_time', 'start_frame', 'end_frame', 'type'])
        # self.df_drivectrl = data_processed.driver_ctrl_df

        self.config = data_processed.config
        comfort_config = data_processed.comfort_config
        self.comfort_config = comfort_config

        # common data
        self.bulitin_metric_list = self.config.builtinMetricList

        # dimension data
        self.weight_custom = comfort_config['weightCustom']
        self.metric_list = comfort_config['metric']
        self.type_list = comfort_config['type']
        self.type_name_dict = comfort_config['typeName']
        self.name_dict = comfort_config['name']
        self.unit_dict = comfort_config['unit']

        # custom metric data
        self.customMetricParam = comfort_config['customMetricParam']
        self.custom_metric_list = list(self.customMetricParam.keys())
        self.custom_data = custom_data
        self.custom_param_dict = {}

        # score data
        self.weight = comfort_config['weightDimension']
        self.weight_type_dict = comfort_config['typeWeight']
        self.weight_type_list = comfort_config['typeWeightList']
        self.weight_dict = comfort_config['weight']
        self.weight_list = comfort_config['weightList']
        self.priority_dict = comfort_config['priority']
        self.priority_list = comfort_config['priorityList']
        self.kind_dict = comfort_config['kind']
        self.optimal_dict = comfort_config['optimal']
        self.optimal1_dict = self.optimal_dict[0]
        self.optimal2_dict = self.optimal_dict[1]
        self.optimal3_dict = self.optimal_dict[2]
        self.multiple_dict = comfort_config['multiple']
        self.kind_list = comfort_config['kindList']
        self.optimal_list = comfort_config['optimalList']
        self.multiple_list = comfort_config['multipleList']

        # metric data
        self.metric_dict = comfort_config['typeMetricDict']
        self.lat_metric_list = self.metric_dict['comfortLat']
        self.lon_metric_list = self.metric_dict['comfortLon']
        # self.lat_metric_list = ["zigzag", "shake"]
        # self.lon_metric_list = ["cadence", "slamBrake", "slamAccelerate"]

        self.time_list = data_processed.driver_ctrl_data['time_list']
        self.frame_list = data_processed.driver_ctrl_data['frame_list']

        self.linear_accel_dict = dict()
        self.angular_accel_dict = dict()

        self.count_dict = {}
        self.duration_dict = {}
        self.strength_dict = {}

        self.discomfort_count = 0
        self.zigzag_count = 0
        self.shake_count = 0
        self.cadence_count = 0
        self.slam_brake_count = 0
        self.slam_accel_count = 0

        self.zigzag_strength = 0
        self.shake_strength = 0
        self.cadence_strength = 0
        self.slam_brake_strength = 0
        self.slam_accel_strength = 0

        self.discomfort_duration = 0
        self.zigzag_duration = 0
        self.shake_duration = 0
        self.cadence_duration = 0
        self.slam_brake_duration = 0
        self.slam_accel_duration = 0

        self.zigzag_time_list = []
        self.zigzag_frame_list = []
        self.zigzag_stre_list = []
        self.cur_ego_path_list = []
        self.curvature_list = []

        self._get_data()
        self._comf_param_cal()

    def _get_data(self):
        """Select the comfort-related columns of the ego data."""
        comfort_info_list = DataInfoList.COMFORT_INFO
        self.ego_df = self.data[comfort_info_list].copy()
        # self.df = self.ego_df.set_index('simFrame')  # keep the original csv index
        self.df = self.ego_df.reset_index(drop=True)  # use a clean 0-based index

    def _cal_cur_ego_path(self, row):
        """Curvature of the ego path: k = (x'y'' - y'x'') / (x'^2 + y'^2)^(3/2)."""
        try:
            divide = (row['speedX'] ** 2 + row['speedY'] ** 2) ** (3 / 2)
            if not divide:
                res = None
            else:
                res = (row['speedX'] * row['accelY'] - row['speedY'] * row['accelX']) / divide
        except Exception:
            res = None
        return res

    def _cal_max_min_avg(self, num_list):
        maxx = max(num_list) if num_list else "-"
        minn = min(num_list) if num_list else "-"
        avg = sum(num_list) / len(num_list) if num_list else "-"

        result = {
            "max": maxx,
            "min": minn,
            "avg": avg
        }
        return result

    def _comf_param_cal(self):
        """Scale the optimal baselines by mileage and derive per-frame comfort flags."""
        # scale the count and duration baselines (indices 0 and 1 of each metric triple) by mileage
        for i in range(len(self.optimal_list)):
            if i % 3 == 2:
                continue
            else:
                self.optimal_list[i] = round(self.optimal_list[i] * self.mileage / 100000, 8)
        self.optimal1_dict = {key: value * self.mileage / 100000 for key, value in self.optimal1_dict.copy().items()}
        self.optimal2_dict = {key: value * self.mileage / 100000 for key, value in self.optimal2_dict.copy().items()}

        # speed-dependent acceleration/deceleration thresholds
        self.ego_df['ip_acc'] = self.ego_df['v'].apply(get_interpolation, point1=[18, 4], point2=[72, 2])
        self.ego_df['ip_dec'] = self.ego_df['v'].apply(get_interpolation, point1=[18, -5], point2=[72, -3.5])
        self.ego_df['slam_brake'] = self.ego_df.apply(
            lambda row: self._slam_brake_process(row['lon_acc'], row['ip_dec']), axis=1)
        self.ego_df['slam_accel'] = self.ego_df.apply(
            lambda row: self._slam_accelerate_process(row['lon_acc'], row['ip_acc']), axis=1)
        self.ego_df['cadence'] = self.ego_df.apply(
            lambda row: self._cadence_process_new(row['lon_acc'], row['ip_acc'], row['ip_dec']), axis=1)

        self.accel_list = self.ego_df['accel'].values.tolist()
        self.accelH_list = self.ego_df['accelH'].values.tolist()
        self.linear_accel_dict = self._cal_max_min_avg(self.ego_df['accel'].dropna().values.tolist())
        self.angular_accel_dict = self._cal_max_min_avg(self.ego_df['accelH'].dropna().values.tolist())

        # for shake detector
        # self.ego_df['cur_ego_path'] = self.ego_df.apply(self._cal_cur_ego_path, axis=1)
        # self.ego_df['curvHor'] = self.ego_df['curvHor'].astype('float')
        # self.ego_df['cur_diff'] = (self.ego_df['cur_ego_path'] - self.ego_df['curvHor']).abs()
        # self.ego_df['R'] = self.ego_df['curvHor'].apply(lambda x: 10000 if x == 0 else 1 / x)
        # self.ego_df['R_ego'] = self.ego_df['cur_ego_path'].apply(lambda x: 10000 if x == 0 else 1 / x)
        # self.ego_df['R_diff'] = (self.ego_df['R_ego'] - self.ego_df['R']).abs()
        #
        # self.cur_ego_path_list = self.ego_df['cur_ego_path'].values.tolist()
        # self.curvature_list = self.ego_df['curvHor'].values.tolist()
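
    # Threshold intuition (a sketch, assuming get_interpolation interpolates
    # linearly between point1 and point2, as the calls above suggest): at
    # v = 18 the slam-accelerate threshold ip_acc is 4 m/s^2, at v = 72 it is
    # 2 m/s^2, so e.g. v = 45 -> ip_acc = 4 + (45 - 18) * (2 - 4) / (72 - 18) = 3.0.
    # ip_dec varies the same way between -5 and -3.5 m/s^2.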

    def _peak_valley_determination(self, df):
        """
        Determine the peaks and valleys of the vehicle's angular velocity.

        Parameters:
            df: Dataframe containing the vehicle angular velocity.

        Returns:
            peak_valley: List of indices representing peaks and valleys.
        """
        peaks, _ = scipy.signal.find_peaks(df['speedH'], height=0.01, distance=1, prominence=0.01)
        valleys, _ = scipy.signal.find_peaks(-df['speedH'], height=0.01, distance=1, prominence=0.01)
        peak_valley = sorted(list(peaks) + list(valleys))

        return peak_valley
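
    # Worked example (illustrative): for speedH = [0, 0.5, 0, -0.5, 0],
    # find_peaks flags index 1 as a peak of the signal and index 3 as a peak
    # of its negation (i.e. a valley), so peak_valley == [1, 3].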

    def _peak_valley_judgment(self, p_last, p_curr, tw=6000, avg=0.4):
        """
        Determine whether a peak/valley pair satisfies the zigzag conditions.

        Parameters:
            p_last: Previous peak or valley data point.
            p_curr: Current peak or valley data point.
            tw: Threshold time difference between peaks and valleys.
            avg: Angular velocity gap threshold.

        Returns:
            Boolean indicating whether the conditions are satisfied.
        """
        t_diff = p_curr[0] - p_last[0]
        v_diff = abs(p_curr[1] - p_last[1])
        s = p_curr[1] * p_last[1]

        zigzag_flag = t_diff < tw and v_diff > avg and s < 0
        if zigzag_flag and ([p_last[0], p_curr[0]] not in self.zigzag_time_list):
            self.zigzag_time_list.append([p_last[0], p_curr[0]])
        return zigzag_flag
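
    # Worked example (illustrative): p_last = [12.5, 0.3], p_curr = [13.1, -0.25]
    # gives t_diff = 0.6 < tw, v_diff = 0.55 > 0.4 and s = -0.075 < 0, i.e. two
    # close, opposite-signed yaw-rate extremes -> counted as one zigzag swing.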

    @peak_valley_decorator
    def zigzag_count_func(self, p_curr, p_last, flag=True):
        """
        Count the number of zigzag movements.

        Parameters:
            p_curr: Current peak or valley data point.
            p_last: Previous peak or valley data point.
            flag: Whether a valid peak/valley pair was found.
        """
        if flag:
            self.zigzag_count += 1
        else:
            self.zigzag_count += 0

    @peak_valley_decorator
    def cal_zigzag_strength_strength(self, p_curr, p_last, flag=True):
        """
        Accumulate zigzag strength samples (the average angular acceleration
        of each peak/valley pair) into self.zigzag_stre_list.
        """
        if flag:
            v_diff = abs(p_curr[1] - p_last[1])
            t_diff = p_curr[0] - p_last[0]
            self.zigzag_stre_list.append(v_diff / t_diff)  # average angular acceleration
        else:
            self.zigzag_stre_list = []

    def _shake_detector(self, Cr_diff=0.05, T_diff=0.39):
        """
        Shake detection based on:
        - ego lateral acceleration ax;
        - curvature of the ego trajectory;
        - rate of change of the ego trajectory curvature;
        - curvature of the ego lane;
        - rate of change of the ego lane curvature;
        - turn signal (tentative, may be dropped); Cr_diff = 0.1, T_diff = 0.04

        Curvature formula: k(t) = (x'(t) * y''(t) - y'(t) * x''(t)) / ((x'(t))^2 + (y'(t))^2)^(3/2)
        """
        time_list = []
        frame_list = []
        shake_time_list = []

        df = self.ego_df.copy()
        df = df[df['cur_diff'] > Cr_diff]  # segments where trajectory curvature deviates from road curvature beyond the threshold
        df['frame_ID_diff'] = df['simFrame'].diff()
        filtered_df = df[df.frame_ID_diff > T_diff]  # large frame gaps separate distinct shake events

        row_numbers = filtered_df.index.tolist()
        cut_column = pd.cut(df.index, bins=row_numbers)

        grouped = df.groupby(cut_column)
        dfs = {}
        for name, group in grouped:
            dfs[name] = group.reset_index(drop=True)

        for name, df_group in dfs.items():
            # straight road, no active lane change
            df_group['curvHor'] = df_group['curvHor'].abs()
            df_group_straight = df_group[(df_group.lightMask == 0) & (df_group.curvHor < 0.001)]
            if not df_group_straight.empty:
                tmp_list = df_group_straight['simTime'].values
                # shake_time_list.append([tmp_list[0], tmp_list[-1]])
                time_list.extend(df_group_straight['simTime'].values)
                frame_list.extend(df_group_straight['simFrame'].values)
                self.shake_count = self.shake_count + 1

            # turn signal on, straight road: judge shake against a larger curvature-difference threshold
            df_group_change_lane = df_group[(df_group['lightMask'] != 0) & (df_group['curvHor'] < 0.001)]
            df_group_change_lane_data = df_group_change_lane[df_group_change_lane.cur_diff > Cr_diff + 0.2]
            if not df_group_change_lane_data.empty:
                tmp_list = df_group_change_lane_data['simTime'].values
                # shake_time_list.append([tmp_list[0], tmp_list[-1]])
                time_list.extend(df_group_change_lane_data['simTime'].values)
                frame_list.extend(df_group_change_lane_data['simFrame'].values)
                self.shake_count = self.shake_count + 1

            # turning with the turn signal on
            df_group_turn = df_group[(df_group['lightMask'] != 0) & (df_group['curvHor'].abs() > 0.001)]
            df_group_turn_data = df_group_turn[df_group_turn.cur_diff.abs() > Cr_diff + 0.1]
            if not df_group_turn_data.empty:
                tmp_list = df_group_turn_data['simTime'].values
                # shake_time_list.append([tmp_list[0], tmp_list[-1]])
                time_list.extend(df_group_turn_data['simTime'].values)
                frame_list.extend(df_group_turn_data['simFrame'].values)
                self.shake_count = self.shake_count + 1

        t_list = time_list
        f_list = frame_list

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or t_list[i] - t_list[i - 1] <= 0.2:
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        # group_time.append(sub_group_time)
        # group_frame.append(sub_group_frame)
        # group_time = [g for g in group_time if len(g) >= 3]
        # group_frame = [g for g in group_frame if len(g) >= 3]
        #
        # group_time = []
        # sub_group = []
        # for i in range(len(t_list)):
        #     if not sub_group or t_list[i] - t_list[i - 1] <= 0.2:
        #         sub_group.append(t_list[i])
        #     else:
        #         group_time.append(sub_group)
        #         sub_group = [t_list[i]]
        #
        # group_time.append(sub_group)
        # group_time = [g for g in group_time if len(g) >= 3]

        # values for charting
        shake_time = [[g[0], g[-1]] for g in group_time]
        shake_frame = [[g[0], g[-1]] for g in group_frame]
        self.shake_count = len(shake_time)

        if shake_time:
            time_df = pd.DataFrame(shake_time, columns=['start_time', 'end_time'])
            frame_df = pd.DataFrame(shake_frame, columns=['start_frame', 'end_frame'])
            discomfort_df = pd.concat([time_df, frame_df], axis=1)
            discomfort_df['type'] = 'shake'
            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)

        return time_list

    def _cadence_process(self, lon_acc_roc, ip_dec_roc):
        if abs(lon_acc_roc) >= abs(ip_dec_roc) or abs(lon_acc_roc) < 1:
            return np.nan
        elif abs(lon_acc_roc) == 0:
            return 0
        elif lon_acc_roc > 0 and lon_acc_roc < -ip_dec_roc:
            return 1
        elif lon_acc_roc < 0 and lon_acc_roc > ip_dec_roc:
            return -1

    def _slam_brake_process(self, lon_acc, ip_dec):
        if lon_acc - ip_dec < 0:
            return 1
        else:
            return 0

    def _slam_accelerate_process(self, lon_acc, ip_acc):
        if lon_acc - ip_acc > 0:
            return 1
        else:
            return 0

    def _cadence_process_new(self, lon_acc, ip_acc, ip_dec):
        if abs(lon_acc) < 1 or lon_acc > ip_acc or lon_acc < ip_dec:
            return np.nan
        # elif abs(lon_acc_roc) == 0:
        elif abs(lon_acc) == 0:
            return 0
        elif lon_acc > 0 and lon_acc < ip_acc:
            return 1
        elif lon_acc < 0 and lon_acc > ip_dec:
            return -1
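
    # Worked examples (illustrative) for the labelling above, with thresholds
    # ip_acc = 3.5 and ip_dec = -4.5:
    #   lon_acc =  0.5 -> NaN (|lon_acc| < 1, too small to matter)
    #   lon_acc =  2.0 -> +1  (positive, below the slam-accelerate threshold)
    #   lon_acc = -2.0 -> -1  (negative, above the slam-brake threshold)
    #   lon_acc =  5.0 -> NaN (beyond ip_acc, handled as slam acceleration instead)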

    def _cadence_detector(self):
        """
        Abrupt acceleration changes: accelerate then decelerate, decelerate then
        accelerate, accelerate then stop, decelerate then stop.
        Cadence (jerkiness): several jerk spikes within 2 s.
        Find and extract every feature point, then open a 2 s window behind each
        one to count the frequency and avoid wasted computation.
        Cluster feature points by timestamp: gaps larger than 1 s split groups,
        gaps of at most 1 s merge into one group.
        (Groups with fewer than 3 feature points were originally dropped.)
        """
        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'cadence']].copy()
        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'cadence']].copy()
        time_list = data['simTime'].values.tolist()

        data = data[data['cadence'].notna()]  # fixed: `data['cadence'] != np.nan` is always True; use notna()
        data['cadence_diff'] = data['cadence'].diff()
        data.dropna(subset='cadence_diff', inplace=True)
        data = data[data['cadence_diff'] != 0]

        t_list = data['simTime'].values.tolist()
        f_list = data['simFrame'].values.tolist()

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or t_list[i] - t_list[i - 1] <= 1:  # feature points within one second belong to the same cadence group
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        group_time.append(sub_group_time)
        group_frame.append(sub_group_frame)
        group_time = [g for g in group_time if len(g) >= 1]  # a single feature point already counts as one cadence event
        group_frame = [g for g in group_frame if len(g) >= 1]

        # values for charting
        cadence_time = [[g[0], g[-1]] for g in group_time]
        cadence_frame = [[g[0], g[-1]] for g in group_frame]

        if cadence_time:
            time_df = pd.DataFrame(cadence_time, columns=['start_time', 'end_time'])
            frame_df = pd.DataFrame(cadence_frame, columns=['start_frame', 'end_frame'])
            discomfort_df = pd.concat([time_df, frame_df], axis=1)
            discomfort_df['type'] = 'cadence'
            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)

        # rebuild the full time list covered by each cadence group
        cadence_time_list = [time for pair in cadence_time for time in time_list if pair[0] <= time <= pair[1]]

        stre_list = []
        freq_list = []
        for g in group_time:
            # calculate strength
            g_df = data[data['simTime'].isin(g)]
            strength = g_df['lon_acc'].abs().mean()
            stre_list.append(strength)

            # calculate frequency
            cnt = len(g)
            t_start = g_df['simTime'].iloc[0]
            t_end = g_df['simTime'].iloc[-1]
            t_delta = t_end - t_start
            frequency = cnt / t_delta if t_delta else cnt  # guard single-point groups against division by zero
            freq_list.append(frequency)

        self.cadence_count = len(freq_list)
        cadence_stre = sum(stre_list) / len(stre_list) if stre_list else 0

        return cadence_time_list

    def _slam_brake_detector(self):
        # count segments where slam_brake is continuously 1 and record the frame_ID at the start of each segment
        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'ip_dec_roc', 'slam_brake']].copy()
        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'ip_dec', 'slam_brake']].copy()
        # data['slam_diff'] = data['slam_brake'].diff()
        # res_df = data[data['slam_diff'] == 1]
        res_df = data[data['slam_brake'] == 1]
        t_list = res_df['simTime'].values
        f_list = res_df['simFrame'].values.tolist()

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or f_list[i] - f_list[i - 1] <= 1:  # consecutive frames belong to the same braking event
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        group_time.append(sub_group_time)
        group_frame.append(sub_group_frame)
        group_time = [g for g in group_time if len(g) >= 2]  # at least two frames count as one slam brake
        group_frame = [g for g in group_frame if len(g) >= 2]

        # values for charting
        slam_brake_time = [[g[0], g[-1]] for g in group_time]
        slam_brake_frame = [[g[0], g[-1]] for g in group_frame]

        if slam_brake_time:
            time_df = pd.DataFrame(slam_brake_time, columns=['start_time', 'end_time'])
            frame_df = pd.DataFrame(slam_brake_frame, columns=['start_frame', 'end_frame'])
            discomfort_df = pd.concat([time_df, frame_df], axis=1)
            discomfort_df['type'] = 'slam_brake'
            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)

        time_list = [element for sublist in group_time for element in sublist]
        self.slam_brake_count = len(group_time)  # / self.mileage  # * 1000000
        return time_list

    def _slam_accel_detector(self):
        # count segments where slam_accel is continuously 1 and record the frame_ID at the start of each segment
        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'ip_acc_roc', 'slam_accel']].copy()
        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'ip_acc', 'slam_accel']].copy()
        # data['slam_diff'] = data['slam_accel'].diff()
        # res_df = data.loc[data['slam_diff'] == 1]
        res_df = data.loc[data['slam_accel'] == 1]
        t_list = res_df['simTime'].values
        f_list = res_df['simFrame'].values.tolist()

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or f_list[i] - f_list[i - 1] <= 1:  # consecutive frames belong to the same acceleration event (fixed: was `not group_time`, which never started a new group)
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        group_time.append(sub_group_time)
        group_frame.append(sub_group_frame)
        group_time = [g for g in group_time if len(g) >= 2]  # at least two frames count as one slam accelerate
        group_frame = [g for g in group_frame if len(g) >= 2]

        # values for charting
        slam_accel_time = [[g[0], g[-1]] for g in group_time]
        slam_accel_frame = [[g[0], g[-1]] for g in group_frame]

        if slam_accel_time:
            time_df = pd.DataFrame(slam_accel_time, columns=['start_time', 'end_time'])
            frame_df = pd.DataFrame(slam_accel_frame, columns=['start_frame', 'end_frame'])
            discomfort_df = pd.concat([time_df, frame_df], axis=1)
            discomfort_df['type'] = 'slam_accel'
            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)

        time_list = [element for sublist in group_time for element in sublist]
        self.slam_accel_count = len(group_time)  # / self.mileage  # * 1000000
        return time_list
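
    # The three detectors above share one grouping idiom: walk the sorted frame
    # ids and start a new group whenever the gap to the previous frame exceeds
    # one. A minimal standalone sketch of that idiom (illustrative only -- the
    # detectors keep their inline copies and do not call this helper):
    @staticmethod
    def _group_consecutive_frames(frames, max_gap=1):
        """Group a sorted list of frame ids into runs of consecutive frames.

        Example: [1, 2, 3, 7, 8] with max_gap=1 -> [[1, 2, 3], [7, 8]].
        """
        groups = []
        for f in frames:
            if groups and f - groups[-1][-1] <= max_gap:
                groups[-1].append(f)  # still within the current run
            else:
                groups.append([f])  # gap too large: start a new run
        return groups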

    def comf_statistic(self):
        """Aggregate count, duration and strength for every comfort metric."""
        # df = self.ego_df[['simTime', 'cur_diff', 'lon_acc', 'lon_acc_roc', 'accelH']].copy()
        df = self.ego_df[['simTime', 'lon_acc', 'lon_acc_roc', 'accelH']].copy()

        self.zigzag_count_func()
        self.cal_zigzag_strength_strength()
        if self.zigzag_time_list:
            zigzag_df = pd.DataFrame(self.zigzag_time_list, columns=['start_time', 'end_time'])
            zigzag_df = get_frame_with_time(zigzag_df, self.ego_df)
            zigzag_df['type'] = 'zigzag'
            self.discomfort_df = pd.concat([self.discomfort_df, zigzag_df], ignore_index=True)
            # discomfort_df = pd.concat([time_df, frame_df], axis=1)
            # self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)

        zigzag_t_list = []
        # zigzag_time_list only holds [t_start, t_end] pairs; expand them to the full time list
        t_list = df['simTime'].values.tolist()
        for t_start, t_end in self.zigzag_time_list:
            index_1 = t_list.index(t_start)
            index_2 = t_list.index(t_end)
            zigzag_t_list.extend(t_list[index_1:index_2 + 1])
        zigzag_t_list = list(set(zigzag_t_list))
        # shake_t_list = self._shake_detector()
        cadence_t_list = self._cadence_detector()
        slam_brake_t_list = self._slam_brake_detector()
        slam_accel_t_list = self._slam_accel_detector()

        # comfort_time_dict = {
        #     'zigzag_time_list': zigzag_t_list,
        #     'shake_time_list': shake_t_list,
        #     'cadence_time_list': cadence_t_list,
        #     'slam_brake_time_list': slam_brake_t_list,
        #     'slam_accelerate_time_list': slam_accel_t_list
        # }

        discomfort_time_list = zigzag_t_list + cadence_t_list + slam_brake_t_list + slam_accel_t_list
        discomfort_time_list = sorted(discomfort_time_list)  # sort
        discomfort_time_list = list(set(discomfort_time_list))  # deduplicate

        time_diff = self.time_list[3] - self.time_list[2]
        # time_diff = 0.4
        self.discomfort_duration = len(discomfort_time_list) * time_diff

        df['flag_zigzag'] = df['simTime'].apply(lambda x: 1 if x in zigzag_t_list else 0)
        # df['flag_shake'] = df['simTime'].apply(lambda x: 1 if x in shake_t_list else 0)
        df['flag_cadence'] = df['simTime'].apply(lambda x: 1 if x in cadence_t_list else 0)
        df['flag_slam_brake'] = df['simTime'].apply(lambda x: 1 if x in slam_brake_t_list else 0)
        df['flag_slam_accel'] = df['simTime'].apply(lambda x: 1 if x in slam_accel_t_list else 0)

        # hectokilometer = 100000  # 100 km in meters
        self.zigzag_duration = df['flag_zigzag'].sum() * time_diff  # / self.mileage * hectokilometer
        # self.shake_duration = df['flag_shake'].sum() * time_diff  # / self.mileage * hectokilometer
        self.cadence_duration = df['flag_cadence'].sum() * time_diff  # / self.mileage * hectokilometer
        self.slam_brake_duration = df['flag_slam_brake'].sum() * time_diff  # / self.mileage * hectokilometer
        self.slam_accel_duration = df['flag_slam_accel'].sum() * time_diff  # / self.mileage * hectokilometer

        # strength could also use the maximum; the mean is used for now, subject to the data
        # self.zigzag_strength = np.mean(self.zigzag_stre_list) if self.zigzag_stre_list else 0
        self.zigzag_strength = (df['flag_zigzag'] * abs(df['accelH'])).mean()
        # self.shake_strength = (df['flag_shake'] * abs(df['cur_diff'])).mean()
        self.cadence_strength = (df['flag_cadence'] * abs(df['lon_acc'])).mean()
        self.slam_brake_strength = (df['flag_slam_brake'] * abs(df['lon_acc'])).mean()
        self.slam_accel_strength = (df['flag_slam_accel'] * abs(df['lon_acc'])).mean()

        self.zigzag_strength = self._nan_detect(self.zigzag_strength)
        # self.shake_strength = self._nan_detect(self.shake_strength)
        self.cadence_strength = self._nan_detect(self.cadence_strength)
        self.slam_brake_strength = self._nan_detect(self.slam_brake_strength)
        self.slam_accel_strength = self._nan_detect(self.slam_accel_strength)

        self.count_dict = {
            "zigzag": self.zigzag_count,
            # "shake": self.shake_count,
            "cadence": self.cadence_count,
            "slamBrake": self.slam_brake_count,
            "slamAccelerate": self.slam_accel_count
        }

        self.duration_dict = {
            "zigzag": self.zigzag_duration,
            # "shake": self.shake_duration,
            "cadence": self.cadence_duration,
            "slamBrake": self.slam_brake_duration,
            "slamAccelerate": self.slam_accel_duration
        }

        self.strength_dict = {
            "zigzag": self.zigzag_strength,
            # "shake": self.shake_strength,
            "cadence": self.cadence_strength,
            "slamBrake": self.slam_brake_strength,
            "slamAccelerate": self.slam_accel_strength
        }

        zigzag_list = [self.zigzag_count, self.zigzag_duration, self.zigzag_strength]
        # shake_list = [self.shake_count, self.shake_duration, self.shake_strength]
        cadence_list = [self.cadence_count, self.cadence_duration, self.cadence_strength]
        slam_brake_list = [self.slam_brake_count, self.slam_brake_duration, self.slam_brake_strength]
        slam_accel_list = [self.slam_accel_count, self.slam_accel_duration, self.slam_accel_strength]

        tmp_comf_arr = []
        if "zigzag" in self.metric_list:
            tmp_comf_arr += zigzag_list
            self.discomfort_count += self.zigzag_count
        # if "shake" in self.metric_list:
        #     tmp_comf_arr += shake_list
        #     self.discomfort_count += self.shake_count
        if "cadence" in self.metric_list:
            tmp_comf_arr += cadence_list
            self.discomfort_count += self.cadence_count
        if "slamBrake" in self.metric_list:
            tmp_comf_arr += slam_brake_list
            self.discomfort_count += self.slam_brake_count
        if "slamAccelerate" in self.metric_list:
            tmp_comf_arr += slam_accel_list
            self.discomfort_count += self.slam_accel_count

        comf_arr = [tmp_comf_arr]
        return comf_arr

    def _nan_detect(self, num):
        if math.isnan(num):
            return 0
        return num

    def custom_metric_param_parser(self, param_list):
        """
        param_dict = {
            "paramA": [
                {
                    "kind": "-1",
                    "optimal": "1",
                    "multiple": ["0.5", "5"],
                    "spare1": null,
                    "spare2": null
                }
            ]
        }
        """
        kind_list = []
        optimal_list = []
        multiple_list = []
        spare_list = []
        # spare1_list = []
        # spare2_list = []

        for i in range(len(param_list)):
            kind_list.append(int(param_list[i]['kind']))
            optimal_list.append(float(param_list[i]['optimal']))
            multiple_list.append([float(x) for x in param_list[i]['multiple']])
            spare_list.append([item["param"] for item in param_list[i]["spare"]])
            # spare1_list.append(param_list[i]['spare1'])
            # spare2_list.append(param_list[i]['spare2'])

        result = {
            "kind": kind_list,
            "optimal": optimal_list,
            "multiple": multiple_list,
            "spare": spare_list,
            # "spare1": spare1_list,
            # "spare2": spare2_list
        }
        return result
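
    # Example (illustrative; note the code reads a "spare" list of
    # {"param": ...} entries, while the docstring above still shows the older
    # spare1/spare2 layout):
    #     [{"kind": "-1", "optimal": "1", "multiple": ["0.5", "5"],
    #       "spare": [{"param": None}]}]
    # parses to
    #     {"kind": [-1], "optimal": [1.0], "multiple": [[0.5, 5.0]],
    #      "spare": [[None]]}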

    def custom_metric_score(self, metric, value, param_list):
        """Score a custom metric with the configured score model."""
        param = self.custom_metric_param_parser(param_list)
        self.custom_param_dict[metric] = param

        score_model = self.scoreModel(param['kind'], param['optimal'], param['multiple'], np.array([value]))
        score_sub = score_model.cal_score()
        score = sum(score_sub) / len(score_sub)
        return score

    def comf_score_new(self):
        arr_comf = self.comf_statistic()
        print("\n[平顺性表现及得分情况]")
        print("平顺性各指标值:", [[round(num, 2) for num in row] for row in arr_comf])
        arr_comf = np.array(arr_comf)

        score_model = self.scoreModel(self.kind_list, self.optimal_list, self.multiple_list, arr_comf)
        score_sub = score_model.cal_score()
        score_sub = list(map(lambda x: 80 if np.isnan(x) else x, score_sub))

        metric_list = [x for x in self.metric_list if x in self.config.builtinMetricList]
        score_metric = []
        for i in range(len(metric_list)):
            score_tmp = (score_sub[i * 3 + 0] + score_sub[i * 3 + 1] + score_sub[i * 3 + 2]) / 3
            score_metric.append(round(score_tmp, 2))

        score_metric_dict = {key: value for key, value in zip(metric_list, score_metric)}

        custom_metric_list = list(self.customMetricParam.keys())
        for metric in custom_metric_list:
            value = self.custom_data[metric]['value']
            param_list = self.customMetricParam[metric]
            score = self.custom_metric_score(metric, value, param_list)
            score_metric_dict[metric] = round(score, 2)

        score_metric_dict = {key: score_metric_dict[key] for key in self.metric_list}
        score_metric = list(score_metric_dict.values())

        score_type_dict = {}
        if self.weight_custom:  # custom weights
            score_metric_with_weight_dict = {key: score_metric_dict[key] * self.weight_dict[key] for key in
                                             self.weight_dict}

            for type in self.type_list:
                type_score = sum(
                    value for key, value in score_metric_with_weight_dict.items() if key in self.metric_dict[type])
                score_type_dict[type] = round(type_score, 2)

            score_type_with_weight_dict = {key: score_type_dict[key] * self.weight_type_dict[key] for key in
                                           score_type_dict}
            score_comfort = sum(score_type_with_weight_dict.values())
        else:  # objective weighting
            self.weight_list = cal_weight_from_80(score_metric)
            self.weight_dict = {key: value for key, value in zip(self.metric_list, self.weight_list)}
            score_comfort = cal_score_with_priority(score_metric, self.weight_list, self.priority_list)

            for type in self.type_list:
                type_weight = sum(value for key, value in self.weight_dict.items() if key in self.metric_dict[type])
                self.weight_dict = {key: round(value / type_weight, 4) for key, value in self.weight_dict.items() if
                                    key in self.metric_dict[type]}

                type_score_metric = [value for key, value in score_metric_dict.items() if key in self.metric_dict[type]]
                type_weight_list = [value for key, value in self.weight_dict.items() if key in self.metric_dict[type]]
                type_priority_list = [value for key, value in self.priority_dict.items() if
                                      key in self.metric_dict[type]]

                type_score = cal_score_with_priority(type_score_metric, type_weight_list, type_priority_list)
                score_type_dict[type] = round(type_score, 2)

        score_comfort = round(score_comfort, 2)

        print("平顺性各指标基准值:", self.optimal_list)
        print(f"平顺性得分为:{score_comfort:.2f}分。")
        print(f"平顺性各类型得分为:{score_type_dict}。")
        print(f"平顺性各指标得分为:{score_metric_dict}。")

        return score_comfort, score_type_dict, score_metric_dict

    def zip_time_pairs(self, zip_list, upper_limit=9999):
        zip_time_pairs = zip(self.time_list, zip_list)
        zip_vs_time = [[x, upper_limit if y > upper_limit else y] for x, y in zip_time_pairs if not math.isnan(y)]
        return zip_vs_time
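
    # Example (illustrative): with self.time_list = [0.1, 0.2, 0.3] and
    # zip_list = [5.0, float('nan'), 12000.0], upper_limit=9999 yields
    # [[0.1, 5.0], [0.3, 9999]] -- NaN samples are dropped and large values
    # are clipped to the upper limit.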

    def comf_weight_distribution(self):
        # get weight distribution
        weight_distribution = {}
        weight_distribution["name"] = "平顺性"

        if "comfortLat" in self.type_list:
            lat_weight_indexes_dict = {key: f"{key}({value * 100:.2f}%)" for key, value in self.weight_dict.items() if
                                       key in self.lat_metric_list}

            weight_distribution_lat = {
                "latWeight": f"横向舒适度({self.weight_type_dict['comfortLat'] * 100:.2f}%)",
                "indexes": lat_weight_indexes_dict
            }
            weight_distribution['comfortLat'] = weight_distribution_lat

        if "comfortLon" in self.type_list:
            lon_weight_indexes_dict = {key: f"{key}({value * 100:.2f}%)" for key, value in self.weight_dict.items() if
                                       key in self.lon_metric_list}

            weight_distribution_lon = {
                "lonWeight": f"纵向舒适度({self.weight_type_dict['comfortLon'] * 100:.2f}%)",
                "indexes": lon_weight_indexes_dict
            }
            weight_distribution['comfortLon'] = weight_distribution_lon

        return weight_distribution

    def _get_weight_distribution(self, dimension):
        # get weight distribution
        weight_distribution = {}
        weight_distribution["name"] = self.config.dimension_name[dimension]

        for type in self.type_list:
            type_weight_indexes_dict = {key: f"{self.name_dict[key]}({value * 100:.2f}%)" for key, value in
                                        self.weight_dict.items() if
                                        key in self.metric_dict[type]}

            weight_distribution_type = {
                "weight": f"{self.type_name_dict[type]}({self.weight_type_dict[type] * 100:.2f}%)",
                "indexes": type_weight_indexes_dict
            }
            weight_distribution[type] = weight_distribution_type

        return weight_distribution

    def report_statistic(self):
        """Build the comfort report dict (scores, descriptions, charts)."""
        # report_dict = {
        #     "name": "平顺性",
        #     "weight": f"{self.weight * 100:.2f}%",
        #     "weightDistribution": weight_distribution,
        #     "score": score_comfort,
        #     "level": grade_comfort,
        #     'discomfortCount': self.discomfort_count,
        #     "description1": comf_description1,
        #     "description2": comf_description2,
        #     "description3": comf_description3,
        #     "description4": comf_description4,
        #
        #     "comfortLat": lat_dict,
        #     "comfortLon": lon_dict,
        #
        #     "speData": ego_speed_vs_time,
        #     "speMarkLine": discomfort_slices,
        #
        #     "accData": lon_acc_vs_time,
        #     "accMarkLine": discomfort_acce_slices,
        #
        #     "anvData": yawrate_vs_time,
        #     "anvMarkLine": discomfort_zigzag_slices,
        #
        #     "anaData": yawrate_roc_vs_time,
        #     "anaMarkLine": discomfort_zigzag_slices,
        #
        #     "curData": [cur_ego_path_vs_time, curvature_vs_time],
        #     "curMarkLine": discomfort_shake_slices,
        # }
        # brakePedal_list = self.data_processed.driver_ctrl_data['brakePedal_list']
        # throttlePedal_list = self.data_processed.driver_ctrl_data['throttlePedal_list']
        # steeringWheel_list = self.data_processed.driver_ctrl_data['steeringWheel_list']
        #
        # # common parameter calculate
        # brake_vs_time = self.zip_time_pairs(brakePedal_list, 100)
        # throttle_vs_time = self.zip_time_pairs(throttlePedal_list, 100)
        # steering_vs_time = self.zip_time_pairs(steeringWheel_list)

        report_dict = {
            "name": "平顺性",
            # "weight": f"{self.weight * 100:.2f}%",
            # 'discomfortCount': self.discomfort_count,
        }
        # upper_limit = 40
        # times_upper = 2
        # len_time = len(self.time_list)
        duration = self.time_list[-1]

        # comfort score and grade
        score_comfort, score_type_dict, score_metric_dict = self.comf_score_new()

        # get weight distribution
        # report_dict["weightDistribution"] = self._get_weight_distribution("comfort")

        score_comfort = int(score_comfort) if int(score_comfort) == score_comfort else round(score_comfort, 2)
        grade_comfort = score_grade(score_comfort)
        report_dict["score"] = score_comfort
        report_dict["level"] = grade_comfort

        # comfort data for graphs
        ego_speed_list = self.ego_df['v'].values.tolist()
        ego_speed_vs_time = self.zip_time_pairs(ego_speed_list)
        lon_acc_list = self.ego_df['lon_acc'].values.tolist()
        lon_acc_vs_time = self.zip_time_pairs(lon_acc_list)
        yawrate_list = self.ego_df['speedH'].values.tolist()
        yawrate_vs_time = self.zip_time_pairs(yawrate_list)
        yawrate_roc_list = self.ego_df['accelH'].values.tolist()
        yawrate_roc_vs_time = self.zip_time_pairs(yawrate_roc_list)
        cur_ego_path_vs_time = self.zip_time_pairs(self.cur_ego_path_list)
        curvature_vs_time = self.zip_time_pairs(self.curvature_list)
        # marklines
        # discomfort_df = self.discomfort_df.copy()
        # discomfort_df['type'] = "origin"
        # discomfort_slices = discomfort_df.to_dict('records')
        #
        # discomfort_zigzag_df = self.discomfort_df.copy()
        # discomfort_zigzag_df.loc[discomfort_zigzag_df['type'] != 'zigzag', 'type'] = "origin"
        # discomfort_zigzag_slices = discomfort_zigzag_df.to_dict('records')
        #
        # discomfort_shake_df = self.discomfort_df.copy()
        # discomfort_shake_df.loc[discomfort_shake_df['type'] != 'shake', 'type'] = "origin"
        # discomfort_shake_slices = discomfort_shake_df.to_dict('records')
        #
        # discomfort_acce_df = self.discomfort_df.copy()
        # discomfort_acce_df.loc[discomfort_acce_df['type'] == 'zigzag', 'type'] = "origin"
        # discomfort_acce_df.loc[discomfort_acce_df['type'] == 'shake', 'type'] = "origin"
        # discomfort_acce_slices = discomfort_acce_df.to_dict('records')
        # for description
        good_type_list = []
        bad_type_list = []
        good_metric_list = []
        bad_metric_list = []

        # str for comf description 1&2
        str_uncomf_count = ''
        str_uncomf_over_optimal = ''

        # type_details_dict = {}
        # for type in self.type_list:
        #     bad_type_list.append(type) if score_type_dict[type] < 80 else good_type_list.append(type)
        #
        #     type_dict = {
        #         "name": f"{self.type_name_dict[type]}",
        #     }
        #
        #     builtin_graph_dict = {}
        #     custom_graph_dict = {}
        #
        #     score_type = score_type_dict[type]
        #     grade_type = score_grade(score_type)
        #     type_dict["score"] = score_type
        #     type_dict["level"] = grade_type
        #
        #     type_dict_indexes = {}
        #
        #     flag_acc = False
        #     for metric in self.metric_dict[type]:
        #         bad_metric_list.append(metric) if score_metric_dict[metric] < 80 else good_metric_list.append(metric)
        #
        #         if metric in self.bulitin_metric_list:
        #             # for indexes
        #             type_dict_indexes[metric] = {
        #                 "name": f"{self.name_dict[metric]}({self.unit_dict[metric]})",
        #                 "score": score_metric_dict[metric],
        #                 "numberReal": f"{self.count_dict[metric]}",
        #                 "numberRef": f"{self.optimal1_dict[metric]:.4f}",
        #                 "durationReal": f"{self.duration_dict[metric]:.2f}",
        #                 "durationRef": f"{self.optimal2_dict[metric]:.4f}",
        #                 "strengthReal": f"{self.strength_dict[metric]:.2f}",
        #                 "strengthRef": f"{self.optimal3_dict[metric]}"
        #             }
        #
        #             # for description
        #             str_uncomf_count += f'{self.count_dict[metric]}次{self.name_dict[metric]}行为、'
        #             if self.count_dict[metric] > self.optimal1_dict[metric]:
        #                 over_optimal = ((self.count_dict[metric] - self.optimal1_dict[metric]) / self.optimal1_dict[
        #                     metric]) * 100
        #                 str_uncomf_over_optimal += f'{self.name_dict[metric]}次数比基准值高{over_optimal:.2f}%,'
        #
        #             if self.duration_dict[metric] > self.optimal2_dict[metric]:
        #                 over_optimal = ((self.duration_dict[metric] - self.optimal2_dict[metric]) / self.optimal2_dict[
        #                     metric]) * 100
        #                 str_uncomf_over_optimal += f'{self.name_dict[metric]}时长比基准值高{over_optimal:.2f}%,'
        #
        #             if self.strength_dict[metric] > self.optimal3_dict[metric]:
        #                 over_optimal = ((self.strength_dict[metric] - self.optimal3_dict[metric]) / self.optimal3_dict[
        #                     metric]) * 100
        #                 str_uncomf_over_optimal += f'{self.name_dict[metric]}强度比基准值高{over_optimal:.2f}%;'
        #
        #             # report_dict["speData"] = ego_speed_vs_time
        #             # report_dict["accData"] = lon_acc_vs_time
        #             # report_dict["anvData"] = yawrate_vs_time
        #             # report_dict["anaData"] = yawrate_roc_vs_time
        #             # report_dict["curData"] = [cur_ego_path_vs_time, curvature_vs_time]
        #
        #             # report_dict["speMarkLine"] = discomfort_slices
        #             # report_dict["accMarkLine"] = discomfort_acce_slices
        #             # report_dict["anvMarkLine"] = discomfort_zigzag_slices
        #             # report_dict["anaMarkLine"] = discomfort_zigzag_slices
        #             # report_dict["curMarkLine"] = discomfort_shake_slices
        #
        #             if metric == "zigzag":
        #                 metric_data = {
        #                     "name": "横摆角加速度(rad/s²)",
        #                     "data": yawrate_roc_vs_time,
        #                     "range": f"[0, {self.optimal3_dict[metric]}]",
        #                     # "markLine": discomfort_zigzag_slices
        #                 }
        #                 builtin_graph_dict[metric] = metric_data
        #
        #             elif metric == "shake":
        #                 metric_data = {
        #                     "name": "曲率(1/m)",
        #                     "legend": ["自车轨迹曲率", "车道中心线曲率"],
        #                     "data": [cur_ego_path_vs_time, curvature_vs_time],
        #                     "range": f"[0, {self.optimal3_dict[metric]}]",
        #                     # "markLine": discomfort_shake_slices
        #                 }
        #                 builtin_graph_dict[metric] = metric_data
        #
        #             elif metric in ["cadence", "slamBrake", "slamAccelerate"] and not flag_acc:
        #                 metric_data = {
        #                     "name": "自车纵向加速度(m/s²)",
        #                     "data": lon_acc_vs_time,
        #                     "range": f"[0, {self.optimal3_dict[metric]}]",
        #                     # "markLine": discomfort_acce_slices
        #                 }
        #                 flag_acc = True
        #
        #                 builtin_graph_dict[metric] = metric_data
        #
        #         else:
        #             # for indexes
        #             type_dict_indexes[metric] = {
        #                 "name": f"{self.name_dict[metric]}({self.unit_dict[metric]})",
        #                 "score": score_metric_dict[metric],
        #                 "numberReal": f"{self.custom_data[metric]['tableData']['avg']}",
        #                 "numberRef": f"-",
        #                 "durationReal": f"{self.custom_data[metric]['tableData']['max']}",
        #                 "durationRef": f"-",
        #                 "strengthReal": f"{self.custom_data[metric]['tableData']['min']}",
        #                 "strengthRef": f"-"
        #             }
        #             custom_graph_dict[metric] = self.custom_data[metric]['reportData']
        #
        #     str_uncomf_over_optimal = str_uncomf_over_optimal[:-1] + ";"
        #     type_dict["indexes"] = type_dict_indexes
        #     type_dict["builtin"] = builtin_graph_dict
        #     type_dict["custom"] = custom_graph_dict
        #
        #     type_details_dict[type] = type_dict
        # report_dict["details"] = type_details_dict

        # str for comf description2
        # if grade_comfort == '优秀':
        #     comf_description1 = '机器人在本轮测试中行驶平顺;'
        # elif grade_comfort == '良好':
        #     comf_description1 = '机器人在本轮测试中的表现满⾜设计指标要求;'
        # elif grade_comfort == '一般':
        #     str_bad_metric = string_concatenate(bad_metric_list)
        #     comf_description1 = f'未满足设计指标要求。需要在{str_bad_metric}上进一步优化。在{(self.mileage / 1000):.2f}公里内,共发生{str_uncomf_count[:-1]};'
        # elif grade_comfort == '较差':
        #     str_bad_metric = string_concatenate(bad_metric_list)
        #     comf_description1 = f'机器人行驶极不平顺,未满足设计指标要求。需要在{str_bad_metric}上进一步优化。在{(self.mileage / 1000):.2f}公里内,共发生{str_uncomf_count[:-1]};'
        #
        # if not bad_metric_list:
        #     str_comf_type = string_concatenate(good_metric_list)
        #     comf_description2 = f"{str_comf_type}均表现良好"
        # else:
        #     str_bad_metric = string_concatenate(bad_metric_list)
        #
        #     if not good_metric_list:
        #         comf_description2 = f"{str_bad_metric}表现不佳。其中{str_uncomf_over_optimal}"
        #     else:
        #         str_comf_type = string_concatenate(good_metric_list)
        #         comf_description2 = f"{str_comf_type}表现良好;{str_bad_metric}表现不佳。其中{str_uncomf_over_optimal}"
        #
        # # str for comf description3
        # control_type = []
        # if 'zigzag' in bad_metric_list or 'shake' in bad_metric_list:
        #     control_type.append('横向')
        # if 'cadence' in bad_metric_list or 'slamBrake' in bad_metric_list or 'slamAccelerate' in bad_metric_list:
        #     control_type.append('纵向')
        # str_control_type = '和'.join(control_type)
        #
        # if not control_type:
        #     comf_description3 = f"机器人的横向和纵向控制表现俱佳,行驶平顺"
        # else:
        #     comf_description3 = f"应该优化对机器人的{str_control_type}控制,优化行驶平顺性"
        #
        # uncomf_time = self.discomfort_duration
        # if uncomf_time == 0:
        #     comf_description4 = ""
        # else:
        #     percent4 = uncomf_time / duration * 100
        #     # comf_description4 = f"在{duration}s时间内,机器人有{percent4:.2f}%的时间存在行驶不平顺的情况。"
        #     comf_description4 = f"在{duration}s时间内,机器人有{uncomf_time:.2f}s的时间存在行驶不平顺的情况。"

        # report_dict["description1"] = replace_key_with_value(comf_description1, self.name_dict)
        # report_dict["description2"] = replace_key_with_value(comf_description2, self.name_dict)
        # report_dict["description3"] = comf_description3
        # report_dict["description4"] = comf_description4
        description = "· 在平顺性方面,"
        if any(score_metric_dict[metric] < 80 for metric in self.lon_metric_list):
            description += "线加速度变化剧烈,"
            tmp = [metric for metric in self.lon_metric_list if score_metric_dict[metric] < 80]
            str_tmp = "、".join(tmp)
            description += f"有{str_tmp}情况,需重点优化。"

        if any(score_metric_dict[metric] < 80 for metric in self.lat_metric_list):
            description += "角加速度变化剧烈,"
            tmp = [metric for metric in self.lat_metric_list if score_metric_dict[metric] < 80]
            str_tmp = "、".join(tmp)
            description += f"有{str_tmp}情况,需重点优化。"

        if description == "· 在平顺性方面,":  # fixed: must match the initial value, which includes the "· " prefix
            description += f"线加速度和角加速度变化平顺,表现{grade_comfort}。"

        report_dict["description"] = replace_key_with_value(description, self.name_dict)

        description1 = f"最大值:{self.linear_accel_dict['max']:.4f}m/s²;" \
                       f"最小值:{self.linear_accel_dict['min']:.4f}m/s²;" \
                       f"平均值:{self.linear_accel_dict['avg']:.4f}m/s²"

        description2 = f"最大值:{self.angular_accel_dict['max']:.4f}rad/s²;" \
                       f"最小值:{self.angular_accel_dict['min']:.4f}rad/s²;" \
                       f"平均值:{self.angular_accel_dict['avg']:.4f}rad/s²"

        report_dict["description1"] = description1
        report_dict["description2"] = description2
        plt.figure(figsize=(12, 3))
        plt.plot(self.time_list, self.accel_list, label='Linear Accelerate')
        plt.xlabel('Time(s)')
        plt.ylabel('Linear Accelerate(m/s^2)')
        plt.legend()
        # tighten the layout to remove blank margins
        plt.tight_layout()
        plt.savefig(os.path.join(self.resultPath, "LinearAccelerate.png"))
        plt.close()

        plt.figure(figsize=(12, 3))
        plt.plot(self.time_list, self.accelH_list, label='Angular Accelerate')
        plt.xlabel('Time(s)')
        plt.ylabel('Angular Accelerate(rad/s^2)')
        plt.legend()
        # tighten the layout to remove blank margins
        plt.tight_layout()
        plt.savefig(os.path.join(self.resultPath, "AngularAccelerate.png"))
        plt.close()

        print(report_dict)
        # report_dict['commonData'] = {
        #     # "per": {
        #     #     "name": "刹车/油门踏板开度(百分比)",
        #     #     "legend": ["刹车踏板开度", "油门踏板开度"],
        #     #     "data": [brake_vs_time, throttle_vs_time]
        #     # },
        #     # "ang": {
        #     #     "name": "方向盘转角(角度°)",
        #     #     "data": steering_vs_time
        #     # },
        #     "spe": {
        #         "name": "速度(km/h)",
        #         # "legend": ["自车速度", "目标车速度", "自车与目标车相对速度"],
        #         "data": ego_speed_vs_time
        #     },
        #     # "acc": {
        #     #     "name": "自车纵向加速度(m/s²)",
        #     #     "data": lon_acc_vs_time
        #     # },
        #     # "dis": {
        #     #     "name": "前车距离(m)",
        #     #     "data": distance_vs_time
        #     # }
        # }
        # report_dict["commonMarkLine"] = discomfort_slices
        # self.eval_data = self.ego_df.copy()
        # self.eval_data['playerId'] = 1

        return report_dict

    def get_eval_data(self):
        df = self.eval_data[
            ['simTime', 'simFrame', 'playerId', 'ip_acc', 'ip_dec', 'slam_brake', 'slam_accel', 'cadence']].copy()
        return df
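
# Typical usage (a sketch; assumes a processed-data object shaped like the one
# this module expects -- obj_data, report_info, config, comfort_config and
# driver_ctrl_data -- produced elsewhere in this evaluation pipeline):
#
#     comfort = Comfort(data_processed, custom_data, scoreModel, resultPath)
#     score, type_scores, metric_scores = comfort.comf_score_new()   # prints and returns scores
#     report = comfort.report_statistic()                            # report dict + saved charts
#     eval_df = comfort.get_eval_data()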