#!/usr/bin/env python
# -*- coding: utf-8 -*-
##################################################################
#
# Copyright (c) 2023 CICV, Inc. All Rights Reserved
#
##################################################################
"""
@Authors: zhanghaiwen(zhanghaiwen@china-icv.cn), yangzihao(yangzihao@china-icv.cn)
@Date: 2023/06/25
@Last Modified: 2023/06/25
@Summary: Comfort metrics
"""
import os
import sys
import math
import pandas as pd
import numpy as np
import scipy.signal
import matplotlib.pyplot as plt

plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['SimHei']  # or any other installed font that supports Chinese characters
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box in saved figures

sys.path.append('../common')
sys.path.append('../modules')
sys.path.append('../results')

from data_info import DataInfoList
from score_weight import cal_score_with_priority, cal_weight_from_80
from common import get_interpolation, score_grade, string_concatenate, replace_key_with_value, _cal_max_min_avg


def peak_valley_decorator(method):
    def wrapper(self, *args, **kwargs):
        peak_valley = self._peak_valley_determination(self.df)
        print("peak_valley is", peak_valley)
        pv_list = self.df.loc[peak_valley, ['simTime', 'speedH']].values.tolist()
        print("pv_list is", pv_list)
        if len(pv_list) != 0:
            flag = True
            p_last = pv_list[0]
            for i in range(1, len(pv_list)):
                p_curr = pv_list[i]
                if self._peak_valley_judgment(p_last, p_curr):
                    print("p_curr is", p_curr, p_last)
                    # method(self, p_curr, p_last)
                    # call the wrapped method once per detected peak/valley pair
                    method(self, p_curr, p_last, flag, *args, **kwargs)
                else:
                    p_last = p_curr
                    print("when p_last is", p_last)
            return method
        else:
            flag = False
            p_curr = [0, 0]
            p_last = [0, 0]
            method(self, p_curr, p_last, flag, *args, **kwargs)
            return method
    return wrapper


class Comfort(object):
    """
    Class for achieving comfort metrics for autonomous driving.

    Attributes:
        dataframe: Vehicle driving data, stored in dataframe format.
    """

    def __init__(self, data_processed, scoreModel, resultPath):
        self.eval_data = pd.DataFrame()
        self.data_processed = data_processed
        self.scoreModel = scoreModel
        self.resultPath = resultPath

        # self.data = data_processed.obj_data[1]
        self.data = data_processed.ego_df
        self.mileage = data_processed.report_info['mileage']
        self.ego_df = pd.DataFrame()
        # self.discomfort_df = pd.DataFrame(columns=['start_time', 'end_time', 'start_frame', 'end_frame', 'type'])

        self.config = data_processed.config
        comfort_config = data_processed.comfort_config
        self.comfort_config = comfort_config
        print("self.comfort_config is", self.comfort_config)

        # common data
        self.bulitin_metric_list = self.config.builtinMetricList

        # dimension data
        self.weight_custom = comfort_config['weightCustom']
        self.metric_list = comfort_config['metric']
        self.type_list = comfort_config['type']
        self.type_name_dict = comfort_config['typeName']
        self.name_dict = comfort_config['name']
        self.unit_dict = comfort_config['unit']

        # custom metric data
        # self.customMetricParam = comfort_config['customMetricParam']
        # self.custom_metric_list = list(self.customMetricParam.keys())
        # self.custom_data = custom_data
        # self.custom_param_dict = {}

        # score data
        self.weight = comfort_config['weightDimension']
        self.weight_type_dict = comfort_config['typeWeight']
        self.weight_type_list = comfort_config['typeWeightList']
        self.weight_dict = comfort_config['weight']
        self.weight_list = comfort_config['weightList']
        self.priority_dict = comfort_config['priority']
        self.priority_list = comfort_config['priorityList']
        self.kind_dict = comfort_config['kind']
        self.optimal_dict = comfort_config['optimal']
        # self.optimal1_dict = self.optimal_dict[0]
        # self.optimal2_dict = self.optimal_dict[1]
        # self.optimal3_dict = self.optimal_dict[2]
        self.multiple_dict = comfort_config['multiple']
        self.kind_list = comfort_config['kindList']
        print("self.kind_list is", self.kind_list)
        self.optimal_list = comfort_config['optimalList']
        print("self.optimal_list is", self.optimal_list)
        self.multiple_list = comfort_config['multipleList']

        # metric data
        self.metric_dict = comfort_config['typeMetricDict']
        print("self.metric_dict is", self.metric_dict)
        # print("self.metric_dict is", self.metric_dict)
        self.zigzag_metric_list = self.metric_dict['zigzag']
        # self.cadence_metric_list = self.metric_dict['cadence']
        # self.zigzag_metric_list = self.metric_dict['comfortLat']['zigzag']
        # print("self.zigzag_metric_list is", self.zigzag_metric_list)
        self.cadence_metric_list = ["cadence", "slamBrake", "slamAccelerate"]

        self.time_list = data_processed.driver_ctrl_data['time_list']
        self.frame_list = data_processed.driver_ctrl_data['frame_list']

        self.speed_list = []
        self.commandSpeed_list = []
        self.accel_list = []
        self.accelH_list = []

        self.linear_accel_dict = dict()
        self.angular_accel_dict = dict()
        self.speed_instruction_dict = dict()

        self.count_dict = {}
        self.duration_dict = {}
        self.strength_dict = {}

        self.discomfort_count = 0
        self.zigzag_count = 0
        # self.shake_count = 0
        self.cadence_count = 0
        self.slam_brake_count = 0
        self.slam_accel_count = 0
        self.speed_instruction_jump_count = 0

        self.zigzag_time = []
        # self.shake_count = 0
        self.cadence_time = []
        self.slam_brake_time = []
        self.slam_accel_time = []
        self.speed_instruction_jump_time = []

        # self.zigzag_strength = 0
        # self.shake_strength = 0
        # self.cadence_strength = 0
        # self.slam_brake_strength = 0
        # self.slam_accel_strength = 0

        self.discomfort_duration = 0
        # self.zigzag_duration = 0
        # self.shake_duration = 0
        # self.cadence_duration = 0
        # self.slam_brake_duration = 0
        # self.slam_accel_duration = 0

        self.zigzag_time_list = []
        # self.zigzag_frame_list = []
        self.zigzag_stre_list = []
        self.cur_ego_path_list = []
        self.curvature_list = []
        self.discomfort_count_list = []

        self._get_data()
        self._comf_param_cal()

    def _get_data(self):
        """
        Select the columns needed for comfort evaluation from the processed ego data.
        """
        comfort_info_list = DataInfoList.COMFORT_INFO
        self.ego_df = self.data[comfort_info_list].copy()
        # self.df = self.ego_df.set_index('simFrame')  # keep the original csv index
        self.df = self.ego_df.reset_index(drop=True)  # use a fresh 0-based index

    def _cal_cur_ego_path(self, row):
        try:
            divide = (row['speedX'] ** 2 + row['speedY'] ** 2) ** (3 / 2)
            if not divide:
                res = None
            else:
                res = (row['speedX'] * row['accelY'] - row['speedY'] * row['accelX']) / divide
        except:
            res = None
        return res
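
    # _cal_cur_ego_path evaluates the standard signed curvature of the planar ego path,
    # kappa = (vx * ay - vy * ax) / (vx^2 + vy^2)^(3/2). A quick check with made-up values
    # for steady circular motion at 5 m/s on a 10 m radius (centripetal acceleration
    # v^2 / r = 2.5 m/s^2):
    #
    #   row = {'speedX': 5.0, 'speedY': 0.0, 'accelX': 0.0, 'accelY': 2.5}
    #   (5.0 * 2.5 - 0.0 * 0.0) / (5.0 ** 2 + 0.0 ** 2) ** 1.5  # 0.1 == 1 / 10 m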

    def _comf_param_cal(self):
        """
        Pre-compute the per-frame quantities used by the comfort detectors.
        """
        # for i in range(len(self.optimal_list)):
        #     if i % 3 == 2:
        #         continue
        #     else:
        #         self.optimal_list[i] = round(self.optimal_list[i] * self.mileage / 100000, 8)
        # self.optimal_list = [round(self.optimal_list[i] * self.mileage / 100000, 8) for i in range(len(self.optimal_list))]
        self.optimal_dict = {key: value * self.mileage / 100000 for key, value in self.optimal_dict.copy().items()}
        # self.optimal1_dict = {key: value * self.mileage / 100000 for key, value in self.optimal1_dict.copy().items()}
        # self.optimal2_dict = {key: value * self.mileage / 100000 for key, value in self.optimal2_dict.copy().items()}

        # [log]
        self.ego_df['ip_acc'] = self.ego_df['v'].apply(get_interpolation, point1=[18, 4], point2=[72, 2])
        self.ego_df['ip_dec'] = self.ego_df['v'].apply(get_interpolation, point1=[18, -5], point2=[72, -3.5])
        self.ego_df['slam_brake'] = self.ego_df.apply(
            lambda row: self._slam_brake_process(row['lon_acc'], row['ip_dec']), axis=1)
        self.ego_df['slam_accel'] = self.ego_df.apply(
            lambda row: self._slam_accelerate_process(row['lon_acc'], row['ip_acc']), axis=1)
        self.ego_df['cadence'] = self.ego_df.apply(
            lambda row: self._cadence_process_new(row['lon_acc'], row['ip_acc'], row['ip_dec']), axis=1)

        self.speed_list = self.ego_df['v'].values.tolist()
        self.commandSpeed_list = self.ego_df['cmd_lon_v'].values.tolist()
        self.accel_list = self.ego_df['accel'].values.tolist()
        self.accelH_list = self.ego_df['accelH'].values.tolist()

        v_jump_threshold = 0.5
        self.ego_df['cmd_lon_v_diff'] = self.ego_df['cmd_lon_v'].diff()
        self.ego_df['cmd_v_jump'] = (abs(self.ego_df['cmd_lon_v_diff']) > v_jump_threshold).astype(int)

        self.linear_accel_dict = _cal_max_min_avg(self.ego_df['accel'].dropna().values.tolist())
        self.angular_accel_dict = _cal_max_min_avg(self.ego_df['accelH'].dropna().values.tolist())
        self.speed_instruction_dict = _cal_max_min_avg(self.ego_df['cmd_lon_v_diff'].dropna().values.tolist())
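
    # get_interpolation comes from common and is not shown in this file; the calls above read
    # as a linear interpolation of a threshold against speed, e.g. the slam-accelerate threshold
    # falling from 4 at v = 18 down to 2 at v = 72 (units as in the source data). A minimal
    # sketch of that assumed behaviour (illustrative only, not the actual common.get_interpolation):
    #
    #   def linear_threshold(x, point1, point2):
    #       (x0, y0), (x1, y1) = point1, point2
    #       return y0 + (y1 - y0) * (x - x0) / (x1 - x0)
    #
    #   linear_threshold(45, [18, 4], [72, 2])  # 3.0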

    def _peak_valley_determination(self, df):
        """
        Determine the peak and valley of the vehicle based on its current angular velocity.

        Parameters:
            df: Dataframe containing the vehicle angular velocity.

        Returns:
            peak_valley: List of indices representing peaks and valleys.
        """
        peaks, _ = scipy.signal.find_peaks(df['speedH'], height=0.01, distance=1, prominence=0.01)
        valleys, _ = scipy.signal.find_peaks(-df['speedH'], height=0.01, distance=1, prominence=0.01)
        peak_valley = sorted(list(peaks) + list(valleys))
        return peak_valley
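
    # Standalone illustration (made-up yaw-rate samples) of the two find_peaks calls above;
    # scipy.signal.find_peaks returns the peak indices plus a properties dict, which is
    # discarded here, and negating the signal turns valleys into peaks:
    #
    #   import numpy as np
    #   import scipy.signal
    #   speedH = np.array([0.0, 0.5, -0.5, 0.6, -0.6, 0.0])
    #   peaks, _ = scipy.signal.find_peaks(speedH, height=0.01, distance=1, prominence=0.01)
    #   valleys, _ = scipy.signal.find_peaks(-speedH, height=0.01, distance=1, prominence=0.01)
    #   sorted(list(peaks) + list(valleys))  # [1, 2, 3, 4]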

    def is_not_on_any_interval_endpoint(self, number, intervals):
        # walk through every given interval
        for interval in intervals:
            interval_start, interval_end = interval
            # return False if the number equals either endpoint of the interval
            if number == interval_start or number == interval_end:
                return False
        # the number is not an endpoint of any interval
        return True

    def _peak_valley_judgment(self, p_last, p_curr, tw=6000, avg=0.4):
        """
        Determine if the given peaks and valleys satisfy certain conditions.

        Parameters:
            p_last: Previous peak or valley data point.
            p_curr: Current peak or valley data point.
            tw: Threshold time difference between peaks and valleys.
            avg: Angular velocity gap threshold.

        Returns:
            Boolean indicating whether the conditions are satisfied.
        """
        t_diff = p_curr[0] - p_last[0]
        v_diff = abs(p_curr[1] - p_last[1])
        s = p_curr[1] * p_last[1]
        zigzag_flag = t_diff < tw and v_diff > avg and s < 0
        if zigzag_flag and self.is_not_on_any_interval_endpoint(p_last[0], self.zigzag_time_list):
            self.zigzag_time_list.append([p_last[0], p_curr[0]])
        self.zigzag_time = self.zigzag_time_list
        return zigzag_flag
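
    # Illustration of the zigzag condition with a made-up (simTime, speedH) pair: the yaw
    # rate swings from +0.3 to -0.3, so the sign flips, the gap exceeds avg, and the time
    # difference stays inside the window tw:
    #
    #   p_last, p_curr = [12.0, 0.3], [12.5, -0.3]
    #   t_diff = p_curr[0] - p_last[0]        # 0.5   < tw (6000)
    #   v_diff = abs(p_curr[1] - p_last[1])   # 0.6   > avg (0.4)
    #   s = p_curr[1] * p_last[1]             # -0.09 < 0   -> flagged as zigzag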

    @peak_valley_decorator
    def zigzag_count_func(self, p_curr, p_last, flag=True):
        """
        Count the number of zigzag movements.

        Parameters:
            p_curr: Current peak or valley data point.
            p_last: Previous peak or valley data point.
            flag: Whether a valid peak/valley pair was found.

        Returns:
            zigzag_count: Number of zigzag movements.
        """
        if flag:
            self.zigzag_count += 1
        else:
            self.zigzag_count += 0

    @peak_valley_decorator
    def cal_zigzag_strength_strength(self, p_curr, p_last, flag=True):
        """
        Calculate various strength statistics.

        Returns:
            Tuple containing maximum strength, minimum strength,
            average strength, and 99th percentile strength.
        """
        if flag:
            v_diff = abs(p_curr[1] - p_last[1])
            t_diff = p_curr[0] - p_last[0]
            self.zigzag_stre_list.append(v_diff / t_diff)  # average angular acceleration over the swing
        else:
            self.zigzag_stre_list = []

    def _cadence_process(self, lon_acc_roc, ip_dec_roc):
        if abs(lon_acc_roc) >= abs(ip_dec_roc) or abs(lon_acc_roc) < 1:
            return np.nan
        # elif abs(lon_acc_roc) == 0:
        elif abs(lon_acc_roc) == 0:
            return 0
        elif lon_acc_roc > 0 and lon_acc_roc < -ip_dec_roc:
            return 1
        elif lon_acc_roc < 0 and lon_acc_roc > ip_dec_roc:
            return -1

    def _slam_brake_process(self, lon_acc, ip_dec):
        if lon_acc - ip_dec < 0:
            return 1
        else:
            return 0

    def _slam_accelerate_process(self, lon_acc, ip_acc):
        if lon_acc - ip_acc > 0:
            return 1
        else:
            return 0

    def _cadence_process_new(self, lon_acc, ip_acc, ip_dec):
        if abs(lon_acc) < 1 or lon_acc > ip_acc or lon_acc < ip_dec:
            return np.nan
        # elif abs(lon_acc_roc) == 0:
        elif abs(lon_acc) == 0:
            return 0
        elif lon_acc > 0 and lon_acc < ip_acc:
            return 1
        elif lon_acc < 0 and lon_acc > ip_dec:
            return -1
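
    # A few made-up samples of how _cadence_process_new labels a frame, with ip_acc = 3
    # and ip_dec = -4 (thresholds in the same units as lon_acc):
    #
    #   lon_acc =  0.5  -> nan  (below 1, too small to count as a cadence feature point)
    #   lon_acc =  2.0  -> 1    (noticeable acceleration within the slam threshold)
    #   lon_acc = -2.0  -> -1   (noticeable deceleration within the slam threshold)
    #   lon_acc =  4.0  -> nan  (beyond ip_acc, handled by the slam-accelerate detector)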

    def _cadence_detector(self):
        """
        Sudden acceleration changes: accelerate then brake, brake then accelerate,
        accelerate then stop, brake then stop.
        Cadence (jerkiness): several sudden changes of the acceleration rate within 2 s.
        Approach: find every feature point, then use the 2 s window behind each point
        to count the frequency and avoid wasted computation.
        - filter out the feature points
        - cluster feature points by time: gaps larger than 1 s start a new group,
          gaps of 1 s or less stay in the same group
        - drop groups with fewer than 3 feature points
        """
        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'cadence']].copy()
        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'cadence']].copy()
        time_list = data['simTime'].values.tolist()

        data = data[data['cadence'].notna()]  # drop frames without a cadence label
        data['cadence_diff'] = data['cadence'].diff()
        data.dropna(subset='cadence_diff', inplace=True)
        data = data[data['cadence_diff'] != 0]

        t_list = data['simTime'].values.tolist()
        f_list = data['simFrame'].values.tolist()

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or t_list[i] - t_list[i - 1] <= 1:  # feature points within 1 s belong to the same cadence group
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        group_time.append(sub_group_time)
        print("group_time is", group_time)
        group_frame.append(sub_group_frame)

        group_time = [g for g in group_time if len(g) >= 1]  # a single feature point already counts as one cadence event
        print("group_time_new is", group_time)
        # group_frame = [g for g in group_frame if len(g) >= 1]

        # values for the report charts
        cadence_time = [[g[0], g[-1]] for g in group_time]
        # cadence_frame = [[g[0], g[-1]] for g in group_frame]

        # re-expand each cadence group into the full list of timestamps it covers
        cadence_time_list = [time for pair in cadence_time for time in time_list if pair[0] <= time <= pair[1]]
        print("cadence_time_list is", cadence_time_list)

        # stre_list = []
        freq_list = []
        for g in group_time:
            # calculate strength
            g_df = data[data['simTime'].isin(g)]
            # strength = g_df['lon_acc'].abs().mean()
            # stre_list.append(strength)

            # calculate frequency
            cnt = len(g)
            t_start = g_df['simTime'].iloc[0]
            t_end = g_df['simTime'].iloc[-1]
            t_delta = t_end - t_start
            frequency = cnt / t_delta if t_delta > 0 else cnt  # single-point groups have a zero-length window
            freq_list.append(frequency)

        self.cadence_count = len(freq_list)
        # cadence_stre = sum(stre_list) / len(stre_list) if stre_list else 0
        self.cadence_time = group_time
        return cadence_time_list
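
    # Minimal standalone sketch of the 1 s clustering loop above, with made-up feature-point
    # times; the gap between 3.9 s and 7.2 s exceeds 1 s, so two cadence groups come out:
    #
    #   t_list = [3.0, 3.4, 3.9, 7.2, 7.8]
    #   groups, sub = [], []
    #   for i, t in enumerate(t_list):
    #       if not sub or t - t_list[i - 1] <= 1:
    #           sub.append(t)
    #       else:
    #           groups.append(sub)
    #           sub = [t]
    #   groups.append(sub)
    #   groups  # [[3.0, 3.4, 3.9], [7.2, 7.8]]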

    def _slam_brake_detector(self):
        # count the segments where slam_brake stays 1 and record the frame ID at the start of each segment
        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'ip_dec_roc', 'slam_brake']].copy()
        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'ip_dec', 'slam_brake']].copy()
        # data['slam_diff'] = data['slam_brake'].diff()
        # res_df = data[data['slam_diff'] == 1]
        res_df = data[data['slam_brake'] == 1]
        t_list = res_df['simTime'].values
        f_list = res_df['simFrame'].values.tolist()

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or f_list[i] - f_list[i - 1] <= 1:  # consecutive frames belong to the same braking event
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        group_time.append(sub_group_time)
        group_frame.append(sub_group_frame)

        group_time = [g for g in group_time if len(g) >= 2]  # at least two frames count as one slam-brake event
        # group_frame = [g for g in group_frame if len(g) >= 2]

        time_list = [element for sublist in group_time for element in sublist]
        self.slam_brake_count = len(group_time)  # / self.mileage  # * 1000000
        self.slam_brake_time = group_time
        return time_list

    def _slam_accel_detector(self):
        # count the segments where slam_accel stays 1 and record the frame ID at the start of each segment
        # data = self.ego_df[['simTime', 'simFrame', 'lon_acc_roc', 'ip_acc_roc', 'slam_accel']].copy()
        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'ip_acc', 'slam_accel']].copy()
        # data['slam_diff'] = data['slam_accel'].diff()
        # res_df = data.loc[data['slam_diff'] == 1]
        res_df = data.loc[data['slam_accel'] == 1]
        t_list = res_df['simTime'].values
        f_list = res_df['simFrame'].values.tolist()

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or f_list[i] - f_list[i - 1] <= 1:  # consecutive frames belong to the same acceleration event
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        group_time.append(sub_group_time)
        group_frame.append(sub_group_frame)

        group_time = [g for g in group_time if len(g) >= 2]
        # group_frame = [g for g in group_frame if len(g) >= 2]

        time_list = [element for sublist in group_time for element in sublist]
        self.slam_accel_count = len(group_time)  # / self.mileage  # * 1000000
        print("slam_accel group_time is", group_time)
        self.slam_accel_time = group_time
        return time_list

    def _speed_instruction_jump_detector(self):
        data = self.ego_df[['simTime', 'simFrame', 'cmd_lon_v', 'cmd_lon_v_diff', 'cmd_v_jump']].copy()
        # data['slam_diff'] = data['slam_accel'].diff()
        # res_df = data.loc[data['slam_diff'] == 1]
        res_df = data.loc[data['cmd_v_jump'] == 1]
        t_list = res_df['simTime'].values
        f_list = res_df['simFrame'].values.tolist()

        group_time = []
        group_frame = []
        sub_group_time = []
        sub_group_frame = []
        for i in range(len(f_list)):
            if not sub_group_time or f_list[i] - f_list[i - 1] <= 10:  # frames within 10 of each other belong to the same jump event
                sub_group_time.append(t_list[i])
                sub_group_frame.append(f_list[i])
            else:
                group_time.append(sub_group_time)
                group_frame.append(sub_group_frame)
                sub_group_time = [t_list[i]]
                sub_group_frame = [f_list[i]]
        group_time.append(sub_group_time)
        group_frame.append(sub_group_frame)

        group_time = [g for g in group_time if len(g) >= 2]
        # group_frame = [g for g in group_frame if len(g) >= 2]

        time_list = [element for sublist in group_time for element in sublist]
        self.speed_instruction_jump_count = len(group_time)  # / self.mileage  # * 1000000
        print("speed_instruction_jump group_time is", group_time)
        self.speed_instruction_jump_time = group_time
        return time_list

    def comf_statistic(self):
        """
        Aggregate the per-event detectors into discomfort counts and duration.
        """
        # df = self.ego_df[['simTime', 'cur_diff', 'lon_acc', 'lon_acc_roc', 'accelH']].copy()
        df = self.ego_df[['simTime', 'lon_acc', 'lon_acc_roc', 'accelH']].copy()

        self.zigzag_count_func()
        self.cal_zigzag_strength_strength()
        # if self.zigzag_time_list:
        #     zigzag_df = pd.DataFrame(self.zigzag_time_list, columns=['start_time', 'end_time'])
        #     zigzag_df = get_frame_with_time(zigzag_df, self.ego_df)
        #     zigzag_df['type'] = 'zigzag'
        #     self.discomfort_df = pd.concat([self.discomfort_df, zigzag_df], ignore_index=True)
        #     discomfort_df = pd.concat([time_df, frame_df], axis=1)
        #     self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)

        zigzag_t_list = []
        # the zigzag list only holds [t_start, t_end] pairs; expand them into the full time list
        t_list = df['simTime'].values.tolist()
        for t_start, t_end in self.zigzag_time_list:
            index_1 = t_list.index(t_start)
            index_2 = t_list.index(t_end)
            zigzag_t_list.extend(t_list[index_1:index_2 + 1])
        zigzag_t_list = list(set(zigzag_t_list))

        # shake_t_list = self._shake_detector()
        cadence_t_list = self._cadence_detector()
        print("cadence_t_list is", cadence_t_list)
        slam_brake_t_list = self._slam_brake_detector()
        slam_accel_t_list = self._slam_accel_detector()
        speed_instruction_jump_t_list = self._speed_instruction_jump_detector()
        print("speed_instruction_jump_t_list is", speed_instruction_jump_t_list)

        discomfort_time_list = zigzag_t_list + cadence_t_list + slam_brake_t_list + slam_accel_t_list + speed_instruction_jump_t_list
        discomfort_time_list = sorted(set(discomfort_time_list))  # deduplicate, then sort

        time_diff = self.time_list[3] - self.time_list[2]
        # time_diff = 0.4
        self.discomfort_duration = len(discomfort_time_list) * time_diff

        self.count_dict = {
            "zigzag": len(self.zigzag_time),
            # "shake": self.shake_count,
            "cadence": self.cadence_count,
            "slamBrake": self.slam_brake_count,
            "slamAccelerate": self.slam_accel_count,
            "speedInstructionJump": self.speed_instruction_jump_count
        }

        tmp_comf_arr = [len(self.zigzag_time), self.cadence_count, self.slam_brake_count, self.slam_accel_count,
                        self.speed_instruction_jump_count]
        self.discomfort_count = sum(tmp_comf_arr)
        self.discomfort_count_list = [len(self.zigzag_time), self.cadence_count, self.slam_brake_count,
                                      self.slam_accel_count, self.speed_instruction_jump_count]

        comf_arr = [tmp_comf_arr]
        return comf_arr

    # def _nan_detect(self, num):
    #     if math.isnan(num):
    #         return 0
    #     return num

    def comf_score_new(self):
        arr_comf = self.comf_statistic()
        print("\n[平顺性表现及得分情况]")
        print("平顺性各指标值:", [[round(num, 2) for num in row] for row in arr_comf])
        arr_comf = np.array(arr_comf)
        # score_model = self.scoreModel(self.kind_list, self.optimal_list, self.multiple_list, arr_comf)
        # score_sub = score_model.cal_score()
        # print("score_sub is", score_sub)

        score_metric = list(map(lambda x: 0 if x > 0 else 100, self.discomfort_count_list))
        # score_metric = list(map(lambda x: 80 if np.isnan(x) else x, score_sub))
        print("score_metric is", score_metric)

        metric_list = [x for x in self.metric_list if x in self.config.builtinMetricList]
        print("metric_list is", metric_list)
        score_metric_dict = {key: value for key, value in zip(metric_list, score_metric)}
        # custom_metric_list = list(self.customMetricParam.keys())
        # for metric in custom_metric_list:
        #     value = self.custom_data[metric]['value']
        #     param_list = self.customMetricParam[metric]
        #     score = self.custom_metric_score(metric, value, param_list)
        #     score_metric_dict[metric] = round(score, 2)
        # score_metric_dict = {key: score_metric_dict[key] for key in self.metric_list}
        # score_metric = list(score_metric_dict.values())

        score_type_dict = {}
        if self.weight_custom:  # custom weights
            score_metric_with_weight_dict = {key: score_metric_dict[key] * self.weight_dict[key] for key in
                                             self.weight_dict}
            print("score_metric_with_weight_dict is", score_metric_with_weight_dict)
            print("self.weight_dict is", self.weight_dict)
            for type1 in self.type_list:
                # type_score = sum(
                #     value for key, value in score_metric_with_weight_dict.items() if key in self.metric_dict[type])
                score_list = [float(value) for key, value in score_metric_with_weight_dict.items() if
                              key in self.metric_dict[type1]]
                type_score = sum(score_list)
                print("type_score is", "{:.2f}".format(type_score))
                score_type_dict[type1] = type_score
            score_type_with_weight_dict = {key: score_type_dict[key] * self.weight_type_dict[key] for key in
                                           score_type_dict}
            score_comfort = sum(score_type_with_weight_dict.values())
        else:  # objective weighting
            self.weight_list = cal_weight_from_80(score_metric)
            self.weight_dict = {key: value for key, value in zip(self.metric_list, self.weight_list)}
            score_comfort = cal_score_with_priority(score_metric, self.weight_list, self.priority_list)
            for type1 in self.type_list:
                type_weight = sum(value for key, value in self.weight_dict.items() if key in self.metric_dict[type1])
                self.weight_dict = {key: round(value / type_weight, 4) for key, value in self.weight_dict.items() if
                                    key in self.metric_dict[type1]}
                type_score_metric = [value for key, value in score_metric_dict.items() if key in self.metric_dict[type1]]
                type_weight_list = [value for key, value in self.weight_dict.items() if key in self.metric_dict[type1]]
                type_priority_list = [value for key, value in self.priority_dict.items() if
                                      key in self.metric_dict[type1]]
                type_score = cal_score_with_priority(type_score_metric, type_weight_list, type_priority_list)
                score_type_dict[type1] = np.around(type_score, decimals=2)

        score_comfort = round(score_comfort, 2)
        print("平顺性各指标基准值:", self.optimal_list)
        print(f"平顺性得分为:{score_comfort:.2f}分。")
        print(f"平顺性各类型得分为:{score_type_dict}。")
        print(f"平顺性各指标得分为:{score_metric_dict}。")
        return score_comfort, score_type_dict, score_metric_dict
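
    # The per-metric scoring above is all-or-nothing: any detected event zeroes the metric,
    # otherwise it scores 100. Illustration with a made-up discomfort_count_list ordered as
    # [zigzag, cadence, slamBrake, slamAccelerate, speedInstructionJump]:
    #
    #   counts = [0, 2, 0, 1, 0]
    #   [0 if x > 0 else 100 for x in counts]  # [100, 0, 100, 0, 100]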

    def zip_time_pairs(self, zip_list, upper_limit=9999):
        zip_time_pairs = zip(self.time_list, zip_list)
        zip_vs_time = [[x, upper_limit if y > upper_limit else y] for x, y in zip_time_pairs if not math.isnan(y)]
        return zip_vs_time

    def report_statistic(self):
        """
        Build the comfort section of the evaluation report: scores, per-type
        descriptions and the diagnostic plots.

        Returns:
            report_dict: Dictionary with the comfort report content.
        """
        # report_dict = {
        #     "name": "平顺性",
        #     "weight": f"{self.weight * 100:.2f}%",
        #     "weightDistribution": weight_distribution,
        #     "score": score_comfort,
        #     "level": grade_comfort,
        #     'discomfortCount': self.discomfort_count,
        #     "description1": comf_description1,
        #     "description2": comf_description2,
        #     "description3": comf_description3,
        #     "description4": comf_description4,
        #
        #     "comfortLat": lat_dict,
        #     "comfortLon": lon_dict,
        #
        #     "speData": ego_speed_vs_time,
        #     "speMarkLine": discomfort_slices,
        #
        #     "accData": lon_acc_vs_time,
        #     "accMarkLine": discomfort_acce_slices,
        #
        #     "anvData": yawrate_vs_time,
        #     "anvMarkLine": discomfort_zigzag_slices,
        #
        #     "anaData": yawrate_roc_vs_time,
        #     "anaMarkLine": discomfort_zigzag_slices,
        #
        #     "curData": [cur_ego_path_vs_time, curvature_vs_time],
        #     "curMarkLine": discomfort_shake_slices,
        # }
        # brakePedal_list = self.data_processed.driver_ctrl_data['brakePedal_list']
        # throttlePedal_list = self.data_processed.driver_ctrl_data['throttlePedal_list']
        # steeringWheel_list = self.data_processed.driver_ctrl_data['steeringWheel_list']
        #
        # # common parameter calculate
        # brake_vs_time = self.zip_time_pairs(brakePedal_list, 100)
        # throttle_vs_time = self.zip_time_pairs(throttlePedal_list, 100)
        # steering_vs_time = self.zip_time_pairs(steeringWheel_list)

        report_dict = {
            "name": "平顺性",
            "weight": f"{self.weight * 100:.2f}%",
            # 'discomfortCount': self.discomfort_count,
        }
        score_comfort, score_type_dict, score_metric_dict = self.comf_score_new()
        score_comfort = int(score_comfort) if int(score_comfort) == score_comfort else round(score_comfort, 2)
        grade_comfort = score_grade(score_comfort)
        report_dict["score"] = score_comfort
        report_dict["level"] = grade_comfort

        description = f"· 在平顺性方面,得分{score_comfort:.2f}分,表现{grade_comfort}。"
        is_good = True
        is_semicolon = False
        if any(score_metric_dict[metric] < 80 for metric in self.cadence_metric_list):
            is_good = False
            is_semicolon = True
            description += "线加速度变化剧烈,"
            tmp = [metric for metric in self.cadence_metric_list if score_metric_dict[metric] < 80]
            str_tmp = "、".join(tmp)
            description += f"有{str_tmp}情况,需重点优化"
        if any(score_metric_dict[metric] < 80 for metric in self.zigzag_metric_list):
            is_good = False
            if is_semicolon:
                description += ";"
                is_semicolon = False
            description += "角加速度变化剧烈,"
            tmp = [metric for metric in self.zigzag_metric_list if score_metric_dict[metric] < 80]
            str_tmp = "、".join(tmp)
            description += f"有{str_tmp}情况,需重点优化。"
        else:
            if is_semicolon:
                description += f"。"
        if is_good:
            description += f"线加速度和角加速度变化平顺,表现{grade_comfort}。"
        report_dict["description"] = replace_key_with_value(description, self.name_dict)

        # indexes
        description1 = f"次数:{len(self.zigzag_time)}次"
        if len(self.zigzag_time) > 0:
            print("self.zigzag_time is", self.zigzag_time)
            tmp_zigzag = [str("{:.2f}".format(t_zigzag[0])) + 's' for t_zigzag in self.zigzag_time]
            str_tmp_zigzag = "、".join(tmp_zigzag)
            description1 += f"\n在{str_tmp_zigzag}处发生画龙行为"
        else:
            description1 += f"\n没有发生画龙行为"

        description2 = f"次数:{self.cadence_count}次"
        if len(self.cadence_time) > 0:
            print("self.cadence_time is", self.cadence_time)
            tmp_cadence = [str("{:.2f}".format(t_cadence[0])) + 's' for t_cadence in self.cadence_time]
            str_tmp_cadence = "、".join(tmp_cadence)
            description2 += f"\n在{str_tmp_cadence}处发生顿挫行为"
        else:
            description2 += f"\n没有发生顿挫行为"

        description3 = f"次数: {self.slam_brake_count + self.slam_accel_count}次"
        if len(self.slam_brake_time) > 0:
            # print("self.slam_brake_time is", self.slam_brake_time)
            tmp_slam_brake = [str("{:.2f}".format(t_slam_brake[0])) + 's' for t_slam_brake in self.slam_brake_time]
            str_tmp_slam_brake = "、".join(tmp_slam_brake)
            description3 += f"\n在{str_tmp_slam_brake}处发生急刹行为"
        else:
            description3 += f"\n没有发生急刹行为"
        if len(self.slam_accel_time) > 0:
            slam_accel_brake = [str("{:.2f}".format(t_slam_accel[0])) + 's' for t_slam_accel in self.slam_accel_time]
            str_tmp_slam_accel = "、".join(slam_accel_brake)
            description3 += f"\n在{str_tmp_slam_accel}处发生急加速行为"
        else:
            description3 += f"\n没有发生急加速行为"

        description4 = f"次数: {self.speed_instruction_jump_count}次"
        if len(self.speed_instruction_jump_time) > 0:
            print("self.speed_instruction_jump_time is", self.speed_instruction_jump_time)
            tmp_speed_instruction_jump = [str("{:.2f}".format(t_speed_instruction_jump[0])) + 's' for
                                          t_speed_instruction_jump in self.speed_instruction_jump_time]
            str_tmp_speed_instruction_jump = "、".join(tmp_speed_instruction_jump)
            description4 += f"\n在{str_tmp_speed_instruction_jump}处发生指令跳变"
        else:
            description4 += f"\n没有发生指令跳变"

        zigzag_index = {
            "weight": self.weight_type_dict['zigzag'],
            "score": score_type_dict['zigzag'],
            "description": description1
        }
        cadence_index = {
            "weight": self.weight_type_dict['cadence'],
            "score": score_type_dict['cadence'],
            "description": description2
        }
        sharpchangeofspeed_index = {
            "weight": self.weight_type_dict['sharpchangeofspeed'],
            "score": score_type_dict['sharpchangeofspeed'],
            "description": description3
        }
        speedInstruction_index = {
            "weight": self.weight_type_dict['comfortSpeed'],
            "score": score_type_dict['comfortSpeed'],
            "description": description4
        }

        indexes_dict = {
            "zigzag": zigzag_index,
            "cadence": cadence_index,
            "sharpchangeofspeed": sharpchangeofspeed_index,
            "comfortSpeed": speedInstruction_index
        }
        report_dict["indexes"] = indexes_dict
        # LinearAccelerate.png
        plt.figure(figsize=(12, 3))
        plt.plot(self.time_list, self.accel_list)
        plt.xlabel('时间(s)')
        plt.ylabel('线加速度(m/s^2)')
        # plt.legend()
        plt.tight_layout()  # tighten the layout to remove blank margins
        plt.savefig(os.path.join(self.resultPath, "LinearAccelerate.png"))
        plt.close()

        # AngularAccelerate.png
        plt.figure(figsize=(12, 3))
        plt.plot(self.time_list, self.accelH_list)
        plt.xlabel('时间(s)')
        plt.ylabel('角加速度(rad/s^2)')
        # plt.legend()
        plt.tight_layout()  # tighten the layout to remove blank margins
        plt.savefig(os.path.join(self.resultPath, "AngularAccelerate.png"))
        plt.close()

        # Speed.png
        plt.figure(figsize=(12, 3))
        plt.plot(self.time_list, self.speed_list)
        plt.xlabel('时间(s)')
        plt.ylabel('速度(m/s)')
        # plt.legend()
        plt.tight_layout()  # tighten the layout to remove blank margins
        plt.savefig(os.path.join(self.resultPath, "Speed.png"))
        plt.close()

        # CommandSpeed.png
        plt.figure(figsize=(12, 3))
        plt.plot(self.time_list, self.commandSpeed_list)
        plt.xlabel('时间(s)')
        plt.ylabel('控制指令速度(m/s)')
        # plt.legend()
        plt.tight_layout()  # tighten the layout to remove blank margins
        plt.savefig(os.path.join(self.resultPath, "CommandSpeed.png"))
        plt.close()

        print(report_dict)
        return report_dict

    # def get_eval_data(self):
    #     df = self.eval_data[
    #         ['simTime', 'simFrame', 'playerId', 'ip_acc', 'ip_dec', 'slam_brake', 'slam_accel', 'cadence']].copy()
    #     return df