@@ -0,0 +1,1304 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+##################################################################
+#
+# Copyright (c) 2023 CICV, Inc. All Rights Reserved
+#
+##################################################################
+"""
+@Authors: zhanghaiwen(zhanghaiwen@china-icv.cn), yangzihao(yangzihao@china-icv.cn)
+@Date: 2023/06/25
+@Last Modified: 2023/06/25
+@Summary: Comfort metrics
+"""
+
+import sys
+import math
+import pandas as pd
+import numpy as np
+import scipy.signal
+
+sys.path.append('../common')
+sys.path.append('../modules')
+sys.path.append('../results')
+
+from data_info import DataInfoList
+from score_weight import cal_score_with_priority, cal_weight_from_80
+from common import get_interpolation, score_grade, string_concatenate, replace_key_with_value, get_frame_with_time, \
+    score_over_100
+
+
+def peak_valley_decorator(method):
+    def wrapper(self, *args, **kwargs):
+        peak_valley = self._peak_valley_determination(self.df)
+        pv_list = self.df.loc[peak_valley, ['simTime', 'speedH']].values.tolist()
+        if len(pv_list) != 0:
+            flag = True
+            p_last = pv_list[0]
+
+            for i in range(1, len(pv_list)):
+                p_curr = pv_list[i]
+
+                if self._peak_valley_judgment(p_last, p_curr):
+                    method(self, p_curr, p_last, flag, *args, **kwargs)
+                else:
+                    p_last = p_curr
+
+            return method
+        else:
+            # no peaks/valleys found: call the method once with a cleared flag
+            flag = False
+            p_curr = [0, 0]
+            p_last = [0, 0]
+            method(self, p_curr, p_last, flag, *args, **kwargs)
+            return method
+
+    return wrapper
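+
+# Illustrative usage sketch (assumption, not part of the original source):
+#
+#     @peak_valley_decorator
+#     def zigzag_count_func(self, p_curr, p_last, flag=True):
+#         ...
+#
+# Calling `self.zigzag_count_func()` makes the wrapper scan self.df for
+# peak/valley pairs and invoke the body once per qualifying zigzag pair.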
+
+
+class Comfort(object):
+    """
+    Class for computing comfort metrics for autonomous driving.
+
+    Attributes:
+        dataframe: Vehicle driving data, stored in dataframe format.
+    """
+
+    def __init__(self, data_processed, custom_data, scoreModel):
+        self.eval_data = pd.DataFrame()
+        self.data_processed = data_processed
+        self.scoreModel = scoreModel
+
+        self.data = data_processed.obj_data[1]
+        self.mileage = data_processed.report_info['mileage']
+        self.ego_df = pd.DataFrame()
+        self.discomfort_df = pd.DataFrame(columns=['start_time', 'end_time', 'start_frame', 'end_frame', 'type'])
+        self.df_drivectrl = data_processed.driver_ctrl_df
+
+        self.config = data_processed.config
+        comfort_config = self.config.config['comfort']
+        self.comfort_config = comfort_config
+
+        # common data
+        self.builtin_metric_list = self.config.builtinMetricList
+
+        # dimension data
+        self.weight_custom = comfort_config['weightCustom']
+        self.metric_list = comfort_config['metric']
+        self.type_list = comfort_config['type']
+        self.type_name_dict = comfort_config['typeName']
+        self.name_dict = comfort_config['name']
+        self.unit_dict = comfort_config['unit']
+
+        # custom metric data
+        self.customMetricParam = comfort_config['customMetricParam']
+        self.custom_metric_list = list(self.customMetricParam.keys())
+        self.custom_data = custom_data
+        self.custom_param_dict = {}
+
+        # score data
+        self.weight = comfort_config['weightDimension']
+
+        self.weight_type_dict = comfort_config['typeWeight']
+        self.weight_type_list = comfort_config['typeWeightList']
+
+        self.weight_dict = comfort_config['weight']
+        self.weight_list = comfort_config['weightList']
+
+        self.priority_dict = comfort_config['priority']
+        self.priority_list = comfort_config['priorityList']
+
+        self.kind_dict = comfort_config['kind']
+        self.optimal_dict = comfort_config['optimal']
+        self.optimal1_dict = self.optimal_dict[0]
+        self.optimal2_dict = self.optimal_dict[1]
+        self.optimal3_dict = self.optimal_dict[2]
+        self.multiple_dict = comfort_config['multiple']
+        self.kind_list = comfort_config['kindList']
+        self.optimal_list = comfort_config['optimalList']
+        self.multiple_list = comfort_config['multipleList']
+
+        # metric data
+        self.metric_dict = comfort_config['typeMetricDict']
+        self.lat_metric_list = self.metric_dict['comfortLat']
+        self.lon_metric_list = self.metric_dict['comfortLon']
+        # self.lat_metric_list = ["zigzag", "shake"]
+        # self.lon_metric_list = ["cadence", "slamBrake", "slamAccelerate"]
+
+        self.time_list = data_processed.driver_ctrl_data['time_list']
+        self.frame_list = data_processed.driver_ctrl_data['frame_list']
+
+        self.count_dict = {}
+        self.duration_dict = {}
+        self.strength_dict = {}
+
+        self.discomfort_count = 0
+        self.zigzag_count = 0
+        self.shake_count = 0
+        self.cadence_count = 0
+        self.slam_brake_count = 0
+        self.slam_accel_count = 0
+
+        self.zigzag_strength = 0
+        self.shake_strength = 0
+        self.cadence_strength = 0
+        self.slam_brake_strength = 0
+        self.slam_accel_strength = 0
+
+        self.discomfort_duration = 0
+        self.zigzag_duration = 0
+        self.shake_duration = 0
+        self.cadence_duration = 0
+        self.slam_brake_duration = 0
+        self.slam_accel_duration = 0
+
+        self.zigzag_time_list = []
+        self.zigzag_frame_list = []
+        self.zigzag_stre_list = []
+        self.cur_ego_path_list = []
+        self.curvature_list = []
+
+        self._get_data()
+        self._comf_param_cal()
+
+    def _get_data(self):
+        """
+        Select the comfort-related columns from the object data.
+        """
+        comfort_info_list = DataInfoList.COMFORT_INFO
+        self.ego_df = self.data[comfort_info_list].copy()
+        # self.df = self.ego_df.set_index('simFrame')  # keep the original csv index
+        self.df = self.ego_df.reset_index(drop=True)  # reset to a clean 0-based index
+
+    def _cal_cur_ego_path(self, row):
+        """
+        Curvature of the ego path at one sample:
+        k = (x' * y'' - y' * x'') / ((x')^2 + (y')^2)^(3/2)
+        """
+        try:
+            divide = (row['speedX'] ** 2 + row['speedY'] ** 2) ** (3 / 2)
+            if not divide:
+                res = None
+            else:
+                res = (row['speedX'] * row['accelY'] - row['speedY'] * row['accelX']) / divide
+        except Exception:
+            res = None
+        return res
+
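+    # Worked example (illustration only, not from the source): with speedX = 10 m/s,
+    # speedY = 0, accelX = 0 and accelY = 1 m/s², the curvature is
+    # (10 * 1 - 0 * 0) / (10 ** 2 + 0 ** 2) ** 1.5 = 0.01 (1/m), i.e. a 100 m radius turn.
+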
+    def _comf_param_cal(self):
+        """
+        Scale the per-100km baselines (counts and durations) by the actual mileage.
+        """
+        UNIT_DISTANCE = 100000  # 100 km in meters
+
+        for i in range(len(self.optimal_list)):
+            if i % 3 == 2:
+                # every third entry is a strength baseline and is not mileage-dependent
+                continue
+            else:
+                self.optimal_list[i] = round(self.optimal_list[i] * self.mileage / UNIT_DISTANCE, 8)
+
+        self.optimal1_dict = {key: value * self.mileage / UNIT_DISTANCE for key, value in
+                              self.optimal1_dict.copy().items()}
+        self.optimal2_dict = {key: value * self.mileage / UNIT_DISTANCE for key, value in
+                              self.optimal2_dict.copy().items()}
+
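+        # Worked example (assumption): a count baseline of 2 per 100 km with
+        # mileage = 50000 m is rescaled to 2 * 50000 / 100000 = 1.0; strength
+        # baselines keep their original values.
+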
+        # speed-dependent comfort thresholds and per-frame event flags
+        self.ego_df['ip_acc'] = self.ego_df['v'].apply(get_interpolation, point1=[18, 4], point2=[72, 2])
+        self.ego_df['ip_dec'] = self.ego_df['v'].apply(get_interpolation, point1=[18, -5], point2=[72, -3.5])
+        self.ego_df['slam_brake'] = (self.ego_df['lon_acc'] - self.ego_df['ip_dec']).apply(
+            lambda x: 1 if x < 0 else 0)
+        self.ego_df['slam_accel'] = (self.ego_df['lon_acc'] - self.ego_df['ip_acc']).apply(
+            lambda x: 1 if x > 0 else 0)
+        self.ego_df['cadence'] = self.ego_df.apply(
+            lambda row: self._cadence_process_new(row['lon_acc'], row['ip_acc'], row['ip_dec']), axis=1)
+
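+        # `get_interpolation` is imported from common; a minimal sketch of the
+        # assumed linear-interpolation behavior (hypothetical, for illustration):
+        #
+        #     def get_interpolation(x, point1, point2):
+        #         x1, y1 = point1
+        #         x2, y2 = point2
+        #         return y1 + (y2 - y1) * (x - x1) / (x2 - x1)
+        #
+        # e.g. at v = 45 the slam-acceleration threshold would be
+        # 4 + (45 - 18) * (2 - 4) / (72 - 18) = 3.0 m/s².
+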
+        # for shake detector
+        self.ego_df['cur_ego_path'] = self.ego_df.apply(self._cal_cur_ego_path, axis=1)
+        self.ego_df['curvHor'] = self.ego_df['curvHor'].astype('float')
+        self.ego_df['cur_diff'] = (self.ego_df['cur_ego_path'] - self.ego_df['curvHor']).abs()
+        self.ego_df['R'] = self.ego_df['curvHor'].apply(lambda x: 10000 if x == 0 else 1 / x)
+        self.ego_df['R_ego'] = self.ego_df['cur_ego_path'].apply(lambda x: 10000 if x == 0 else 1 / x)
+        self.ego_df['R_diff'] = (self.ego_df['R_ego'] - self.ego_df['R']).abs()
+
+        self.cur_ego_path_list = self.ego_df['cur_ego_path'].values.tolist()
+        self.curvature_list = self.ego_df['curvHor'].values.tolist()
+
+    def _peak_valley_determination(self, df):
+        """
+        Determine the peaks and valleys of the vehicle based on its current angular velocity.
+
+        Parameters:
+            df: Dataframe containing the vehicle angular velocity (speedH).
+
+        Returns:
+            peak_valley: Sorted list of indices representing peaks and valleys.
+        """
+        peaks, _ = scipy.signal.find_peaks(df['speedH'], height=0.01, distance=1, prominence=0.01)
+        valleys, _ = scipy.signal.find_peaks(-df['speedH'], height=0.01, distance=1, prominence=0.01)
+        peak_valley = sorted(list(peaks) + list(valleys))
+
+        return peak_valley
+
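+    # Illustrative example (assumption): for speedH = [0, 0.05, 0, -0.05, 0],
+    # find_peaks flags index 1 as a peak, and index 3 as a peak of the negated
+    # series (a valley), so peak_valley == [1, 3].
+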
+    def _peak_valley_judgment(self, p_last, p_curr, tw=6000, avg=0.4):
+        """
+        Determine if the given peaks and valleys satisfy the zigzag conditions.
+
+        Parameters:
+            p_last: Previous peak or valley data point ([simTime, speedH]).
+            p_curr: Current peak or valley data point ([simTime, speedH]).
+            tw: Threshold time difference between peaks and valleys.
+            avg: Angular velocity gap threshold.
+
+        Returns:
+            Boolean indicating whether the conditions are satisfied.
+        """
+        t_diff = p_curr[0] - p_last[0]
+        v_diff = abs(p_curr[1] - p_last[1])
+        s = p_curr[1] * p_last[1]
+
+        zigzag_flag = t_diff < tw and v_diff > avg and s < 0
+        # side effect: qualifying [t_start, t_end] pairs are recorded for reporting
+        if zigzag_flag and ([p_last[0], p_curr[0]] not in self.zigzag_time_list):
+            self.zigzag_time_list.append([p_last[0], p_curr[0]])
+        return zigzag_flag
+
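+    # Worked example (assumption): p_last = [100.0, 0.3] and p_curr = [100.5, -0.2]
+    # give t_diff = 0.5 < tw, v_diff = 0.5 > avg and s = -0.06 < 0, so the pair
+    # counts as one zigzag swing.
+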
+    @peak_valley_decorator
+    def zigzag_count_func(self, p_curr, p_last, flag=True):
+        """
+        Count the number of zigzag movements.
+
+        Parameters:
+            p_curr: Current peak or valley point.
+            p_last: Previous peak or valley point.
+            flag: True when the decorator found a qualifying pair.
+        """
+        if flag:
+            self.zigzag_count += 1
+
+    @peak_valley_decorator
+    def cal_zigzag_strength(self, p_curr, p_last, flag=True):
+        """
+        Collect zigzag strength samples (mean yaw acceleration per swing);
+        the list is cleared when no qualifying pair exists.
+        """
+        if flag:
+            v_diff = abs(p_curr[1] - p_last[1])
+            t_diff = p_curr[0] - p_last[0]
+            if t_diff > 0:  # guard against a zero time gap
+                self.zigzag_stre_list.append(v_diff / t_diff)  # mean yaw acceleration
+        else:
+            self.zigzag_stre_list = []
+
+    def _shake_detector(self, Cr_diff=0.05, T_diff=0.39):
+        """
+        Signals considered for shake detection:
+        - ego lateral acceleration ax;
+        - lateral curvature of the ego trajectory;
+        - curvature change rate of the ego trajectory;
+        - curvature of the ego lane;
+        - curvature change rate of the ego lane;
+        - turn signal (tentative, may be unnecessary). Cr_diff = 0.1, T_diff = 0.04
+        Curvature formula: k(t) = (x'(t) * y''(t) - y'(t) * x''(t)) / ((x'(t))^2 + (y'(t))^2)^(3/2)
+        """
+        time_list = []
+        frame_list = []
+
+        df = self.ego_df.copy()
+        df = df[df['cur_diff'] > Cr_diff]
+        df['frame_ID_diff'] = df['simFrame'].diff()  # locate segments where trajectory curvature deviates from road curvature beyond the threshold
+        filtered_df = df[df.frame_ID_diff > T_diff]  # frame-ID gaps above T_diff split the data into separate shake episodes
+
+        row_numbers = filtered_df.index.tolist()
+        cut_column = pd.cut(df.index, bins=row_numbers)
+
+        grouped = df.groupby(cut_column)
+        dfs = {}
+        for name, group in grouped:
+            dfs[name] = group.reset_index(drop=True)
+
+        for name, df_group in dfs.items():
+            # straight road, no active lane change
+            df_group['curvHor'] = df_group['curvHor'].abs()
+            df_group_straight = df_group[(df_group.lightMask == 0) & (df_group.curvHor < 0.001)]
+            if not df_group_straight.empty:
+                time_list.extend(df_group_straight['simTime'].values)
+                frame_list.extend(df_group_straight['simFrame'].values)
+                self.shake_count = self.shake_count + 1
+
+            # turn signal on while the road is straight: shake requires a larger curvature-difference threshold
+            df_group_change_lane = df_group[(df_group['lightMask'] != 0) & (df_group['curvHor'] < 0.001)]
+            df_group_change_lane_data = df_group_change_lane[df_group_change_lane.cur_diff > Cr_diff + 0.2]
+            if not df_group_change_lane_data.empty:
+                time_list.extend(df_group_change_lane_data['simTime'].values)
+                frame_list.extend(df_group_change_lane_data['simFrame'].values)
+                self.shake_count = self.shake_count + 1
+
+            # turning with the turn signal on
+            df_group_turn = df_group[(df_group['lightMask'] != 0) & (df_group['curvHor'].abs() > 0.001)]
+            df_group_turn_data = df_group_turn[df_group_turn.cur_diff.abs() > Cr_diff + 0.1]
+            if not df_group_turn_data.empty:
+                time_list.extend(df_group_turn_data['simTime'].values)
+                frame_list.extend(df_group_turn_data['simFrame'].values)
+                self.shake_count = self.shake_count + 1
+
+        # cluster the flagged timestamps: gaps within TIME_RANGE seconds form one episode
+        TIME_RANGE = 1
+        t_list = time_list
+        f_list = frame_list
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            if not sub_group_time or t_list[i] - t_list[i - 1] <= TIME_RANGE:
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+
+        if sub_group_time:
+            # flush the trailing episode; in the original these appends were
+            # commented out, so the last shake episode was silently dropped
+            group_time.append(sub_group_time)
+            group_frame.append(sub_group_frame)
+
+        # values for the report charts
+        shake_time = [[g[0], g[-1]] for g in group_time]
+        shake_frame = [[g[0], g[-1]] for g in group_frame]
+        self.shake_count = len(shake_time)  # overrides the per-group increments above
+
+        if shake_time:
+            time_df = pd.DataFrame(shake_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(shake_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'shake'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        return time_list
+
+    def _cadence_process(self, lon_acc_roc, ip_dec_roc):
+        # legacy version, superseded by _cadence_process_new; note that the
+        # == 0 branch is unreachable because abs(lon_acc_roc) < 1 already returns NaN
+        if abs(lon_acc_roc) >= abs(ip_dec_roc) or abs(lon_acc_roc) < 1:
+            return np.nan
+        elif abs(lon_acc_roc) == 0:
+            return 0
+        elif lon_acc_roc > 0 and lon_acc_roc < -ip_dec_roc:
+            return 1
+        elif lon_acc_roc < 0 and lon_acc_roc > ip_dec_roc:
+            return -1
+
+    def _cadence_process_new(self, lon_acc, ip_acc, ip_dec):
+        # label each sample: NaN outside the comfort band or below 1 m/s²,
+        # 1 for in-band acceleration, -1 for in-band deceleration
+        if abs(lon_acc) < 1 or lon_acc > ip_acc or lon_acc < ip_dec:
+            return np.nan
+        elif abs(lon_acc) == 0:  # unreachable: abs(lon_acc) < 1 already returned NaN
+            return 0
+        elif lon_acc > 0 and lon_acc < ip_acc:
+            return 1
+        elif lon_acc < 0 and lon_acc > ip_dec:
+            return -1
+
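+    # Worked example (assumption): with ip_acc = 3.0 and ip_dec = -4.0,
+    # lon_acc = 2.0 maps to 1, lon_acc = -2.0 maps to -1, and lon_acc = 0.5
+    # maps to NaN (below the 1 m/s² magnitude threshold).
+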
+    def _cadence_detector(self):
+        """
+        # Sudden acceleration changes: accel then decel, decel then accel, accel then stop, decel then stop.
+        # Cadence (surging): several jerk spikes within 2 s.
+        # Find every feature point, then use the 2 s window behind each one to count the frequency, avoiding wasted computation.
+
+        # Filter out the feature points.
+        # Cluster by feature-point time: gaps within 1 s join one group, larger gaps start a new one.
+        # Drop groups with too few feature points.
+        """
+        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'cadence']].copy()
+        time_list = data['simTime'].values.tolist()
+
+        data = data[data['cadence'].notna()]  # the original `!= np.nan` comparison is always True, i.e. a no-op
+        data['cadence_diff'] = data['cadence'].diff()
+        data.dropna(subset='cadence_diff', inplace=True)
+        data = data[data['cadence_diff'] != 0]
+
+        t_list = data['simTime'].values.tolist()
+        f_list = data['simFrame'].values.tolist()
+
+        TIME_RANGE = 1
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            if not sub_group_time or t_list[i] - t_list[i - 1] <= TIME_RANGE:  # feature points within one second form one cadence group
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+
+        group_time.append(sub_group_time)
+        group_frame.append(sub_group_frame)
+        group_time = [g for g in group_time if len(g) >= 1]  # a single feature point already counts as one cadence event
+        group_frame = [g for g in group_frame if len(g) >= 1]
+
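+        # Illustrative example (assumption): t_list = [1.0, 1.5, 3.2, 3.4] yields
+        # group_time = [[1.0, 1.5], [3.2, 3.4]], since the 1.7 s gap between
+        # 1.5 and 3.2 exceeds TIME_RANGE and starts a new group.
+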
+        # values for the report charts
+        cadence_time = [[g[0], g[-1]] for g in group_time]
+        cadence_frame = [[g[0], g[-1]] for g in group_frame]
+
+        if cadence_time:
+            time_df = pd.DataFrame(cadence_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(cadence_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'cadence'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        # expand each cadence [start, end] pair back into the full list of covered timestamps
+        cadence_time_list = [time for pair in cadence_time for time in time_list if pair[0] <= time <= pair[1]]
+
+        stre_list = []
+        freq_list = []
+        for g in group_time:
+            # calculate strength
+            g_df = data[data['simTime'].isin(g)]
+            strength = g_df['lon_acc'].abs().mean()
+            stre_list.append(strength)
+
+            # calculate frequency; guard single-point groups, whose time span is zero
+            cnt = len(g)
+            t_start = g_df['simTime'].iloc[0]
+            t_end = g_df['simTime'].iloc[-1]
+            t_delta = t_end - t_start
+            frequency = cnt / t_delta if t_delta > 0 else 0
+            freq_list.append(frequency)
+
+        self.cadence_count = len(freq_list)
+        cadence_stre = sum(stre_list) / len(stre_list) if stre_list else 0  # mean cadence strength (currently unused)
+
+        return cadence_time_list
+
+    def _slam_brake_detector(self):
+        # count segments where slam_brake stays 1; record the first frame_ID of each segment
+        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'lon_acc_roc', 'ip_dec', 'slam_brake']].copy()
+
+        res_df = data[data['slam_brake'] == 1]
+        t_list = res_df['simTime'].values
+        f_list = res_df['simFrame'].values.tolist()
+
+        FRAME_RANGE = 1  # the grouping key here is the frame gap, not time
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            if not sub_group_time or f_list[i] - f_list[i - 1] <= FRAME_RANGE:  # consecutive frames form one slam-brake event
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+
+        group_time.append(sub_group_time)
+        group_frame.append(sub_group_frame)
+        group_time = [g for g in group_time if len(g) >= 2]  # at least two frames count as one slam-brake event
+        group_frame = [g for g in group_frame if len(g) >= 2]
+
+        # values for the report charts
+        slam_brake_time = [[g[0], g[-1]] for g in group_time]
+        slam_brake_frame = [[g[0], g[-1]] for g in group_frame]
+
+        if slam_brake_time:
+            time_df = pd.DataFrame(slam_brake_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(slam_brake_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'slam_brake'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        time_list = [element for sublist in group_time for element in sublist]
+        self.slam_brake_count = len(group_time)  # / self.mileage  # * 1000000
+        return time_list
+
+    def _slam_accel_detector(self):
+        # count segments where slam_accel stays 1; record the first frame_ID of each segment
+        data = self.ego_df[['simTime', 'simFrame', 'lon_acc', 'ip_acc', 'slam_accel']].copy()
+
+        res_df = data.loc[data['slam_accel'] == 1]
+        t_list = res_df['simTime'].values
+        f_list = res_df['simFrame'].values.tolist()
+
+        group_time = []
+        group_frame = []
+        sub_group_time = []
+        sub_group_frame = []
+        for i in range(len(f_list)):
+            # consecutive frames form one slam-acceleration event; the original
+            # tested `not group_time` here, which never splits groups once the
+            # loop starts -- `not sub_group_time` is the intended check
+            if not sub_group_time or f_list[i] - f_list[i - 1] <= 1:
+                sub_group_time.append(t_list[i])
+                sub_group_frame.append(f_list[i])
+            else:
+                group_time.append(sub_group_time)
+                group_frame.append(sub_group_frame)
+                sub_group_time = [t_list[i]]
+                sub_group_frame = [f_list[i]]
+
+        group_time.append(sub_group_time)
+        group_frame.append(sub_group_frame)
+        group_time = [g for g in group_time if len(g) >= 2]  # at least two frames count as one slam-acceleration event
+        group_frame = [g for g in group_frame if len(g) >= 2]
+
+        # values for the report charts
+        slam_accel_time = [[g[0], g[-1]] for g in group_time]
+        slam_accel_frame = [[g[0], g[-1]] for g in group_frame]
+
+        if slam_accel_time:
+            time_df = pd.DataFrame(slam_accel_time, columns=['start_time', 'end_time'])
+            frame_df = pd.DataFrame(slam_accel_frame, columns=['start_frame', 'end_frame'])
+            discomfort_df = pd.concat([time_df, frame_df], axis=1)
+            discomfort_df['type'] = 'slam_accel'
+            self.discomfort_df = pd.concat([self.discomfort_df, discomfort_df], ignore_index=True)
+
+        time_list = [element for sublist in group_time for element in sublist]
+        self.slam_accel_count = len(group_time)  # / self.mileage  # * 1000000
+        return time_list
+
+    def comf_statistic(self):
+        """
+        Run all discomfort detectors and assemble the metric array,
+        [count, duration, strength] per enabled metric.
+        """
+        df = self.ego_df[['simTime', 'cur_diff', 'lon_acc', 'lon_acc_roc', 'accelH']].copy()
+
+        self.zigzag_count_func()
+        self.cal_zigzag_strength()
+        if self.zigzag_time_list:
+            zigzag_df = pd.DataFrame(self.zigzag_time_list, columns=['start_time', 'end_time'])
+            zigzag_df = get_frame_with_time(zigzag_df, self.ego_df)
+            zigzag_df['type'] = 'zigzag'
+            self.discomfort_df = pd.concat([self.discomfort_df, zigzag_df], ignore_index=True)
+
+        zigzag_t_list = []
+        # zigzag_time_list holds [t_start, t_end] pairs; expand them into the full time list
+        t_list = df['simTime'].values.tolist()
+        for t_start, t_end in self.zigzag_time_list:
+            index_1 = t_list.index(t_start)
+            index_2 = t_list.index(t_end)
+            zigzag_t_list.extend(t_list[index_1:index_2 + 1])
+        zigzag_t_list = list(set(zigzag_t_list))
+        shake_t_list = self._shake_detector()
+        cadence_t_list = self._cadence_detector()
+        slam_brake_t_list = self._slam_brake_detector()
+        slam_accel_t_list = self._slam_accel_detector()
+
+        discomfort_time_list = zigzag_t_list + shake_t_list + cadence_t_list + slam_brake_t_list + slam_accel_t_list
+        # deduplicate first, then sort; the original sorted before deduplicating,
+        # and list(set(...)) discarded the ordering again
+        discomfort_time_list = sorted(set(discomfort_time_list))
+
+        FREQUENCY = 100
+        TIME_DIFF = 1 / FREQUENCY
+        self.discomfort_duration = len(discomfort_time_list) * TIME_DIFF
+
+        df['flag_zigzag'] = df['simTime'].apply(lambda x: 1 if x in zigzag_t_list else 0)
+        df['flag_shake'] = df['simTime'].apply(lambda x: 1 if x in shake_t_list else 0)
+        df['flag_cadence'] = df['simTime'].apply(lambda x: 1 if x in cadence_t_list else 0)
+        df['flag_slam_brake'] = df['simTime'].apply(lambda x: 1 if x in slam_brake_t_list else 0)
+        df['flag_slam_accel'] = df['simTime'].apply(lambda x: 1 if x in slam_accel_t_list else 0)
+
+        # hectokilometer = 100000  # 100 km in meters
+        self.zigzag_duration = df['flag_zigzag'].sum() * TIME_DIFF  # / self.mileage * hectokilometer
+        self.shake_duration = df['flag_shake'].sum() * TIME_DIFF  # / self.mileage * hectokilometer
+        self.cadence_duration = df['flag_cadence'].sum() * TIME_DIFF  # / self.mileage * hectokilometer
+        self.slam_brake_duration = df['flag_slam_brake'].sum() * TIME_DIFF  # / self.mileage * hectokilometer
+        self.slam_accel_duration = df['flag_slam_accel'].sum() * TIME_DIFF  # / self.mileage * hectokilometer
+
+        # strength could also use the max; the mean is used for now, pending data review
+        # self.zigzag_strength = np.mean(self.zigzag_stre_list) if self.zigzag_stre_list else 0
+        # NOTE: the original multiplied by df['flag_shake'] here, mixing shake
+        # samples into the zigzag strength; flag_zigzag is the intended mask
+        self.zigzag_strength = (df['flag_zigzag'] * abs(df['accelH'])).mean()
+        self.shake_strength = (df['flag_shake'] * abs(df['cur_diff'])).mean()
+        self.cadence_strength = (df['flag_cadence'] * abs(df['lon_acc'])).mean()
+        self.slam_brake_strength = (df['flag_slam_brake'] * abs(df['lon_acc'])).mean()
+        self.slam_accel_strength = (df['flag_slam_accel'] * abs(df['lon_acc'])).mean()
+
+        self.zigzag_strength = self._nan_detect(self.zigzag_strength)
+        self.shake_strength = self._nan_detect(self.shake_strength)
+        self.cadence_strength = self._nan_detect(self.cadence_strength)
+        self.slam_brake_strength = self._nan_detect(self.slam_brake_strength)
+        self.slam_accel_strength = self._nan_detect(self.slam_accel_strength)
+
+        self.count_dict = {
+            "zigzag": self.zigzag_count,
+            "shake": self.shake_count,
+            "cadence": self.cadence_count,
+            "slamBrake": self.slam_brake_count,
+            "slamAccelerate": self.slam_accel_count
+        }
+
+        self.duration_dict = {
+            "zigzag": self.zigzag_duration,
+            "shake": self.shake_duration,
+            "cadence": self.cadence_duration,
+            "slamBrake": self.slam_brake_duration,
+            "slamAccelerate": self.slam_accel_duration
+        }
+
+        self.strength_dict = {
+            "zigzag": self.zigzag_strength,
+            "shake": self.shake_strength,
+            "cadence": self.cadence_strength,
+            "slamBrake": self.slam_brake_strength,
+            "slamAccelerate": self.slam_accel_strength
+        }
+
+        zigzag_list = [self.zigzag_count, self.zigzag_duration, self.zigzag_strength]
+        shake_list = [self.shake_count, self.shake_duration, self.shake_strength]
+        cadence_list = [self.cadence_count, self.cadence_duration, self.cadence_strength]
+        slam_brake_list = [self.slam_brake_count, self.slam_brake_duration, self.slam_brake_strength]
+        slam_accel_list = [self.slam_accel_count, self.slam_accel_duration, self.slam_accel_strength]
+
+        tmp_comf_arr = []
+        if "zigzag" in self.metric_list:
+            tmp_comf_arr += zigzag_list
+            self.discomfort_count += self.zigzag_count
+
+        if "shake" in self.metric_list:
+            tmp_comf_arr += shake_list
+            self.discomfort_count += self.shake_count
+
+        if "cadence" in self.metric_list:
+            tmp_comf_arr += cadence_list
+            self.discomfort_count += self.cadence_count
+
+        if "slamBrake" in self.metric_list:
+            tmp_comf_arr += slam_brake_list
+            self.discomfort_count += self.slam_brake_count
+
+        if "slamAccelerate" in self.metric_list:
+            tmp_comf_arr += slam_accel_list
+            self.discomfort_count += self.slam_accel_count
+
+        comf_arr = [tmp_comf_arr]
+        return comf_arr
+
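+    # The row returned by comf_statistic holds three consecutive entries per
+    # enabled metric -- [count, duration, strength] -- so with all five builtin
+    # metrics enabled it carries 15 values (illustration, not from the source).
+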
+    def _nan_detect(self, num):
+        if math.isnan(num):
+            return 0
+        return num
+
+    def custom_metric_param_parser(self, param_list):
+        """
+        Expected structure (one entry per parameter set; the "spare" field is
+        inferred from the parsing code below):
+        param_dict = {
+            "paramA": [
+                {
+                    "kind": "-1",
+                    "optimal": "1",
+                    "multiple": ["0.5", "5"],
+                    "spare": [{"param": ...}],
+                    "spare1": null,
+                    "spare2": null
+                }
+            ]
+        }
+        """
+        kind_list = []
+        optimal_list = []
+        multiple_list = []
+        spare_list = []
+
+        for i in range(len(param_list)):
+            kind_list.append(int(param_list[i]['kind']))
+            optimal_list.append(float(param_list[i]['optimal']))
+            multiple_list.append([float(x) for x in param_list[i]['multiple']])
+            spare_list.append([item["param"] for item in param_list[i]["spare"]])
+
+        result = {
+            "kind": kind_list,
+            "optimal": optimal_list,
+            "multiple": multiple_list,
+            "spare": spare_list,
+        }
+        return result
+
+    def custom_metric_score(self, metric, value, param_list):
+        """
+        Score one custom metric with the configured score model.
+        """
+        param = self.custom_metric_param_parser(param_list)
+        self.custom_param_dict[metric] = param
+
+        score_model = self.scoreModel(param['kind'], param['optimal'], param['multiple'], np.array([value]))
+        score_sub = score_model.cal_score()
+        score = sum(score_sub) / len(score_sub)
+        return score
+
+    def comf_score_new(self):
+        score_metric_dict = {}
+        score_type_dict = {}
+
+        arr_comf = self.comf_statistic()
+        print("\n[舒适性表现及得分情况]")
+        print("舒适性各指标值:", [[round(num, 2) for num in row] for row in arr_comf])
+
+        if arr_comf:
+            arr_comf = np.array(arr_comf)
+
+            score_model = self.scoreModel(self.kind_list, self.optimal_list, self.multiple_list, arr_comf)
+            score_sub = score_model.cal_score()
+            score_sub = list(map(lambda x: 80 if np.isnan(x) else x, score_sub))  # NaN sub-scores fall back to a neutral 80
+
+            metric_list = [x for x in self.metric_list if x in self.config.builtinMetricList]
+            score_metric = []
+            for i in range(len(metric_list)):
+                # each metric owns three consecutive sub-scores: count, duration, strength
+                score_tmp = (score_sub[i * 3 + 0] + score_sub[i * 3 + 1] + score_sub[i * 3 + 2]) / 3
+                score_metric.append(round(score_tmp, 2))
+
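+            # Worked example (assumption): score_sub = [90, 80, 70, ...] gives
+            # the first metric (90 + 80 + 70) / 3 = 80.0, the mean of its count,
+            # duration and strength sub-scores.
+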
+            score_metric_dict = {key: value for key, value in zip(metric_list, score_metric)}
+
+        for metric in self.custom_metric_list:
+            value = self.custom_data[metric]['value']
+            param_list = self.customMetricParam[metric]
+            score = self.custom_metric_score(metric, value, param_list)
+            score_metric_dict[metric] = round(score, 2)
+
+        score_metric_dict = {key: score_metric_dict[key] for key in self.metric_list}
+        score_metric = list(score_metric_dict.values())
+
+        if self.weight_custom:  # custom weights
+            score_metric_with_weight_dict = {key: score_metric_dict[key] * self.weight_dict[key] for key in
+                                             self.weight_dict}
+
+            for type in self.type_list:
+                type_score = sum(
+                    value for key, value in score_metric_with_weight_dict.items() if key in self.metric_dict[type])
+                score_type_dict[type] = round(type_score, 2) if type_score < 100 else 100
+
+            score_type_with_weight_dict = {key: score_type_dict[key] * self.weight_type_dict[key] for key in
+                                           score_type_dict}
+
+            score_comfort = sum(score_type_with_weight_dict.values())
+        else:  # objective weighting
+            self.weight_list = cal_weight_from_80(score_metric)
+            self.weight_dict = {key: value for key, value in zip(self.metric_list, self.weight_list)}
+            score_comfort = cal_score_with_priority(score_metric, self.weight_list, self.priority_list)
+
+            for type in self.type_list:
+                # normalize the metric weights within each type
+                type_weight = sum(value for key, value in self.weight_dict.items() if key in self.metric_dict[type])
+                for key, value in self.weight_dict.items():
+                    if key in self.metric_dict[type]:
+                        self.weight_dict[key] = value / type_weight
+
+                type_score_metric = [value for key, value in score_metric_dict.items() if key in self.metric_dict[type]]
+                type_weight_list = [value for key, value in self.weight_dict.items() if key in self.metric_dict[type]]
+                type_priority_list = [value for key, value in self.priority_dict.items() if
+                                      key in self.metric_dict[type]]
+
+                type_score = cal_score_with_priority(type_score_metric, type_weight_list, type_priority_list)
+                score_type_dict[type] = round(type_score, 2) if type_score < 100 else 100
+
+            for key in self.weight_dict:
+                self.weight_dict[key] = round(self.weight_dict[key], 4)
+
+            score_type = list(score_type_dict.values())
+            self.weight_type_list = cal_weight_from_80(score_type)
+            self.weight_type_dict = {key: value for key, value in zip(self.type_list, self.weight_type_list)}
+
+        score_comfort = round(score_comfort, 2)
+
+        print("舒适性各指标基准值:", self.optimal_list)
+        print(f"舒适性得分为:{score_comfort:.2f}分。")
+        print(f"舒适性各类型得分为:{score_type_dict}。")
+        print(f"舒适性各指标得分为:{score_metric_dict}。")
+        return score_comfort, score_type_dict, score_metric_dict
+
+    def zip_time_pairs(self, zip_list):
+        """Pair each timestamp with a value; NaNs become "" so charts skip them."""
+        pairs = zip(self.time_list, zip_list)
+        zip_vs_time = [[x, "" if math.isnan(y) else y] for x, y in pairs]
+        return zip_vs_time
+
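+    # Illustrative example (assumption): time_list = [0.01, 0.02] and
+    # zip_list = [1.0, float('nan')] yield [[0.01, 1.0], [0.02, ""]].
+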
+    def comf_weight_distribution(self):
+        # get weight distribution
+        weight_distribution = {}
+        weight_distribution["name"] = "舒适性"
+
+        if "comfortLat" in self.type_list:
+            lat_weight_indexes_dict = {key: f"{key}({value * 100:.2f}%)" for key, value in self.weight_dict.items() if
+                                       key in self.lat_metric_list}
+
+            weight_distribution_lat = {
+                "latWeight": f"横向舒适度({self.weight_type_dict['comfortLat'] * 100:.2f}%)",
+                "indexes": lat_weight_indexes_dict
+            }
+            weight_distribution['comfortLat'] = weight_distribution_lat
+
+        if "comfortLon" in self.type_list:
+            lon_weight_indexes_dict = {key: f"{key}({value * 100:.2f}%)" for key, value in self.weight_dict.items() if
+                                       key in self.lon_metric_list}
+
+            weight_distribution_lon = {
+                "lonWeight": f"纵向舒适度({self.weight_type_dict['comfortLon'] * 100:.2f}%)",
+                "indexes": lon_weight_indexes_dict
+            }
+            weight_distribution['comfortLon'] = weight_distribution_lon
+
+        return weight_distribution
+
+    def _get_weight_distribution(self, dimension):
+        # get weight distribution
+        weight_distribution = {}
+        weight_distribution["name"] = self.config.dimension_name[dimension]
+
+        for type in self.type_list:
+            type_weight_indexes_dict = {key: f"{self.name_dict[key]}({value * 100:.2f}%)" for key, value in
+                                        self.weight_dict.items() if
+                                        key in self.metric_dict[type]}
+
+            weight_distribution_type = {
+                "weight": f"{self.type_name_dict[type]}({self.weight_type_dict[type] * 100:.2f}%)",
+                "indexes": type_weight_indexes_dict
+            }
+            weight_distribution[type] = weight_distribution_type
+
+        return weight_distribution
+
+    def report_statistic(self):
+        """
+        Assemble the comfort report dict: score, level, weight distribution,
+        per-type details, descriptions and chart data.
+
+        Returns:
+            report_dict
+        """
+        brakePedal_list = self.data_processed.driver_ctrl_data['brakePedal_list']
+        throttlePedal_list = self.data_processed.driver_ctrl_data['throttlePedal_list']
+        steeringWheel_list = self.data_processed.driver_ctrl_data['steeringWheel_list']
+
+        # common parameter calculation
+        brake_vs_time = self.zip_time_pairs(brakePedal_list)
+        throttle_vs_time = self.zip_time_pairs(throttlePedal_list)
+        steering_vs_time = self.zip_time_pairs(steeringWheel_list)
+
+        report_dict = {
+            "name": "舒适性",
+            "weight": f"{self.weight * 100:.2f}%",
+            'discomfortCount': self.discomfort_count,
+        }
+
+        duration = self.time_list[-1]
+
+        # comfort score and grade
+        score_comfort, score_type_dict, score_metric_dict = self.comf_score_new()
+
+        # get weight distribution
+        report_dict["weightDistribution"] = self._get_weight_distribution("comfort")
+
+        score_comfort = int(score_comfort) if int(score_comfort) == score_comfort else round(score_comfort, 2)
+        grade_comfort = score_grade(score_comfort)
+        report_dict["score"] = score_comfort
+        report_dict["level"] = grade_comfort
+
+        # comfort data for graphs
+        ego_speed_list = self.ego_df['v'].values.tolist()
+        ego_speed_vs_time = self.zip_time_pairs(ego_speed_list)
+        lon_acc_list = self.ego_df['lon_acc'].values.tolist()
+        lon_acc_vs_time = self.zip_time_pairs(lon_acc_list)
+
+        yawrate_list = self.ego_df['speedH'].values.tolist()
+        yawrate_vs_time = self.zip_time_pairs(yawrate_list)
+        yawrate_roc_list = self.ego_df['accelH'].values.tolist()
+        yawrate_roc_vs_time = self.zip_time_pairs(yawrate_roc_list)
+        cur_ego_path_vs_time = self.zip_time_pairs(self.cur_ego_path_list)
+        curvature_vs_time = self.zip_time_pairs(self.curvature_list)
+
+        # marklines for the charts
+        discomfort_df = self.discomfort_df.copy()
+        discomfort_df['type'] = "origin"
+        discomfort_slices = discomfort_df.to_dict('records')
+
+        # for descriptions
+        good_type_list = []
+        bad_type_list = []
+
+        good_metric_list = []
+        bad_metric_list = []
+
+        # strings for comfort descriptions 1 & 2
+        str_uncomf_count = ''
+        str_uncomf_over_optimal = ''
+
+        type_details_dict = {}
+
+        for type in self.type_list:
+            if score_type_dict[type] < 80:
+                bad_type_list.append(type)
+            else:
+                good_type_list.append(type)
+
+            type_dict = {
+                "name": f"{self.type_name_dict[type]}",
+            }
+
+            builtin_graph_dict = {}
+            custom_graph_dict = {}
+
+            score_type = score_type_dict[type]
+            grade_type = score_grade(score_type)
+            type_dict["score"] = score_type
+            type_dict["level"] = grade_type
+
+            type_dict_indexes = {}
+
+            flag_acc = False
+            for metric in self.metric_dict[type]:
+                if score_metric_dict[metric] < 80:
+                    bad_metric_list.append(metric)
+                else:
+                    good_metric_list.append(metric)
+
+                if metric in self.builtin_metric_list:
+                    # for indexes
+                    type_dict_indexes[metric] = {
+                        "name": f"{self.name_dict[metric]}",
+                        "score": score_metric_dict[metric],
+                        "numberReal": f"{self.count_dict[metric]}",
+                        "numberRef": f"{self.optimal1_dict[metric]:.4f}",
+                        "durationReal": f"{self.duration_dict[metric]:.2f}",
+                        "durationRef": f"{self.optimal2_dict[metric]:.4f}",
+                        "strengthReal": f"{self.strength_dict[metric]:.2f}",
+                        "strengthRef": f"{self.optimal3_dict[metric]}"
+                    }
+
+                    # for descriptions
+                    if self.count_dict[metric] > 0:
+                        str_uncomf_count += f'{self.count_dict[metric]}次{self.name_dict[metric]}行为、'
+
+                    if self.count_dict[metric] > self.optimal1_dict[metric]:
+                        over_optimal = ((self.count_dict[metric] - self.optimal1_dict[metric]) / self.optimal1_dict[
+                            metric]) * 100
+                        str_uncomf_over_optimal += f'{self.name_dict[metric]}次数比基准值高{over_optimal:.2f}%,'
+
+                    if self.duration_dict[metric] > self.optimal2_dict[metric]:
+                        over_optimal = ((self.duration_dict[metric] - self.optimal2_dict[metric]) / self.optimal2_dict[
+                            metric]) * 100
+                        str_uncomf_over_optimal += f'{self.name_dict[metric]}时长比基准值高{over_optimal:.2f}%,'
+
+                    if self.strength_dict[metric] > self.optimal3_dict[metric]:
+                        over_optimal = ((self.strength_dict[metric] - self.optimal3_dict[metric]) / self.optimal3_dict[
+                            metric]) * 100
+                        str_uncomf_over_optimal += f'{self.name_dict[metric]}强度比基准值高{over_optimal:.2f}%;'
+
+                    if metric == "zigzag":
+                        metric_data = {
+                            "name": "横摆角加速度(rad/s²)",
+                            "data": yawrate_roc_vs_time,
+                            "range": f"[-{self.optimal3_dict[metric]}, {self.optimal3_dict[metric]}]",
+                        }
+                        builtin_graph_dict[metric] = metric_data
+
+                    elif metric == "shake":
+                        metric_data = {
+                            "name": "曲率(1/m)",
+                            "legend": ["自车轨迹曲率", "车道中心线曲率"],
+                            "data": [cur_ego_path_vs_time, curvature_vs_time],
+                            "range": f"[-{self.optimal3_dict[metric]}, {self.optimal3_dict[metric]}]",
+                        }
+                        builtin_graph_dict[metric] = metric_data
+
+                    # the three longitudinal metrics share one acceleration chart,
+                    # emitted only once (flag_acc)
+                    elif metric in ["cadence", "slamBrake", "slamAccelerate"] and not flag_acc:
+                        metric_data = {
+                            "name": "自车纵向加速度(m/s²)",
+                            "data": lon_acc_vs_time,
+                            "range": f"[-{self.optimal3_dict[metric]}, {self.optimal3_dict[metric]}]",
+                        }
+                        flag_acc = True
+
+                        builtin_graph_dict[metric] = metric_data
+
+                else:
+                    # for indexes (custom metrics)
+                    type_dict_indexes[metric] = {
+                        "name": f"{self.name_dict[metric]}",
+                        "score": score_metric_dict[metric],
+                        "numberReal": f"{self.custom_data[metric]['tableData']['avg']}",
+                        "numberRef": "-",
+                        "durationReal": f"{self.custom_data[metric]['tableData']['max']}",
+                        "durationRef": "-",
+                        "strengthReal": f"{self.custom_data[metric]['tableData']['min']}",
+                        "strengthRef": "-"
+                    }
+                    custom_graph_dict[metric] = self.custom_data[metric]['reportData']
+
+            str_uncomf_over_optimal = str_uncomf_over_optimal[:-1] + ";"
+            type_dict["indexes"] = type_dict_indexes
+            type_dict["builtin"] = builtin_graph_dict
+            type_dict["custom"] = custom_graph_dict
+
+            type_details_dict[type] = type_dict
+
+        report_dict["details"] = type_details_dict
+
+        # comfort description 1
+        if grade_comfort == '优秀':
+            comf_description1 = '乘客在本轮测试中体验舒适;'
+        elif grade_comfort == '良好':
+            comf_description1 = '算法在本轮测试中的表现满足设计指标要求;'
+        elif grade_comfort == '一般':
+            str_bad_metric = string_concatenate(bad_metric_list)
+            comf_description1 = f'未满足设计指标要求。算法需要在{str_bad_metric}上进一步优化。在{(self.mileage / 1000):.2f}公里内,共发生{str_uncomf_count[:-1]};'
+        elif grade_comfort == '较差':
+            str_bad_metric = string_concatenate(bad_metric_list)
+            comf_description1 = f'乘客体验极不舒适,未满足设计指标要求。算法需要在{str_bad_metric}上进一步优化。在{(self.mileage / 1000):.2f}公里内,共发生{str_uncomf_count[:-1]};'
+
+        # comfort description 2
+        if not bad_metric_list:
+            str_comf_type = string_concatenate(good_metric_list)
+            comf_description2 = f"{str_comf_type}均表现良好。"
+        else:
+            str_bad_metric = string_concatenate(bad_metric_list)
+
+            if not good_metric_list:
+                comf_description2 = f"{str_bad_metric}表现不佳。其中{str_uncomf_over_optimal}。"
+            else:
+                str_comf_type = string_concatenate(good_metric_list)
+                comf_description2 = f"{str_comf_type}表现良好;{str_bad_metric}表现不佳。其中{str_uncomf_over_optimal}。"
+
+        # comfort description 3
+        control_type = []
+        if 'zigzag' in bad_metric_list or 'shake' in bad_metric_list:
+            control_type.append('横向')
+        # fix: the original ended with `... in bad_metric_list in bad_metric_list`,
+        # a chained comparison that made the last term always evaluate False
+        if 'cadence' in bad_metric_list or 'slamBrake' in bad_metric_list or 'slamAccelerate' in bad_metric_list:
+            control_type.append('纵向')
+        str_control_type = '和'.join(control_type)
+
+        if not control_type:
+            comf_description3 = "算法的横向和纵向控制表现俱佳,乘坐体验舒适。"
+        else:
+            comf_description3 = f"算法应该优化对车辆的{str_control_type}控制,优化乘坐体验。"
+
+        uncomf_time = self.discomfort_duration
+        if uncomf_time == 0:
+            comf_description4 = ""
+        else:
+            percent4 = uncomf_time / duration * 100
+            comf_description4 = f"在{duration}s时间内,乘客有{percent4:.2f}%的时间存在不舒适感受。"
+
+        report_dict["description1"] = replace_key_with_value(comf_description1, self.name_dict)
+        report_dict["description2"] = replace_key_with_value(comf_description2, self.name_dict)
+        report_dict["description3"] = comf_description3
+        report_dict["description4"] = comf_description4
+
+        report_dict['commonData'] = {
+            "per": {
+                "name": "刹车/油门踏板开度(百分比)",
+                "legend": ["刹车踏板开度", "油门踏板开度"],
+                "data": [brake_vs_time, throttle_vs_time]
+            },
+            "ang": {
+                "name": "方向盘转角(角度°)",
+                "data": steering_vs_time
+            },
+            "spe": {
+                "name": "速度(km/h)",
+                "data": ego_speed_vs_time
+            },
+        }
+
+        report_dict["commonMarkLine"] = discomfort_slices
+
+        self.eval_data = self.ego_df.copy()
+        self.eval_data['playerId'] = 1
+
+        return report_dict
+
+    def get_eval_data(self):
+        """Return the per-frame comfort evaluation columns."""
+        df = self.eval_data[
+            ['simTime', 'simFrame', 'playerId', 'ip_acc', 'ip_dec', 'slam_brake', 'slam_accel', 'cadence',
+             'cur_ego_path', 'cur_diff', 'R', 'R_ego', 'R_diff']].copy()
+        return df
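+
+
+# Minimal usage sketch (assumption: `data_processed`, `custom_data` and a
+# `scoreModel` factory are provided by the surrounding evaluation pipeline):
+#
+#     comfort = Comfort(data_processed, custom_data, scoreModel)
+#     report = comfort.report_statistic()
+#     eval_df = comfort.get_eval_data()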
|