single_case_evaluate.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
##################################################################
#
# Copyright (c) 2023 CICV, Inc. All Rights Reserved
#
##################################################################
"""
@Authors: yangzihao(yangzihao@china-icv.cn)
@Date: 2023/11/28
@Last Modified: 2023/11/28
@Summary: Evaluate single case.
"""

import os
import sys
import pandas as pd
import numpy as np
import requests
import json
import pathlib
import time
import traceback

import log
from common import score_grade, mileage_format, duration_format, string_concatenate, replace_key_with_value, \
    import_class
from data_process import DataProcess
# from customDimension import CustomDimension
# from custom_run import custom_run
# from safe import Safe
# from function import Function
# from compliance import Compliance
from comfort import Comfort
from efficient import Efficient


def single_case_evaluate(path, config, case_name):
    """
    This function takes the path of a single case and a dict of config infos, and uses the
    scripts of the five evaluation dimensions to analyze and score the case.

    Arguments:
        path: A str of the path to a single case's csv files.
        config: A dict of config infos, which contains the algorithm info, index info and so on.
        case_name: A str of the case name.

    Returns:
        case_dict: A dict containing the evaluation results: basic infos plus the scores and
        descriptions of the evaluated dimensions.
    """
    logger = log.get_logger()

    try:
        data_processed = DataProcess(path, config)
    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Data processed ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)  # data_processed is unusable past this point, so abort as the dimension loop below does

    # custom metrics evaluate
    # customMetricPath = '../custom'
    # custom_metric_list = [metric for metric in config.metric_list if metric not in config.builtinMetricList]
    custom_data = dict()

    # if customMetricPath is empty
    # try:
    #     if custom_metric_list:
    #         custom_data = custom_run(customMetricPath, data_processed, custom_metric_list, case_name)
    #     else:
    #         custom_data = dict()
    #
    # except Exception as e:
    #     traceback.print_exc()
    #     logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Custom metric evaluate ERROR: {repr(e)}!", exc_info=True)

    # scoreFile = 'linear_score'
    # custom score
    # if config.scoreModel == "builtin":
    #     scorePath = r"./"
    #     scoreFile = "score_weight"
    #     scoreModel = import_class(scorePath, scoreFile)
    # else:
    #     # customScorePath = r"C:\Users\cicv\Desktop\ads_evaluate_V4.1.0\customScore"
    #     scoreFile = config.scoreModel
    #     scoreModel = import_class(customScorePath, scoreFile)
    scorePath = r"./"
    scoreFile = "score_weight"
    scoreModel = import_class(scorePath, scoreFile)
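    # Assumption: the module loaded via import_class() ("score_weight" in this build) exposes the
    # per-dimension scoring weights/functions that the dimension classes consume as `scoreModel`.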

    builtin_dimension_list = config.builtinDimensionList
    dimension_name_dict = config.dimension_name

    # initialization
    case_dict = {}
    case_dict["name"] = case_name

    # score info
    dimension_list = config.dimension_list
    score_dimension_dict = {}
    bad_dimension_list = []

    # statistic the index by good or bad score
    good_indicator_list = []
    bad_indicator_list = []
    # bad_indicator_count_list = []
    dimension_dict = {}
    # eval_data_list = []

    try:
        logger.info(f"[case:{case_name}] SINGLE_CASE_EVAL: Dimension evaluate start: ")
        for dimension in dimension_list:
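            # Resolve the dimension class dynamically: the dimension name is capitalized and looked
            # up in this module's globals(), e.g. "comfort" -> Comfort, "efficient" -> Efficient.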
            dimension_instance = globals()[dimension.capitalize()](data_processed, custom_data, scoreModel, path)
            # if dimension in builtin_dimension_list:
            #     dimension_instance = globals()[dimension.capitalize()](data_processed, custom_data, scoreModel)
            # else:
            #     dimension_instance = CustomDimension(dimension, data_processed, custom_data, scoreModel)
            dimension_report_dict = dimension_instance.report_statistic()
            dimension_dict[dimension] = dimension_report_dict
            score_dimension_dict[dimension] = dimension_report_dict['score']

            # dimension_eval_data = dimension_instance.get_eval_data()
            # if not dimension_eval_data.empty:
            #     eval_data_list.append(dimension_eval_data)
            #     eval_data_dict[dimension] = dimension_eval_data

            if score_dimension_dict[dimension] < 80:
                bad_dimension_list.append(dimension)

            # if dimension == "function":
            #     followStopCount = dimension_report_dict['followStopCount']
            #     bad_indicator_list.append("跟停行为") if followStopCount != 0 else good_indicator_list.append("跟停行为")
            # elif dimension == "compliance":
            #     illegalCount = dimension_report_dict['illegalCount']
            #     bad_indicator_list.append("违反交通规则行为") if illegalCount != 0 else good_indicator_list.append("违反交通规则行为")
            # elif dimension == "comfort":
            #     discomfortCount = dimension_report_dict['discomfortCount']
            #     bad_indicator_list.append("不舒适行为") if discomfortCount != 0 else good_indicator_list.append("不舒适行为")

    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Dimension evaluate ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)

    case_dict["details"] = dimension_dict

    # calculate the score and grade of the case
    weight_dimension_dict = config.dimension_weight
    print("weight_dimension_dict is", weight_dimension_dict)
    score_dimension = {key: score_dimension_dict[key] * weight_dimension_dict[key] for key in score_dimension_dict}
    score_case = round(sum(score_dimension.values()), 2)
    grade_case = score_grade(score_case)
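    # Worked example with hypothetical values: weights {"comfort": 0.5, "efficient": 0.5} and
    # dimension scores {"comfort": 90, "efficient": 70} give score_case = 90*0.5 + 70*0.5 = 80.0.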
    case_dict['algorithmComprehensiveScore'] = score_case
    case_dict['algorithmLevel'] = grade_case
    case_dict['testMileage'] = data_processed.report_info['mileage']
    case_dict['testDuration'] = data_processed.report_info['duration']

    # generate algorithmResultDescription1 according to the actual case
    # if bad_indicator_list:
    #     for type in bad_indicator_list:
    #         if type == '不舒适行为':
    #             bad_indicator_count_list.append(f'{discomfortCount}次不舒适行为')
    #         if type == '跟停行为':
    #             bad_indicator_count_list.append(f'{followStopCount}次跟停行为')
    #         if type == '违反交通规则行为':
    #             bad_indicator_count_list.append(f'违反交通规则{illegalCount}次')
    # str_bad_indicator_count = string_concatenate(bad_indicator_count_list) if bad_indicator_count_list else ""
    # str_good_indicator = string_concatenate(good_indicator_list) if good_indicator_list else ""
    # algorithmResultDescription1 = "车辆在本轮测试中,"
    # if str_bad_indicator_count:
    #     algorithmResultDescription1 += f"共出现{str_bad_indicator_count},"
    # if str_good_indicator:
    #     algorithmResultDescription1 += f"未出现{str_good_indicator}。"
    #
    # if not str_good_indicator and not str_bad_indicator_count:
    #     algorithmResultDescription1 += "【未进行舒适性、跟车及合规性评测】。"

    # generate algorithmResultDescription2
    if not bad_dimension_list:
        algorithmResultDescription2 = '综上所述,算法在各个维度的表现俱佳。'
    else:
        str_bad_dimension = string_concatenate(bad_dimension_list)
        # replace English dimension names with their Chinese display names
        algorithmResultDescription2 = f'综上所述,建议算法优化在{str_bad_dimension}指标上的表现。'

    description = f"算法总体表现{grade_case},高效性表现{dimension_dict['efficient']['level']},平顺性表现{dimension_dict['comfort']['level']}。"

    # case_dict['algorithmResultDescription1'] = algorithmResultDescription1
    # case_dict['algorithmResultDescription2'] = algorithmResultDescription2
    # case_dict['algorithmResultDescription1'] = replace_key_with_value(algorithmResultDescription1, dimension_name_dict)
    # case_dict['algorithmResultDescription'] = replace_key_with_value(algorithmResultDescription2, dimension_name_dict)
    case_dict['algorithmResultDescription'] = description
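    # Assumption: the PNG plots below are rendered into `path` by the dimension modules beforehand;
    # this function only records their locations for the report.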
    graph_dict = {
        "线速度": os.path.join(path, "LinearAccelerate.png"),
        "角速度": os.path.join(path, "AngularAccelerate.png"),
        "速度": os.path.join(path, "Speed.png")
    }
    case_dict['graphPath'] = graph_dict

    # for df in eval_data_list:
    #     eval_data_df = pd.merge(eval_data_df, df, on=['simTime', 'simFrame', 'playerId'], how="left")
    # eval_data_df = eval_data_df[eval_data_df['simFrame'] > 0]
    #
    # case_dict['evalData'] = eval_data_df
    # case_dict['playbackData'] = playback(eval_data_df)

    # report.json
    # case_dict = {
    #     'testMileage': mileage,
    #     'testDuration': duration,
    #     'algorithmResultDescription1': algorithmResultDescription1,
    #     'algorithmResultDescription2': algorithmResultDescription2,
    #     'algorithmComprehensiveScore': score_case,
    #     'algorithmLevel': grade_case,
    #
    #     'safe': safe_report_dict,
    #     'function': func_report_dict,
    #     'compliance': comp_report_dict,
    #     'comfort': comf_report_dict,
    #     'efficient': effi_report_dict,
    #
    #     'commonData': common_report_dict
    #
    # }

    return case_dict


def playback(df):
    """
    Build the per-frame playback table for the report.

    Args:
        df: A merged evaluation DataFrame containing a 'playerId' column plus the ego and
            (optionally) relative-motion channels selected below.

    Returns:
        A DataFrame with one row per frame of the ego vehicle; relative-motion columns are
        filled with "-" placeholders when no target-vehicle data is available.
    """
    target_id = 2

    # choose_df = df[["simTime", "simFrame", "v", "lat_v_rel", "lon_v_rel", "lat_d", "lon_d", "curvHor", "rollRel", "pitchRel"]].copy()
    ego_df = df[df['playerId'] == 1][["simTime", "simFrame", "v", "curvHor", "rollRel", "pitchRel"]].copy()

    if "lat_v_rel" in df.columns:
        rel_df = df[df['playerId'] == target_id][
            ["simTime", "simFrame", "lat_v_rel", "lon_v_rel", "lat_d", "lon_d"]].copy()
        result = pd.merge(ego_df, rel_df, on=['simTime', 'simFrame'], how='left')
    else:
        result = ego_df.copy()
        result["lat_v_rel"] = ["-"] * len(result)
        result["lon_v_rel"] = ["-"] * len(result)
        result["lat_d"] = ["-"] * len(result)
        result["lon_d"] = ["-"] * len(result)

    result.rename(
        columns={"v": "speed", "lat_v_rel": "latSpeedRel", "lon_v_rel": "lonSpeedRel", "lat_d": "latDistanceRel",
                 "lon_d": "lonDistanceRel"}, inplace=True)
    result = result[result['simFrame'] > 0]

    # simTime, simFrame, speed, latSpeedRel, lonSpeedRel,
    # latDistanceRel, lonDistanceRel, curvHor, rollRel, pitchRel
    return result


def single_case_statistic(case_dict):
    """
    This function adds the formatted basic infos to the case_dict.

    Arguments:
        case_dict: A dict of single case scores and descriptions.

    Returns:
        single_case_dict: A full dict of the single case's performance, with mileage and
        duration formatted for display.
    """
    single_case_dict = case_dict.copy()
    single_case_dict['testMileage'] = mileage_format(single_case_dict['testMileage'])
    single_case_dict['testDuration'] = duration_format(single_case_dict['testDuration'])

    # single_case_dict = {
    #     'testMileage': mileage,
    #     'testDuration': duration,
    #     'algorithmResultDescription1': algorithmResultDescription1,
    #     'algorithmResultDescription2': algorithmResultDescription2,
    #     'algorithmComprehensiveScore': score_case,
    #     'algorithmLevel': grade_case,
    #     'safe': safe_report_dict,
    #     'function': func_report_dict,
    #     'compliance': comp_report_dict,
    #     'comfort': comf_report_dict,
    #     'efficient': effi_report_dict
    # }

    return single_case_dict


def single_report_post(single_case_dict, case_path):
    """
    This function generates the single-case report based on single_case_dict.

    Arguments:
        single_case_dict: A dict of single case scores and descriptions.
        case_path: A str of the directory in which the generated report is stored.

    Returns:
        None
    """
    print('准备发送请求:')
    report_name = case_path.split('\\')[-2]  # get case name for report name
    url_json = 'http://36.110.106.156:18080/report/generate'
    data_json = json.dumps(single_case_dict)
    response = requests.post(url_json, data_json, headers={'Content-Type': 'application/json; charset=utf-8'})
    print("返回报告结果:", response)
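    # NOTE: the response status is not checked; the report service is assumed to return the PDF
    # bytes directly in the response body, which are written to disk below.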
    runtime = time.strftime('%Y%m%d%H%M%S', time.localtime())
    p = pathlib.Path(rf'{case_path}\{report_name}_{runtime}.pdf')
    p.write_bytes(response.content)
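

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: it chains evaluate -> format -> post.
    # All values below are hypothetical placeholders; the real config object comes from the
    # project's own loader and must expose the attributes read by single_case_evaluate()
    # (dimension_list, dimension_weight, dimension_name, builtinDimensionList).
    from types import SimpleNamespace

    demo_config = SimpleNamespace(
        dimension_list=["comfort", "efficient"],               # hypothetical dimensions to run
        dimension_weight={"comfort": 0.5, "efficient": 0.5},   # hypothetical weights
        dimension_name={"comfort": "平顺性", "efficient": "高效性"},  # hypothetical display names
        builtinDimensionList=["comfort", "efficient"],
    )
    # Windows-style path with a trailing backslash, matching case_path.split('\\')[-2] in
    # single_report_post(); replace with a real case folder.
    demo_path = "D:\\data\\demo_case\\"
    demo_case_name = "demo_case"

    case_dict = single_case_evaluate(demo_path, demo_config, demo_case_name)
    single_case_dict = single_case_statistic(case_dict)
    single_report_post(single_case_dict, demo_path)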