#!/usr/bin/env python
# -*- coding: utf-8 -*-
##################################################################
#
# Copyright (c) 2023 CICV, Inc. All Rights Reserved
#
##################################################################
"""
@Authors: yangzihao(yangzihao@china-icv.cn)
@Date: 2023/11/28
@Last Modified: 2023/11/28
@Summary: Evaluate a single case.
"""
import os
import sys
import json
import traceback

import log
from common import score_grade, mileage_format, duration_format, string_concatenate, replace_key_with_value, \
    import_class
from config_parser import ConfigParse
from data_process import DataProcess
from report_generate import report_generate
from safe import Safe
from comfort import Comfort
from accurate import Accurate
from efficient import Efficient


def single_case_eval(configPath, dataPath, resultPath, trackPath, case_name):
    """
    :param configPath: path of the config file
    :param dataPath: path of the evaluation data
    :param resultPath: path of the evaluation results
    :param trackPath: path of the trajectory image
    :param case_name: name of the case
    :return: None
    """
    # get the logger
    logger = log.get_logger()

    # make sure the data directory is not empty
    if len(os.listdir(dataPath)) == 0:
        print("No files in data_path!")  # abnormal path
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: No files in data_path!")
        sys.exit(-1)

    # parse the config file to get the weight and scoring coefficients of each metric
    try:
        config = ConfigParse(configPath)
    except Exception as e:
        print('Config file parsing ERROR!', e)
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Config file parsing ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)

    # evaluate the single case and generate the report
    try:
        reportDict = single_case_evaluate(config, dataPath, resultPath, case_name)  # evaluate the single case
        # reportDict = single_case_statistic(case_report_dict)  # enrich the single-case result

        # write the intermediate file report.json
        with open(f'{resultPath}/report.json', 'w', encoding='utf-8') as f:
            f.write(json.dumps(reportDict, ensure_ascii=False))

        # generate the report from report.json
        reportPdf = os.path.join(resultPath, 'report.pdf')
        report_generate(reportDict, reportPdf, trackPath)
    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Evaluate single case ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)
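
# Illustrative sketch (assumption, not produced verbatim by this module): based on the keys
# assigned in single_case_evaluate() below, report.json roughly takes the following shape.
# The values are hypothetical placeholders; the contents of "details" depend on each dimension
# class (e.g. Safe, Comfort, Accurate, Efficient) selected via config.dimension_list.
#
# {
#     "name": "case_01",
#     "details": {"safe": {...}, "comfort": {...}, "accurate": {...}, "efficient": {...}},
#     "algorithmComprehensiveScore": 85.5,
#     "algorithmLevel": "...",
#     "testMileage": "...",
#     "testDuration": "...",
#     "algorithmResultDescription": "...",
#     "graphPath": {"速度": ".../Speed.png", "...": "..."}
# }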


def single_case_evaluate(config, dataPath, resultPath, case_name):
    """
    This function takes the path of a single case and a dict of config info.
    It uses the scripts of the 5 dimensions to analyze and evaluate the single case.

    Arguments:
        config: A dict of config info, which contains the algorithm info, index info and so on.
        dataPath: A str of a single case's csv files path.
        resultPath: A str of the report path.
        case_name: A str of the case name.

    Returns:
        case_report_dict: A dict containing the evaluation results: basic info, scores and descriptions of the 5 dimensions.
    """
    # get the logger
    logger = log.get_logger()

    # data processing
    try:
        data_processed = DataProcess(dataPath, config)  # ego_df; obj_df; trajectory_df
    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Data processed ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)

    # select the scoring model (built-in model by default)
    scorePath = r"./"
    scoreFile = "score_weight"
    scoreModel = import_class(scorePath, scoreFile)

    # initialization
    case_report_dict = {
        "name": case_name
    }

    # score info
    dimension_list = config.dimension_list
    score_dimension_dict = {}
    bad_dimension_list = []
    dimension_dict = {}

    # evaluate each dimension
    try:
        logger.info(f"[case:{case_name}] SINGLE_CASE_EVAL: Dimension evaluate start: ")
        for dimension in dimension_list:
            # print("dimension is", dimension)
            # dynamically instantiate the dimension class (e.g. "safe" -> Safe) from the dimension list
            dimension_instance = globals()[dimension.capitalize()](data_processed, scoreModel, resultPath)
            # print("dimension_instance is", dimension_instance)
            # run the dimension's evaluation to analyze the case and collect the report statistics
            dimension_report_dict = dimension_instance.report_statistic()
            dimension_dict[dimension] = dimension_report_dict
            score_dimension_dict[dimension] = dimension_report_dict['score']
            # check the score of each dimension and collect the poorly performing ones
            if score_dimension_dict[dimension] < 80:
                bad_dimension_list.append(dimension)
            # if dimension == "comfort":
            #     discomfortCount = dimension_report_dict['discomfortCount']
            #     bad_indicator_list.append("不舒适行为") if discomfortCount != 0 else good_indicator_list.append("不舒适行为")
    # log the exception and exit
    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Dimension evaluate ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)

    # write the evaluation results into the report dict
    case_report_dict["details"] = dimension_dict

    # calculate the score and grade of the case
    weight_dimension_dict = config.dimension_weight
    score_dimension = {key: score_dimension_dict[key] * weight_dimension_dict[key] for key in score_dimension_dict}

    # calculate the case score and grade, then write them into the report dict
    score_case = round(sum(score_dimension.values()), 2)
    grade_case = score_grade(score_case)
    case_report_dict['algorithmComprehensiveScore'] = score_case
    case_report_dict['algorithmLevel'] = grade_case

    # calculate the case mileage and duration
    # case_report_dict['testMileage'] = data_processed.report_info['mileage']
    # case_report_dict['testDuration'] = data_processed.report_info['duration']
    case_report_dict['testMileage'] = mileage_format(data_processed.report_info['mileage'])
    case_report_dict['testDuration'] = duration_format(data_processed.report_info['duration'])

    # generate algorithmResultDescription2
    if not bad_dimension_list:
        algorithmResultDescription2 = '算法在各个维度的表现俱佳。'
    else:
        str_bad_dimension = string_concatenate(bad_dimension_list)
        # replace the English dimension names with their Chinese names
        algorithmResultDescription2 = f'建议算法优化在{str_bad_dimension}方面的表现。'
        algorithmResultDescription2 = replace_key_with_value(algorithmResultDescription2, config.dimension_name)
    algorithmResultDescription = f"算法得分{score_case}分,总体表现{grade_case}。{algorithmResultDescription2}"
    case_report_dict['algorithmResultDescription'] = algorithmResultDescription

    # record the paths of the images generated during evaluation, for report generation
    graph_dict = {
        # "speed": os.path.join(resultPath, "Speed.png"),
        # "commandSpeed": os.path.join(resultPath, "CommandSpeed.png"),
        # "linearAccelerate": os.path.join(resultPath, "LinearAccelerate.png"),
        # "angularAccelerate": os.path.join(resultPath, "AngularAccelerate.png"),
        # "trajectory": os.path.join(resultPath, "track.png")
        "速度": os.path.join(resultPath, "Speed.png"),
        "控制指令速度": os.path.join(resultPath, "CommandSpeed.png"),
        "线加速度": os.path.join(resultPath, "LinearAccelerate.png"),
        "角加速度": os.path.join(resultPath, "AngularAccelerate.png"),
        "行驶轨迹": os.path.join(resultPath, "track.png")
    }
    case_report_dict['graphPath'] = graph_dict

    return case_report_dict
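

# Minimal usage sketch (assumption): this file does not show its real entry point, so the call
# below only illustrates the expected arguments of single_case_eval(); all paths and the case
# name are hypothetical placeholders.
if __name__ == '__main__':
    single_case_eval(
        configPath='./config.json',              # hypothetical config file path
        dataPath='./data/case_01',               # hypothetical directory of case csv data
        resultPath='./result/case_01',           # hypothetical output directory for report.json / report.pdf
        trackPath='./result/case_01/track.png',  # hypothetical trajectory image path
        case_name='case_01'                      # hypothetical case name
    )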