single_case_evaluate.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
##################################################################
#
# Copyright (c) 2023 CICV, Inc. All Rights Reserved
#
##################################################################
"""
@Authors: yangzihao (yangzihao@china-icv.cn)
@Date: 2023/11/28
@Last Modified: 2023/11/28
@Summary: Evaluate single case.
"""
import os
import sys
import json
import traceback

import log
from common import score_grade, mileage_format, duration_format, string_concatenate, replace_key_with_value, \
    import_class
from config_parser import ConfigParse
from data_process import DataProcess
from report_generate import report_generate
from safe import Safe
from comfort import Comfort
from accurate import Accurate


def single_case_eval(configPath, dataPath, resultPath, trackPath, case_name):
    """
    :param configPath: path of the config file
    :param dataPath: path of the evaluation data
    :param resultPath: path of the evaluation results
    :param trackPath: path of the trajectory image
    :param case_name: case name
    :return: None
    """
    # get the logger
    logger = log.get_logger()

    # check whether the data directory is empty
    if len(os.listdir(dataPath)) == 0:
        print("No files in data_path!")  # abnormal path
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: No files in data_path!")
        sys.exit(-1)

    # parse the config file to obtain the weight and scoring coefficients of each metric
    try:
        config = ConfigParse(configPath)
    except Exception as e:
        print('Config file parsing ERROR!', e)
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Config file parsing ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)

    # evaluate the single case and generate the report
    try:
        reportDict = single_case_evaluate(config, dataPath, resultPath, case_name)  # evaluate the single case
        # reportDict = single_case_statistic(case_report_dict)  # enrich the single-case result

        # write the intermediate file report.json
        with open(f'{resultPath}/report.json', 'w', encoding='utf-8') as f:
            f.write(json.dumps(reportDict, ensure_ascii=False))

        # generate the report from report.json
        reportPdf = os.path.join(resultPath, 'report.pdf')
        report_generate(reportDict, reportPdf, trackPath)
    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Evaluate single case ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)


def single_case_evaluate(config, dataPath, resultPath, case_name):
    """
    This function takes a str of a single case's path and a dict of config info.
    The function uses the scripts of the 5 dimensions to analyze and evaluate the single case.

    Arguments:
        dataPath: A str of a single case's csv files path.
        config: A dict of config info, which contains the algorithm info, index info and so on.
        resultPath: A str of report path.
        case_name: A str of case name.

    Returns:
        case_report_dict: A dict containing evaluation results. Basic info, scores and descriptions of 5 dimensions.
    """
    # get the logger
    logger = log.get_logger()

    # data processing
    try:
        data_processed = DataProcess(dataPath, config)
    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Data processed ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)

    # select the scoring model (built-in model by default)
    scorePath = r"./"
    scoreFile = "score_weight"
    scoreModel = import_class(scorePath, scoreFile)

    # initialization
    case_report_dict = {
        "name": case_name
    }

    # score info
    dimension_list = config.dimension_list
    score_dimension_dict = {}
    bad_dimension_list = []
    dimension_dict = {}

    # evaluate each dimension
    try:
        logger.info(f"[case:{case_name}] SINGLE_CASE_EVAL: Dimension evaluate start: ")
        for dimension in dimension_list:
            # resolve the dimension class dynamically by name, e.g. "safe" -> Safe, "comfort" -> Comfort
            dimension_instance = globals()[dimension.capitalize()](data_processed, scoreModel, resultPath)
            # run the dimension's evaluation to analyze the data and collect report statistics
            dimension_report_dict = dimension_instance.report_statistic()
            dimension_dict[dimension] = dimension_report_dict
            score_dimension_dict[dimension] = dimension_report_dict['score']
            # flag dimensions with poor performance (score below 80)
            if score_dimension_dict[dimension] < 80:
                bad_dimension_list.append(dimension)
            # if dimension == "comfort":
            #     discomfortCount = dimension_report_dict['discomfortCount']
            #     bad_indicator_list.append("不舒适行为") if discomfortCount != 0 else good_indicator_list.append("不舒适行为")
    # on exception, print it and exit
    except Exception as e:
        traceback.print_exc()
        logger.error(f"[case:{case_name}] SINGLE_CASE_EVAL: Dimension evaluate ERROR: {repr(e)}!", exc_info=True)
        sys.exit(-1)

    # write the evaluation results into the report dict
    case_report_dict["details"] = dimension_dict

    # calculate the score and grade of the case
    weight_dimension_dict = config.dimension_weight
    score_dimension = {key: score_dimension_dict[key] * weight_dimension_dict[key] for key in score_dimension_dict}

    # compute the case score and grade, and write them into the report dict
    score_case = round(sum(score_dimension.values()), 2)
    grade_case = score_grade(score_case)
    case_report_dict['algorithmComprehensiveScore'] = score_case
    case_report_dict['algorithmLevel'] = grade_case
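    # A worked example of the weighting above, using hypothetical values (not taken
    # from any real config): with score_dimension_dict = {"safe": 90, "comfort": 72}
    # and weight_dimension_dict = {"safe": 0.75, "comfort": 0.25}, score_dimension
    # becomes {"safe": 67.5, "comfort": 18.0}, so score_case = round(67.5 + 18.0, 2) = 85.5.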

    # compute the case mileage and duration
    # case_report_dict['testMileage'] = data_processed.report_info['mileage']
    # case_report_dict['testDuration'] = data_processed.report_info['duration']
    case_report_dict['testMileage'] = mileage_format(data_processed.report_info['mileage'])
    case_report_dict['testDuration'] = duration_format(data_processed.report_info['duration'])

    # generate algorithmResultDescription2
    if not bad_dimension_list:
        algorithmResultDescription2 = '算法在各个维度的表现俱佳。'  # "The algorithm performs well in every dimension."
    else:
        str_bad_dimension = string_concatenate(bad_dimension_list)
        # replace English dimension names with Chinese ones
        algorithmResultDescription2 = f'建议算法优化在{str_bad_dimension}指标上的表现。'  # "It is recommended to optimize the algorithm's performance on the {str_bad_dimension} metrics."
    algorithmResultDescription = f"算法得分{score_case}分,总体表现{grade_case}。{algorithmResultDescription2}"  # "The algorithm scores {score_case}; overall performance: {grade_case}."
    case_report_dict['algorithmResultDescription'] = algorithmResultDescription

    # record the paths of the images generated during evaluation, for use in report generation
    graph_dict = {
        "speed": os.path.join(resultPath, "Speed.png"),
        "commandSpeed": os.path.join(resultPath, "CommandSpeed.png"),
        "linearAccelerate": os.path.join(resultPath, "LinearAccelerate.png"),
        "angularAccelerate": os.path.join(resultPath, "AngularAccelerate.png"),
        "trajectory": os.path.join(resultPath, "track.png")
    }
    case_report_dict['graphPath'] = graph_dict
    return case_report_dict
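

if __name__ == "__main__":
    # Minimal command-line entry point, added here only as an illustrative sketch:
    # the original module does not define one, and the argument order and usage
    # string below are assumptions rather than part of the project.
    if len(sys.argv) != 6:
        print("Usage: python single_case_evaluate.py <configPath> <dataPath> <resultPath> <trackPath> <case_name>")
        sys.exit(-1)
    single_case_eval(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])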