LingxinMeng 2 years ago
parent commit 4f258801b0

+ 468 - 472
simulation-resource-scheduler/src/main/java/com/css/simulation/resource/scheduler/consumer/ProjectConsumer.java

@@ -5,13 +5,13 @@ import api.common.pojo.constants.DictConstants;
 import api.common.pojo.dto.ProjectMessageDTO;
 import api.common.util.*;
 import com.css.simulation.resource.scheduler.entity.*;
-import com.css.simulation.resource.scheduler.service.ProjectManager;
-import com.css.simulation.resource.scheduler.service.TaskManager;
 import com.css.simulation.resource.scheduler.mapper.*;
+import com.css.simulation.resource.scheduler.service.ProjectManager;
 import com.css.simulation.resource.scheduler.service.ProjectService;
 import com.css.simulation.resource.scheduler.util.ApacheKafkaUtil;
 import com.css.simulation.resource.scheduler.util.MinioUtil;
 import com.css.simulation.resource.scheduler.util.ProjectUtil;
+import com.css.simulation.resource.scheduler.util.TaskUtil;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import io.minio.MinioClient;
@@ -35,490 +35,486 @@ import java.util.concurrent.TimeUnit;
 @Component
 @Slf4j
 public class ProjectConsumer {
-  @Value("${scheduler.linux-path.temp}")
-  private String linuxTempPath;
-  @Value("${scheduler.minio-path.project-result}")
-  private String projectResultPathOfMinio;
-  @Value("${minio.bucket-name}")
-  private String bucketName;
-
-  // -------------------------------- Comment --------------------------------
-  @Resource
-  private MinioClient minioClient;
-  @Resource
-  private StringRedisTemplate stringRedisTemplate;
-  @Resource
-  private ManualProjectMapper manualProjectMapper;
-  @Resource
-  private AutoSubProjectMapper autoSubProjectMapper;
-  @Resource
-  private VehicleMapper vehicleMapper;
-  @Resource
-  private SensorCameraMapper sensorCameraMapper;
-  @Resource
-  private SensorOgtMapper sensorOgtMapper;
-  @Resource
-  private AlgorithmMapper algorithmMapper;
-  @Resource
-  private UserMapper userMapper;
-  @Resource
-  private ClusterMapper clusterMapper;
-  @Resource
-  private ProjectService projectService;
-  @Resource
-  private ProjectUtil projectUtil;
-  @Resource
-  private IndexMapper indexMapper;
-  @Resource
-  private TaskMapper taskMapper;
-  @Resource
-  private TaskManager taskManager;
-  @Resource
-  private ProjectManager projectManager;
-  @Resource
-  private KafkaTemplate<String, String> kafkaTemplate;
-  @Resource(name = "myKafkaAdmin")
-  private Admin kafkaAdminClient;
-
-  /**
-   * 接收到运行信息立即复制一份数据作为运行数据
-   *
-   * @param projectRecord 项目启动消息
-   */
-  @KafkaListener(groupId = "simulation-resource-scheduler", topics = "${scheduler.start-topic}")
-  @SneakyThrows
-  public void acceptMessage(ConsumerRecord<String, String> projectRecord) {
-    final ProjectMessageDTO projectMessageDTO = JsonUtil.jsonToBean(projectRecord.value(), ProjectMessageDTO.class);
-    log.info("接收到项目开始消息为:" + projectMessageDTO);
-    new Thread(() -> createTaskAndFixData(projectMessageDTO), "fix-" + projectMessageDTO.getProjectId()).start();
-  }
-
-
-  @SneakyThrows
-  public void createTaskAndFixData(ProjectMessageDTO projectMessageDTO) {
-    //* -------------------------------- 0 读取消息,创建临时目录 --------------------------------
-    String projectId = projectMessageDTO.getProjectId();                // 手动执行项目 id 或 自动执行子项目 id
-    String modelType = projectMessageDTO.getModelType();                // 模型类型,1 动力学模型 2 carsim模型
-    String packageId = projectMessageDTO.getScenePackageId();           // 场景测试包 id
-    String vehicleConfigId = projectMessageDTO.getVehicleConfigId();    // 模型配置 id
-    String algorithmId = projectMessageDTO.getAlgorithmId();            // 算法 id
-    long videoTime = projectMessageDTO.getMaxSimulationTime();          // 结果视频的时长
-    String projectType = projectMessageDTO.getType();                   // 项目类型
-    String userId = "";  // 用户 id
-    if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
-      userId = manualProjectMapper.selectCreateUserById(projectId);
-    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
-      userId = autoSubProjectMapper.selectCreateUserById(projectId);
+    @Value("${scheduler.linux-path.temp}")
+    private String linuxTempPath;
+    @Value("${scheduler.minio-path.project-result}")
+    private String projectResultPathOfMinio;
+    @Value("${minio.bucket-name}")
+    private String bucketName;
+
+    // -------------------------------- Comment --------------------------------
+    @Resource
+    private MinioClient minioClient;
+    @Resource
+    private StringRedisTemplate stringRedisTemplate;
+    @Resource
+    private ManualProjectMapper manualProjectMapper;
+    @Resource
+    private AutoSubProjectMapper autoSubProjectMapper;
+    @Resource
+    private VehicleMapper vehicleMapper;
+    @Resource
+    private SensorCameraMapper sensorCameraMapper;
+    @Resource
+    private SensorOgtMapper sensorOgtMapper;
+    @Resource
+    private AlgorithmMapper algorithmMapper;
+    @Resource
+    private UserMapper userMapper;
+    @Resource
+    private ClusterMapper clusterMapper;
+    @Resource
+    private ProjectService projectService;
+    @Resource
+    private ProjectUtil projectUtil;
+    @Resource
+    private IndexMapper indexMapper;
+    @Resource
+    private TaskMapper taskMapper;
+    @Resource
+    private TaskUtil taskUtil;
+    @Resource
+    private ProjectManager projectManager;
+    @Resource
+    private KafkaTemplate<String, String> kafkaTemplate;
+    @Resource(name = "myKafkaAdmin")
+    private Admin kafkaAdminClient;
+
+    /**
+     * 接收到运行信息立即复制一份数据作为运行数据
+     *
+     * @param projectRecord 项目启动消息
+     */
+    @KafkaListener(groupId = "simulation-resource-scheduler", topics = "${scheduler.start-topic}")
+    @SneakyThrows
+    public void acceptMessage(ConsumerRecord<String, String> projectRecord) {
+        final ProjectMessageDTO projectMessageDTO = JsonUtil.jsonToBean(projectRecord.value(), ProjectMessageDTO.class);
+        log.info("接收到项目开始消息为:" + projectMessageDTO);
+        new Thread(() -> createTaskAndFixData(projectMessageDTO), "fix-" + projectMessageDTO.getProjectId()).start();
     }
-    String projectPath = linuxTempPath + "project/" + projectId + "/";
-    FileUtil.mkdir(projectPath);
-    //5 将该 project 下所有旧的指标得分删除。
-    taskMapper.deleteByProject(projectId);
-    indexMapper.deleteFirstTargetScoreByProjectId(projectId);
-    indexMapper.deleteLastTargetScoreByProjectId(projectId);
-    // -------------------------------- 1 查询场景 --------------------------------
-    log.info("项目 " + projectId + " 开始查询场景。");
-    //根据场景测试包 packageId,拿到场景集合(包括重复场景),重复场景会在发送消息时根据叶子指标发送多次。
-    List<SceneEntity> sceneEntityList = projectService.getSceneList(projectId, packageId);
-    int taskTotal = sceneEntityList.size();
-    projectMessageDTO.setTaskTotal(taskTotal);
-    projectMessageDTO.setTaskCompleted(0);
-    //去重,之后发送消息的时候会补全指标,如果不去重的话会出现多个场景重复关联多个指标
-    Set<SceneEntity> sceneEntitySet = new HashSet<>(sceneEntityList);
-    log.info("项目 " + projectId + " 场景包括:" + sceneEntitySet);
-    // -------------------------------- 2 算法导入 --------------------------------
-    log.info("项目 " + projectId + " 开始算法导入。");
-    String algorithmDockerImage = projectService.handleAlgorithm(projectId, algorithmId);
-    log.info("项目 " + projectId + " 算法已导入:" + algorithmDockerImage);
-    // -------------------------------- 3 查询模型 --------------------------------
-    if ("1".equals(modelType)) {
-      log.info("项目 " + projectId + " 开始查询模型。");
-      //2-1 根据车辆配置id vehicleConfigId, 获取 模型信息和传感器信息
-      VehicleEntity vehicleEntity = vehicleMapper.selectByVehicleConfigId(vehicleConfigId);   // 车辆
-      List<CameraEntity> cameraEntityList = sensorCameraMapper.selectCameraByVehicleConfigId(vehicleConfigId);    // 摄像头
-      List<OgtEntity> ogtEntityList = sensorOgtMapper.selectOgtByVehicleId(vehicleConfigId); // 完美传感器
-      // -------------------------------- 4 保存任务消息 --------------------------------
-      log.info("项目 " + projectId + " 开始保存任务消息。");
-      List<TaskEntity> taskList = new ArrayList<>();
-      for (SceneEntity sceneEntity : sceneEntitySet) {
-        String sceneId = sceneEntity.getId();
-        //3-1 可能会存在多个指标下有同样的场景,所以会查出多个指标,多个指标的场景需要发送多次
-        List<String> lastTargetIdList = null;
-        if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
-          lastTargetIdList = indexMapper.selectLeafIndexIdByManualProjectIdAndSceneId(projectId, "%" + sceneId + "%");
-        } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
-          lastTargetIdList = indexMapper.selectLeafIndexIdByAutoSubProjectIdAndSceneId(projectId, "%" + sceneId + "%");
-        }
-        if (CollectionUtil.isEmpty(lastTargetIdList)) {
-          throw new RuntimeException("项目 " + projectId + " 使用的场景测试包 " + sceneId + " 不存在指标。");
-        }
-        for (String lastTargetId : lastTargetIdList) {
-          String taskId = StringUtil.getRandomUUID();
-          // 保存任务信息
-          TaskEntity taskEntity = TaskEntity.builder() // run_start_time 和 run_end_time 不填
-              .id(taskId).pId(projectId).sceneId(sceneId).lastTargetId(lastTargetId).sceneName(sceneEntity.getName()).sceneType(sceneEntity.getType()).runState(DictConstants.TASK_PENDING).runResultFilePath(projectResultPathOfMinio + projectId + "/" + taskId).build();
-          taskEntity.setCreateTime(TimeUtil.getNowForMysql());
-          taskEntity.setCreateUserId(userId);
-          taskEntity.setModifyTime(TimeUtil.getNowForMysql());
-          taskEntity.setModifyUserId(userId);
-          taskEntity.setModifyTime(TimeUtil.getNowForMysql());
-          taskEntity.setIsDeleted("0");
-          taskList.add(taskEntity);
-          // 将 xosc、xodr、osgb 全部上传到仿真结果路径
-          String scenarioOsc = sceneEntity.getScenarioOsc();
-          String[] splitXosc = scenarioOsc.split("/");
-          String xoscName = splitXosc[splitXosc.length - 1];
-          String[] xoscNameSplit = xoscName.split("\\.");
-          String xoscSuffix = xoscNameSplit[xoscNameSplit.length - 1];
-          String xoscPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xoscName;
-          String xoscPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xoscSuffix;
-          MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsc, xoscPathOfLinux);
-          MinioUtil.uploadFromFile(minioClient, xoscPathOfLinux, bucketName, xoscPathOfMinio);
-
-          String scenarioOdr = sceneEntity.getScenarioOdr();
-          String[] splitXodr = scenarioOdr.split("/");
-          String xodrName = splitXodr[splitXodr.length - 1];
-          String[] xodrNameSplit = xodrName.split("\\.");
-          String xodrSuffix = xodrNameSplit[xodrNameSplit.length - 1];
-          String xodrPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xodrName;
-          String xodrPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xodrSuffix;
-          MinioUtil.downloadToFile(minioClient, bucketName, scenarioOdr, xodrPathOfLinux);
-          MinioUtil.uploadFromFile(minioClient, xodrPathOfLinux, bucketName, xodrPathOfMinio);
-
-          String scenarioOsgb = sceneEntity.getScenarioOsgb();
-          String[] splitOsgb = scenarioOsgb.split("/");
-          String osgbName = splitOsgb[splitOsgb.length - 1];
-          String[] osgbNameSplit = osgbName.split("\\.");
-          String osgbSuffix = osgbNameSplit[osgbNameSplit.length - 1];
-          String osgbPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + osgbName;
-          String osgbPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + osgbSuffix;
-          MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsgb, osgbPathOfLinux);
-          MinioUtil.uploadFromFile(minioClient, osgbPathOfLinux, bucketName, osgbPathOfMinio);
-          log.info("已经将 xosc、xodr、osgb 上传到 minio 的结果文件目录:" + projectResultPathOfMinio);
-
-          // 组装 task 消息
-          TaskMessageEntity taskMessageEntity = TaskMessageEntity.builder().info(InfoEntity.builder().project_id(taskEntity.getPId()).task_id(taskEntity.getId()).task_path(taskEntity.getRunResultFilePath()).default_time(videoTime).build()).scenario(ScenarioEntity.builder().scenario_osc(xoscPathOfMinio).scenario_odr(xodrPathOfMinio).scenario_osgb(osgbPathOfMinio).build()).vehicle(VehicleTO.builder().model(ModelEntity.builder().model_label(vehicleEntity.getModelLabel()).build()).dynamics(DynamicsTO.builder().dynamics_maxspeed(vehicleEntity.getMaxSpeed()).dynamics_enginepower(vehicleEntity.getEnginePower()).dynamics_maxdecel(vehicleEntity.getMaxDeceleration()).dynamics_maxsteering(vehicleEntity.getMaxSteeringAngle()).dynamics_mass(vehicleEntity.getMass()).dynamics_frontsurfaceeffective(vehicleEntity.getFrontSurfaceEffective()).dynamics_airdragcoefficient(vehicleEntity.getAirDragCoefficient()).dynamics_rollingresistance(vehicleEntity.getRollingResistanceCoefficient()).dynamics_wheeldiameter(vehicleEntity.getWheelDiameter()).dynamics_wheeldrive(vehicleEntity.getWheelDrive()).dynamics_overallefficiency(vehicleEntity.getOverallEfficiency()).dynamics_distfront(vehicleEntity.getFrontDistance()).dynamics_distrear(vehicleEntity.getRearDistance()).dynamics_distleft(vehicleEntity.getLeftDistance()).dynamics_distright(vehicleEntity.getRightDistance()).dynamics_distheight(vehicleEntity.getHeightDistance()).dynamics_wheelbase(vehicleEntity.getWheelbase()).build()).sensors(SensorsEntity.builder()   // 根据 vehicleId 查询绑定的传感器列表
-              .camera(cameraEntityList).OGT(ogtEntityList).build()).build()).build();
-          FileUtil.writeStringToLocalFile(JsonUtil.beanToJson(taskMessageEntity), projectPath + taskId + ".json");
-          log.info("项目 " + projectId + " 将任务消息转成 json 保存到临时目录等待资源分配后执行:" + taskMessageEntity.getInfo().getTask_id());
+
+
+    public void createTaskAndFixData(ProjectMessageDTO projectMessageDTO) {
+        //* -------------------------------- 0 读取消息,创建临时目录 --------------------------------
+        String projectId = projectMessageDTO.getProjectId();                // 手动执行项目 id 或 自动执行子项目 id
+        String modelType = projectMessageDTO.getModelType();                // 模型类型,1 动力学模型 2 carsim模型
+        String packageId = projectMessageDTO.getScenePackageId();           // 场景测试包 id
+        String vehicleConfigId = projectMessageDTO.getVehicleConfigId();    // 模型配置 id
+        String algorithmId = projectMessageDTO.getAlgorithmId();            // 算法 id
+        long videoTime = projectMessageDTO.getMaxSimulationTime();          // 结果视频的时长
+        String projectType = projectMessageDTO.getType();                   // 项目类型
+        try {
+            String userId = "";  // 用户 id
+            if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
+                userId = manualProjectMapper.selectCreateUserById(projectId);
+            } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
+                userId = autoSubProjectMapper.selectCreateUserById(projectId);
+            }
+            String projectPath = linuxTempPath + "project/" + projectId + "/";
+            FileUtil.mkdir(projectPath);
+            //5 将该 project 下所有旧的指标得分删除。
+            taskMapper.deleteByProject(projectId);
+            indexMapper.deleteFirstTargetScoreByProjectId(projectId);
+            indexMapper.deleteLastTargetScoreByProjectId(projectId);
+            // -------------------------------- 1 查询场景 --------------------------------
+            log.info("项目 " + projectId + " 开始查询场景。");
+            //根据场景测试包 packageId,拿到场景集合(包括重复场景),重复场景会在发送消息时根据叶子指标发送多次。
+            List<SceneEntity> sceneEntityList = projectService.getSceneList(projectId, packageId);
+            int taskTotal = sceneEntityList.size();
+            projectMessageDTO.setTaskTotal(taskTotal);
+            projectMessageDTO.setTaskCompleted(0);
+            //去重,之后发送消息的时候会补全指标,如果不去重的话会出现多个场景重复关联多个指标
+            Set<SceneEntity> sceneEntitySet = new HashSet<>(sceneEntityList);
+            log.info("项目 " + projectId + " 场景包括:" + sceneEntitySet);
+            // -------------------------------- 2 算法导入 --------------------------------
+            log.info("项目 " + projectId + " 开始算法导入。");
+            String algorithmDockerImage = projectService.handleAlgorithm(projectId, algorithmId);
+            log.info("项目 " + projectId + " 算法已导入:" + algorithmDockerImage);
+            // -------------------------------- 3 查询模型 --------------------------------
+            if ("1".equals(modelType)) {
+                log.info("项目 " + projectId + " 开始查询模型。");
+                //2-1 根据车辆配置id vehicleConfigId, 获取 模型信息和传感器信息
+                VehicleEntity vehicleEntity = vehicleMapper.selectByVehicleConfigId(vehicleConfigId);   // 车辆
+                List<CameraEntity> cameraEntityList = sensorCameraMapper.selectCameraByVehicleConfigId(vehicleConfigId);    // 摄像头
+                List<OgtEntity> ogtEntityList = sensorOgtMapper.selectOgtByVehicleId(vehicleConfigId); // 完美传感器
+                // -------------------------------- 4 保存任务消息 --------------------------------
+                log.info("项目 " + projectId + " 开始保存任务消息。");
+                List<TaskEntity> taskList = new ArrayList<>();
+                for (SceneEntity sceneEntity : sceneEntitySet) {
+                    String sceneId = sceneEntity.getId();
+                    //3-1 可能会存在多个指标下有同样的场景,所以会查出多个指标,多个指标的场景需要发送多次
+                    List<String> lastTargetIdList = null;
+                    if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
+                        lastTargetIdList = indexMapper.selectLeafIndexIdByManualProjectIdAndSceneId(projectId, "%" + sceneId + "%");
+                    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
+                        lastTargetIdList = indexMapper.selectLeafIndexIdByAutoSubProjectIdAndSceneId(projectId, "%" + sceneId + "%");
+                    }
+                    if (CollectionUtil.isEmpty(lastTargetIdList)) {
+                        throw new RuntimeException("项目 " + projectId + " 使用的场景测试包 " + sceneId + " 不存在指标。");
+                    }
+                    for (String lastTargetId : lastTargetIdList) {
+                        String taskId = StringUtil.getRandomUUID();
+                        // 保存任务信息
+                        TaskEntity taskEntity = TaskEntity.builder() // run_start_time 和 run_end_time 不填
+                                .id(taskId).pId(projectId).sceneId(sceneId).lastTargetId(lastTargetId).sceneName(sceneEntity.getName()).sceneType(sceneEntity.getType()).runState(DictConstants.TASK_PENDING).runResultFilePath(projectResultPathOfMinio + projectId + "/" + taskId).build();
+                        taskEntity.setCreateTime(TimeUtil.getNowForMysql());
+                        taskEntity.setCreateUserId(userId);
+                        taskEntity.setModifyTime(TimeUtil.getNowForMysql());
+                        taskEntity.setModifyUserId(userId);
+                        taskEntity.setModifyTime(TimeUtil.getNowForMysql());
+                        taskEntity.setIsDeleted("0");
+                        taskList.add(taskEntity);
+                        // 将 xosc、xodr、osgb 全部上传到仿真结果路径
+                        String scenarioOsc = sceneEntity.getScenarioOsc();
+                        String[] splitXosc = scenarioOsc.split("/");
+                        String xoscName = splitXosc[splitXosc.length - 1];
+                        String[] xoscNameSplit = xoscName.split("\\.");
+                        String xoscSuffix = xoscNameSplit[xoscNameSplit.length - 1];
+                        String xoscPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xoscName;
+                        String xoscPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xoscSuffix;
+                        MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsc, xoscPathOfLinux);
+                        MinioUtil.uploadFromFile(minioClient, xoscPathOfLinux, bucketName, xoscPathOfMinio);
+
+                        String scenarioOdr = sceneEntity.getScenarioOdr();
+                        String[] splitXodr = scenarioOdr.split("/");
+                        String xodrName = splitXodr[splitXodr.length - 1];
+                        String[] xodrNameSplit = xodrName.split("\\.");
+                        String xodrSuffix = xodrNameSplit[xodrNameSplit.length - 1];
+                        String xodrPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xodrName;
+                        String xodrPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xodrSuffix;
+                        MinioUtil.downloadToFile(minioClient, bucketName, scenarioOdr, xodrPathOfLinux);
+                        MinioUtil.uploadFromFile(minioClient, xodrPathOfLinux, bucketName, xodrPathOfMinio);
+
+                        String scenarioOsgb = sceneEntity.getScenarioOsgb();
+                        String[] splitOsgb = scenarioOsgb.split("/");
+                        String osgbName = splitOsgb[splitOsgb.length - 1];
+                        String[] osgbNameSplit = osgbName.split("\\.");
+                        String osgbSuffix = osgbNameSplit[osgbNameSplit.length - 1];
+                        String osgbPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + osgbName;
+                        String osgbPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + osgbSuffix;
+                        MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsgb, osgbPathOfLinux);
+                        MinioUtil.uploadFromFile(minioClient, osgbPathOfLinux, bucketName, osgbPathOfMinio);
+                        log.info("已经将 xosc、xodr、osgb 上传到 minio 的结果文件目录:" + projectResultPathOfMinio);
+
+                        // 组装 task 消息
+                        TaskMessageEntity taskMessageEntity = TaskMessageEntity.builder().info(InfoEntity.builder().project_id(taskEntity.getPId()).task_id(taskEntity.getId()).task_path(taskEntity.getRunResultFilePath()).default_time(videoTime).build()).scenario(ScenarioEntity.builder().scenario_osc(xoscPathOfMinio).scenario_odr(xodrPathOfMinio).scenario_osgb(osgbPathOfMinio).build()).vehicle(VehicleTO.builder().model(ModelEntity.builder().model_label(vehicleEntity.getModelLabel()).build()).dynamics(DynamicsTO.builder().dynamics_maxspeed(vehicleEntity.getMaxSpeed()).dynamics_enginepower(vehicleEntity.getEnginePower()).dynamics_maxdecel(vehicleEntity.getMaxDeceleration()).dynamics_maxsteering(vehicleEntity.getMaxSteeringAngle()).dynamics_mass(vehicleEntity.getMass()).dynamics_frontsurfaceeffective(vehicleEntity.getFrontSurfaceEffective()).dynamics_airdragcoefficient(vehicleEntity.getAirDragCoefficient()).dynamics_rollingresistance(vehicleEntity.getRollingResistanceCoefficient()).dynamics_wheeldiameter(vehicleEntity.getWheelDiameter()).dynamics_wheeldrive(vehicleEntity.getWheelDrive()).dynamics_overallefficiency(vehicleEntity.getOverallEfficiency()).dynamics_distfront(vehicleEntity.getFrontDistance()).dynamics_distrear(vehicleEntity.getRearDistance()).dynamics_distleft(vehicleEntity.getLeftDistance()).dynamics_distright(vehicleEntity.getRightDistance()).dynamics_distheight(vehicleEntity.getHeightDistance()).dynamics_wheelbase(vehicleEntity.getWheelbase()).build()).sensors(SensorsEntity.builder()   // 根据 vehicleId 查询绑定的传感器列表
+                                .camera(cameraEntityList).OGT(ogtEntityList).build()).build()).build();
+                        FileUtil.writeStringToLocalFile(JsonUtil.beanToJson(taskMessageEntity), projectPath + taskId + ".json");
+                        log.info("项目 " + projectId + " 将任务消息转成 json 保存到临时目录等待资源分配后执行:" + taskMessageEntity.getInfo().getTask_id());
+                    }
+                }
+                taskUtil.batchInsertTask(taskList);
+                log.info("项目 " + projectId + " 共有 " + taskList.size() + " 个任务,已保存到数据库");
+            } else if ("2".equals(modelType)) {
+                log.info("项目 " + projectId + " 开始查询模型。");
+
+                VehicleEntity vehicleEntity = vehicleMapper.selectByVehicleConfigId(vehicleConfigId);   // 车辆
+                List<CameraEntity> cameraEntityList = sensorCameraMapper.selectCameraByVehicleConfigId(vehicleConfigId);    // 摄像头
+                List<OgtEntity> ogtEntityList = sensorOgtMapper.selectOgtByVehicleId(vehicleConfigId); // 完美传感器
+                // -------------------------------- 4 保存任务消息 --------------------------------
+                log.info("项目 " + projectId + " 开始保存任务消息。");
+                List<TaskEntity> taskList = new ArrayList<>();
+                for (SceneEntity sceneEntity : sceneEntitySet) {
+                    String sceneId = sceneEntity.getId();
+                    //3-1 可能会存在多个指标下有同样的场景,所以会查出多个指标,多个指标的场景需要发送多次
+                    List<String> lastTargetIdList = null;
+                    if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
+                        lastTargetIdList = indexMapper.selectLeafIndexIdByManualProjectIdAndSceneId(projectId, "%" + sceneId + "%");
+                    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
+                        lastTargetIdList = indexMapper.selectLeafIndexIdByAutoSubProjectIdAndSceneId(projectId, "%" + sceneId + "%");
+                    }
+                    if (CollectionUtil.isEmpty(lastTargetIdList)) {
+                        throw new RuntimeException("项目 " + projectId + " 使用的场景测试包 " + sceneId + " 不存在指标。");
+                    }
+                    for (String lastTargetId : lastTargetIdList) {
+                        String taskId = StringUtil.getRandomUUID();
+                        // 保存任务信息
+                        TaskEntity taskEntity = TaskEntity.builder() // run_start_time 和 run_end_time 不填
+                                .id(taskId).pId(projectId).sceneId(sceneId).lastTargetId(lastTargetId).sceneName(sceneEntity.getName()).sceneType(sceneEntity.getType()).runState(DictConstants.TASK_PENDING).runResultFilePath(projectResultPathOfMinio + projectId + "/" + taskId).build();
+                        taskEntity.setCreateTime(TimeUtil.getNowForMysql());
+                        taskEntity.setCreateUserId(userId);
+                        taskEntity.setModifyTime(TimeUtil.getNowForMysql());
+                        taskEntity.setModifyUserId(userId);
+                        taskEntity.setModifyTime(TimeUtil.getNowForMysql());
+                        taskEntity.setIsDeleted("0");
+                        taskList.add(taskEntity);
+                        // 将 xosc、xodr、osgb 全部上传到仿真结果路径
+                        String scenarioOsc = sceneEntity.getScenarioOsc();
+                        String[] splitXosc = scenarioOsc.split("/");
+                        String xoscName = splitXosc[splitXosc.length - 1];
+                        String[] xoscNameSplit = xoscName.split("\\.");
+                        String xoscSuffix = xoscNameSplit[xoscNameSplit.length - 1];
+                        String xoscPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xoscName;
+                        String xoscPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xoscSuffix;
+                        MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsc, xoscPathOfLinux);
+                        MinioUtil.uploadFromFile(minioClient, xoscPathOfLinux, bucketName, xoscPathOfMinio);
+
+                        String scenarioOdr = sceneEntity.getScenarioOdr();
+                        String[] splitXodr = scenarioOdr.split("/");
+                        String xodrName = splitXodr[splitXodr.length - 1];
+                        String[] xodrNameSplit = xodrName.split("\\.");
+                        String xodrSuffix = xodrNameSplit[xodrNameSplit.length - 1];
+                        String xodrPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xodrName;
+                        String xodrPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xodrSuffix;
+                        MinioUtil.downloadToFile(minioClient, bucketName, scenarioOdr, xodrPathOfLinux);
+                        MinioUtil.uploadFromFile(minioClient, xodrPathOfLinux, bucketName, xodrPathOfMinio);
+
+                        String scenarioOsgb = sceneEntity.getScenarioOsgb();
+                        String[] splitOsgb = scenarioOsgb.split("/");
+                        String osgbName = splitOsgb[splitOsgb.length - 1];
+                        String[] osgbNameSplit = osgbName.split("\\.");
+                        String osgbSuffix = osgbNameSplit[osgbNameSplit.length - 1];
+                        String osgbPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + osgbName;
+                        String osgbPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + osgbSuffix;
+                        MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsgb, osgbPathOfLinux);
+                        MinioUtil.uploadFromFile(minioClient, osgbPathOfLinux, bucketName, osgbPathOfMinio);
+                        log.info("已经将 xosc、xodr、osgb 上传到 minio 的结果文件目录:" + projectResultPathOfMinio);
+
+                        // 组装 task 消息
+                        // carsim 不需要查询模型参数
+                        TaskMessageEntity taskMessageEntity = TaskMessageEntity.builder().info(InfoEntity.builder().project_id(taskEntity.getPId()).task_id(taskEntity.getId()).task_path(taskEntity.getRunResultFilePath()).default_time(videoTime).build()).scenario(ScenarioEntity.builder().scenario_osc(xoscPathOfMinio).scenario_odr(xodrPathOfMinio).scenario_osgb(osgbPathOfMinio).build()).vehicle(VehicleTO.builder().model(ModelEntity.builder().model_label(vehicleEntity.getModelLabel()).build()).dynamics(null).sensors(SensorsEntity.builder()   // 根据 vehicleId 查询绑定的传感器列表
+                                .camera(cameraEntityList).OGT(ogtEntityList).build()).build()).build();
+
+                        FileUtil.writeStringToLocalFile(JsonUtil.beanToJson(taskMessageEntity), projectPath + taskId + ".json");
+                        log.info("项目 " + projectId + " 将任务消息转成 json 保存到临时目录等待资源分配后执行:" + taskMessageEntity.getInfo().getTask_id());
+                    }
+                }
+                taskUtil.batchInsertTask(taskList);
+                log.info("项目 " + projectId + " 共有 " + taskList.size() + " 个任务,已保存到数据库");
+            }
+
+            //* -------------------------------- 4 开始排队 --------------------------------
+            cacheProject(projectMessageDTO);
+        } catch (Exception e) {
+            log.error("项目报错。", e);
+            projectService.stopProject(projectId, projectType);
+            throw new RuntimeException(e);
         }
-      }
-      taskManager.batchInsertTask(taskList);
-      log.info("项目 " + projectId + " 共有 " + taskList.size() + " 个任务,已保存到数据库");
-    } else if ("2".equals(modelType)) {
-      log.info("项目 " + projectId + " 开始查询模型。");
-
-      VehicleEntity vehicleEntity = vehicleMapper.selectByVehicleConfigId(vehicleConfigId);   // 车辆
-      List<CameraEntity> cameraEntityList = sensorCameraMapper.selectCameraByVehicleConfigId(vehicleConfigId);    // 摄像头
-      List<OgtEntity> ogtEntityList = sensorOgtMapper.selectOgtByVehicleId(vehicleConfigId); // 完美传感器
-      // -------------------------------- 4 保存任务消息 --------------------------------
-      log.info("项目 " + projectId + " 开始保存任务消息。");
-      List<TaskEntity> taskList = new ArrayList<>();
-      for (SceneEntity sceneEntity : sceneEntitySet) {
-        String sceneId = sceneEntity.getId();
-        //3-1 可能会存在多个指标下有同样的场景,所以会查出多个指标,多个指标的场景需要发送多次
-        List<String> lastTargetIdList = null;
+
+    }
+
+
+    /**
+     * 任务运行前首先判断用户是否拥有可分配资源
+     *
+     * @param projectMessageDTO 项目启动消息
+     */
+    @SneakyThrows
+    public void cacheProject(ProjectMessageDTO projectMessageDTO) {
+        log.info("判断用户是否拥有可分配资源:" + projectMessageDTO);
+        //1 读取 kafka 的 project 信息
+        String projectId = projectMessageDTO.getProjectId();    // 手动执行项目 id 或 自动执行子项目 id
+        long parallelism = projectMessageDTO.getParallelism();   // 项目并行度
+        String projectType = projectMessageDTO.getType(); // 项目类型
+        //2 根据 projectId 获取创建用户 id
+        String userId;
         if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
-          lastTargetIdList = indexMapper.selectLeafIndexIdByManualProjectIdAndSceneId(projectId, "%" + sceneId + "%");
+            userId = manualProjectMapper.selectCreateUserById(projectId);
         } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
-          lastTargetIdList = indexMapper.selectLeafIndexIdByAutoSubProjectIdAndSceneId(projectId, "%" + sceneId + "%");
+            userId = autoSubProjectMapper.selectCreateUserById(projectId);
+        } else {
+            log.error("项目类型错误:" + projectMessageDTO);
+            return;
         }
-        if (CollectionUtil.isEmpty(lastTargetIdList)) {
-          throw new RuntimeException("项目 " + projectId + " 使用的场景测试包 " + sceneId + " 不存在指标。");
+        if (StringUtil.isEmpty(userId)) {
+            log.error("未查询到项目创建人:" + projectMessageDTO);
+            return;
         }
-        for (String lastTargetId : lastTargetIdList) {
-          String taskId = StringUtil.getRandomUUID();
-          // 保存任务信息
-          TaskEntity taskEntity = TaskEntity.builder() // run_start_time 和 run_end_time 不填
-              .id(taskId).pId(projectId).sceneId(sceneId).lastTargetId(lastTargetId).sceneName(sceneEntity.getName()).sceneType(sceneEntity.getType()).runState(DictConstants.TASK_PENDING).runResultFilePath(projectResultPathOfMinio + projectId + "/" + taskId).build();
-          taskEntity.setCreateTime(TimeUtil.getNowForMysql());
-          taskEntity.setCreateUserId(userId);
-          taskEntity.setModifyTime(TimeUtil.getNowForMysql());
-          taskEntity.setModifyUserId(userId);
-          taskEntity.setModifyTime(TimeUtil.getNowForMysql());
-          taskEntity.setIsDeleted("0");
-          taskList.add(taskEntity);
-          // 将 xosc、xodr、osgb 全部上传到仿真结果路径
-          String scenarioOsc = sceneEntity.getScenarioOsc();
-          String[] splitXosc = scenarioOsc.split("/");
-          String xoscName = splitXosc[splitXosc.length - 1];
-          String[] xoscNameSplit = xoscName.split("\\.");
-          String xoscSuffix = xoscNameSplit[xoscNameSplit.length - 1];
-          String xoscPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xoscName;
-          String xoscPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xoscSuffix;
-          MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsc, xoscPathOfLinux);
-          MinioUtil.uploadFromFile(minioClient, xoscPathOfLinux, bucketName, xoscPathOfMinio);
-
-          String scenarioOdr = sceneEntity.getScenarioOdr();
-          String[] splitXodr = scenarioOdr.split("/");
-          String xodrName = splitXodr[splitXodr.length - 1];
-          String[] xodrNameSplit = xodrName.split("\\.");
-          String xodrSuffix = xodrNameSplit[xodrNameSplit.length - 1];
-          String xodrPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + xodrName;
-          String xodrPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + xodrSuffix;
-          MinioUtil.downloadToFile(minioClient, bucketName, scenarioOdr, xodrPathOfLinux);
-          MinioUtil.uploadFromFile(minioClient, xodrPathOfLinux, bucketName, xodrPathOfMinio);
-
-          String scenarioOsgb = sceneEntity.getScenarioOsgb();
-          String[] splitOsgb = scenarioOsgb.split("/");
-          String osgbName = splitOsgb[splitOsgb.length - 1];
-          String[] osgbNameSplit = osgbName.split("\\.");
-          String osgbSuffix = osgbNameSplit[osgbNameSplit.length - 1];
-          String osgbPathOfLinux = linuxTempPath + "video/" + projectId + "/" + taskId + "/" + osgbName;
-          String osgbPathOfMinio = projectResultPathOfMinio + projectId + "/" + taskId + "/" + taskId + "." + osgbSuffix;
-          MinioUtil.downloadToFile(minioClient, bucketName, scenarioOsgb, osgbPathOfLinux);
-          MinioUtil.uploadFromFile(minioClient, osgbPathOfLinux, bucketName, osgbPathOfMinio);
-          log.info("已经将 xosc、xodr、osgb 上传到 minio 的结果文件目录:" + projectResultPathOfMinio);
-
-          // 组装 task 消息
-          // carsim 不需要查询模型参数
-          TaskMessageEntity taskMessageEntity = TaskMessageEntity.builder().info(InfoEntity.builder().project_id(taskEntity.getPId()).task_id(taskEntity.getId()).task_path(taskEntity.getRunResultFilePath()).default_time(videoTime).build()).scenario(ScenarioEntity.builder().scenario_osc(xoscPathOfMinio).scenario_odr(xodrPathOfMinio).scenario_osgb(osgbPathOfMinio).build()).vehicle(VehicleTO.builder().model(ModelEntity.builder().model_label(vehicleEntity.getModelLabel()).build()).dynamics(null).sensors(SensorsEntity.builder()   // 根据 vehicleId 查询绑定的传感器列表
-              .camera(cameraEntityList).OGT(ogtEntityList).build()).build()).build();
-
-          FileUtil.writeStringToLocalFile(JsonUtil.beanToJson(taskMessageEntity), projectPath + taskId + ".json");
-          log.info("项目 " + projectId + " 将任务消息转成 json 保存到临时目录等待资源分配后执行:" + taskMessageEntity.getInfo().getTask_id());
+        //3 获取用户类型(管理员账户、管理员子账户、普通账户、普通子账户)(独占、共享)
+        UserEntity userEntity = userMapper.selectById(userId);
+        log.info("项目 " + projectId + " 的创建人为:" + userEntity);
+        String roleCode = userEntity.getRoleCode();
+        String useType = userEntity.getUseType();
+        ClusterEntity clusterEntity;
+        if (DictConstants.ROLE_CODE_SYSADMIN.equals(roleCode) || DictConstants.ROLE_CODE_ADMIN.equals(roleCode)) {  //3-1 管理员账户和管理员子账户直接执行
+            log.info("项目 " + projectId + " 的创建人 " + userId + " 为管理员账户或管理员子账户,直接判断服务器能否执行。");
+            PrefixEntity redisPrefix = projectUtil.getRedisPrefixByClusterIdAndProjectId(DictConstants.SYSTEM_CLUSTER_ID, projectId);
+            run(projectMessageDTO, DictConstants.SYSTEM_CLUSTER_ID, redisPrefix.getProjectRunningKey(), redisPrefix.getProjectWaitingKey());
+            return;
+        } else if (DictConstants.ROLE_CODE_UESR.equals(roleCode)) { //3-2 普通账户,不管是独占还是共享,都在自己的集群里排队,根据自己的独占节点排队
+            clusterEntity = clusterMapper.selectByUserId(userId);
+            log.info("项目 " + projectId + " 的创建人 " + userId + " 为普通账户(包括独占或共享都在自己的集群),集群为:" + clusterEntity);
+        } else if (DictConstants.ROLE_CODE_SUBUESR.equals(roleCode)) {
+            if (DictConstants.USER_TYPE_EXCLUSIVE.equals(useType)) {   //3-3 普通子账户,根据自己的独占节点排队
+                clusterEntity = clusterMapper.selectByUserId(userId);
+                log.info("项目 " + projectId + " 的创建人 " + userId + " 为普通独占子账户(自己的集群),集群为:" + clusterEntity);
+            } else {    //3-4 共享子账户,根据父账户的共享节点排队
+                String parentUserId = userEntity.getCreateUserId();
+                clusterEntity = clusterMapper.selectByUserId(parentUserId);
+                log.info("项目 " + projectId + " 的创建人 " + userId + " 为普通共享子账户(父账户的集群),集群为:" + clusterEntity);
+            }
+        } else {
+            log.error("项目 " + projectId + " 的创建人 " + userId + " 为未知账户类型,不予执行!");
+            return;
+        }
+        // 获取拥有的节点数量,即仿真软件证书数量
+        String clusterId = clusterEntity.getId();
+        int simulationLicenseNumber = clusterEntity.getNumSimulationLicense();
+        // 获取该集群中正在运行的项目,如果没有则立即执行
+        PrefixEntity redisPrefix = projectUtil.getRedisPrefixByClusterIdAndProjectId(clusterId, projectId);
+        // 获取正在运行的项目的并行度总和
+        int currentParallelismSum = projectUtil.getCurrentParallelismSum(redisPrefix.getClusterRunningPrefix());
+        // 如果执行后的并行度总和小于最大节点数则执行,否则不执行
+        if (currentParallelismSum + parallelism <= simulationLicenseNumber) {
+            run(projectMessageDTO, clusterId, redisPrefix.getProjectRunningKey(), redisPrefix.getProjectWaitingKey());
+        } else {
+            log.info("项目 " + projectId + " 并行度超出账户允许,加入等待队列,暂不执行。 ");
+            wait(redisPrefix.getProjectWaitingKey(), projectMessageDTO);
         }
-      }
-      taskManager.batchInsertTask(taskList);
-      log.info("项目 " + projectId + " 共有 " + taskList.size() + " 个任务,已保存到数据库");
     }
 
-    //* -------------------------------- 4 开始排队 --------------------------------
-    cacheProject(projectMessageDTO);
-
-  }
-
-
-  /**
-   * 任务运行前首先判断用户是否拥有可分配资源
-   *
-   * @param projectMessageDTO 项目启动消息
-   */
-  @SneakyThrows
-  public void cacheProject(ProjectMessageDTO projectMessageDTO) {
-    log.info("判断用户是否拥有可分配资源:" + projectMessageDTO);
-    //1 读取 kafka 的 project 信息
-    String projectId = projectMessageDTO.getProjectId();    // 手动执行项目 id 或 自动执行子项目 id
-    long parallelism = projectMessageDTO.getParallelism();   // 项目并行度
-    String projectType = projectMessageDTO.getType(); // 项目类型
-    //2 根据 projectId 获取创建用户 id
-    String userId;
-    if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
-      userId = manualProjectMapper.selectCreateUserById(projectId);
-    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
-      userId = autoSubProjectMapper.selectCreateUserById(projectId);
-    } else {
-      log.error("项目类型错误:" + projectMessageDTO);
-      return;
-    }
-    if (StringUtil.isEmpty(userId)) {
-      log.error("未查询到项目创建人:" + projectMessageDTO);
-      return;
-    }
-    //3 获取用户类型(管理员账户、管理员子账户、普通账户、普通子账户)(独占、共享)
-    UserEntity userEntity = userMapper.selectById(userId);
-    log.info("项目 " + projectId + " 的创建人为:" + userEntity);
-    String roleCode = userEntity.getRoleCode();
-    String useType = userEntity.getUseType();
-    ClusterEntity clusterEntity;
-    if (DictConstants.ROLE_CODE_SYSADMIN.equals(roleCode) || DictConstants.ROLE_CODE_ADMIN.equals(roleCode)) {  //3-1 管理员账户和管理员子账户直接执行
-      log.info("项目 " + projectId + " 的创建人 " + userId + " 为管理员账户或管理员子账户,直接判断服务器能否执行。");
-      PrefixEntity redisPrefix = projectUtil.getRedisPrefixByClusterIdAndProjectId(DictConstants.SYSTEM_CLUSTER_ID, projectId);
-      run(projectMessageDTO, DictConstants.SYSTEM_CLUSTER_ID, redisPrefix.getProjectRunningKey(), redisPrefix.getProjectWaitingKey());
-      return;
-    } else if (DictConstants.ROLE_CODE_UESR.equals(roleCode)) { //3-2 普通账户,不管是独占还是共享,都在自己的集群里排队,根据自己的独占节点排队
-      clusterEntity = clusterMapper.selectByUserId(userId);
-      log.info("项目 " + projectId + " 的创建人 " + userId + " 为普通账户(包括独占或共享都在自己的集群),集群为:" + clusterEntity);
-    } else if (DictConstants.ROLE_CODE_SUBUESR.equals(roleCode)) {
-      if (DictConstants.USER_TYPE_EXCLUSIVE.equals(useType)) {   //3-3 普通子账户,根据自己的独占节点排队
-        clusterEntity = clusterMapper.selectByUserId(userId);
-        log.info("项目 " + projectId + " 的创建人 " + userId + " 为普通独占子账户(自己的集群),集群为:" + clusterEntity);
-      } else {    //3-4 共享子账户,根据父账户的共享节点排队
-        String parentUserId = userEntity.getCreateUserId();
-        clusterEntity = clusterMapper.selectByUserId(parentUserId);
-        log.info("项目 " + projectId + " 的创建人 " + userId + " 为普通共享子账户(父账户的集群),集群为:" + clusterEntity);
-      }
-    } else {
-      log.error("项目 " + projectId + " 的创建人 " + userId + " 为未知账户类型,不予执行!");
-      return;
-    }
-    // 获取拥有的节点数量,即仿真软件证书数量
-    String clusterId = clusterEntity.getId();
-    int simulationLicenseNumber = clusterEntity.getNumSimulationLicense();
-    // 获取该集群中正在运行的项目,如果没有则立即执行
-    PrefixEntity redisPrefix = projectUtil.getRedisPrefixByClusterIdAndProjectId(clusterId, projectId);
-    // 获取正在运行的项目的并行度总和
-    int currentParallelismSum = projectUtil.getCurrentParallelismSum(redisPrefix.getClusterRunningPrefix());
-    // 如果执行后的并行度总和小于最大节点数则执行,否则不执行
-    if (currentParallelismSum + parallelism <= simulationLicenseNumber) {
-      run(projectMessageDTO, clusterId, redisPrefix.getProjectRunningKey(), redisPrefix.getProjectWaitingKey());
-    } else {
-      log.info("项目 " + projectId + " 并行度超出账户允许,加入等待队列,暂不执行。 ");
-      wait(redisPrefix.getProjectWaitingKey(), projectMessageDTO);
-    }
-  }
-
-  //* -------------------------------- 运行 --------------------------------
-
-  /**
-   * @param projectMessageDTO 初始接收到的项目启动信息
-   * @param clusterId         集群 id
-   * @param projectRunningKey projectRunningKey
-   * @param projectWaitingKey projectWaitingKey
-   */
-  public void run(ProjectMessageDTO projectMessageDTO, String clusterId, String projectRunningKey, String projectWaitingKey) {
-
-    String projectId = projectMessageDTO.getProjectId();
-    int parallelism = projectMessageDTO.getParallelism();  // 期望并行度
-    //1 获取集群剩余可用并行度
-    int restParallelism = projectUtil.getRestParallelism();
-    //2 判断剩余可用并行度是否大于项目并行度,否则加入扩充队列
-    if (restParallelism > 0L) {
-      log.info("集群 " + clusterId + " 执行项目 " + projectId);
-      // 设置实际的并行度
-      projectMessageDTO.setCurrentParallelism(Math.min(restParallelism, parallelism));   // 设置实际的并行度
-      parseProject(projectMessageDTO, projectRunningKey);
-    } else {
-      log.info("服务器资源不够,项目 " + projectId + " 暂时加入等待队列。");
-      wait(projectWaitingKey, projectMessageDTO);
+    //* -------------------------------- 运行 --------------------------------
+
+    /**
+     * @param projectMessageDTO 初始接收到的项目启动信息
+     * @param clusterId         集群 id
+     * @param projectRunningKey projectRunningKey
+     * @param projectWaitingKey projectWaitingKey
+     */
+    public void run(ProjectMessageDTO projectMessageDTO, String clusterId, String projectRunningKey, String projectWaitingKey) {
+
+        String projectId = projectMessageDTO.getProjectId();
+        int parallelism = projectMessageDTO.getParallelism();  // 期望并行度
+        //1 获取集群剩余可用并行度
+        int restParallelism = projectUtil.getRestParallelism();
+        //2 判断剩余可用并行度是否大于项目并行度,否则加入扩充队列
+        if (restParallelism > 0L) {
+            log.info("集群 " + clusterId + " 执行项目 " + projectId);
+            // 设置实际的并行度
+            projectMessageDTO.setCurrentParallelism(Math.min(restParallelism, parallelism));   // 设置实际的并行度
+            parseProject(projectMessageDTO, projectRunningKey);
+        } else {
+            log.info("服务器资源不够,项目 " + projectId + " 暂时加入等待队列。");
+            wait(projectWaitingKey, projectMessageDTO);
+        }
     }
-  }
-
-  /**
-   * @param projectMessageDTO 初始接收到的项目启动信息
-   * @param projectRunningKey projectRunningKey
-   */
-  @SneakyThrows
-  public void parseProject(ProjectMessageDTO projectMessageDTO, String projectRunningKey) {
-    String projectId = projectMessageDTO.getProjectId();    // 项目 id
-    String modelType = projectMessageDTO.getModelType();
-    String vehicleConfigId = projectMessageDTO.getVehicleConfigId();
-    ProjectEntity projectEntity = projectUtil.getProjectByProjectId(projectId);
-    String isChoiceGpu = projectEntity.getIsChoiceGpu();
-    int currentParallelism = projectMessageDTO.getCurrentParallelism();   // 当前并行度
-    String algorithmId = projectMessageDTO.getAlgorithmId();    // 算法 id
-    String projectPath = linuxTempPath + "project/" + projectId + "/";
-    // -------------------------------- 1 获取任务 json 列表 --------------------------------
-    List<String> taskJsonList = FileUtil.listAbsolutePathByTypeAndLength(projectPath, "json", 37);
-    int taskTotal = taskJsonList.size();
-    projectMessageDTO.setTaskTotal(taskTotal);
-    projectMessageDTO.setTaskCompleted(0);
-    // 设置任务数量之后,获取运行节点,并将项目运行信息放入 redis
-    //1 获取剩余并行度和即将使用的各node的并行度
-    Map<String, Integer> nodeMap0 = projectUtil.getNodeMap();
-    Map<String, Integer> nodeMap = projectUtil.getNodeMapToUse(Math.min(currentParallelism, taskTotal));
-    //2 将指定 node 的并行度减少
-    nodeMap.keySet().forEach(nodeName -> projectUtil.decrementParallelismOfGpuNode(nodeName, nodeMap.get(nodeName)));
-    // 重新设置实际使用的并行度并保存到 redis
-    int realCurrentParallelism = nodeMap.values().stream().mapToInt(parallelism -> parallelism).sum();
-    projectMessageDTO.setCurrentParallelism(realCurrentParallelism);
-    log.info("项目 " + projectId + " 运行在:" + nodeMap);
-    stringRedisTemplate.opsForValue().set(projectRunningKey, JsonUtil.beanToJson(projectMessageDTO));
-    //* -------------------------------- 3 根据算法id查询算法名称 --------------------------------
-    String algorithmDockerImage = algorithmMapper.selectDockerImageById(algorithmId);
-    // -------------------------------- 4 发送任务消息 --------------------------------
-    List<NodeEntity> nodeListToCount = projectUtil.getNodeListToCount(nodeMap);
-    int messageNumber = 0;
-    ApacheKafkaUtil.createTopic(kafkaAdminClient, projectId, realCurrentParallelism, (short) 1);   // 创建主题
-    TimeUnit.SECONDS.sleep(7);
-    // 需要即时启动的任务(并行度的大小)
-    CopyOnWriteArrayList<String> yamlToRunRedisKeyList = new CopyOnWriteArrayList<>();
-    for (String taskJsonPath : taskJsonList) {
-      String taskId = FileUtil.getFilenameWithoutSuffix(taskJsonPath);
-      // 保存运行中的任务信息
-      String taskMessageKey = projectRunningKey + ":task:" + taskId + ":message";
-      String taskJson = FileUtil.read(taskJsonPath);
-      stringRedisTemplate.opsForValue().set(taskMessageKey, taskJson);
-
-      //4-5 将 projectId 作为 topic 名称,根据 parallelism 分散发送 task 信息到 kafka
-      SendResult<String, String> stringStringSendResult = kafkaTemplate.send(projectId, messageNumber % currentParallelism, taskId, taskJson).get();
-      RecordMetadata recordMetadata = stringStringSendResult.getRecordMetadata();
-      String topic = recordMetadata.topic();  // 消息发送到的topic
-      int partition = recordMetadata.partition(); // 消息发送到的分区
-      long offset = recordMetadata.offset();  // 消息在分区内的offset
-      log.info("发送消息成功, 主题 topic 为:" + topic + " 分区 partition 为:" + partition + " 偏移量为:" + offset + " 消息体为:" + taskJson);
-      //4-6 发送成功过的同时创建 pod.yaml 文件并把文件地址存到 redis
-      // 选一个 count 最少的 node
-      String currentNodeName = "";
-      NodeEntity currentNodeEntity = null;
-      int currentCount = Integer.MAX_VALUE;
-      log.info("各节点已经预定的任务个数为:" + nodeListToCount);
-      for (NodeEntity nodeEntity : nodeListToCount) {
-        int tempCount = nodeEntity.getCount();
-        String tempNodeName = nodeEntity.getNodeName();
-        if (tempCount < currentCount) {
-          currentCount = tempCount;
-          currentNodeName = tempNodeName;
-          currentNodeEntity = nodeEntity;
+
+    /**
+     * @param projectMessageDTO 初始接收到的项目启动信息
+     * @param projectRunningKey projectRunningKey
+     */
+    @SneakyThrows
+    public void parseProject(ProjectMessageDTO projectMessageDTO, String projectRunningKey) {
+        String projectId = projectMessageDTO.getProjectId();    // 项目 id
+        String modelType = projectMessageDTO.getModelType();
+        String vehicleConfigId = projectMessageDTO.getVehicleConfigId();
+        ProjectEntity projectEntity = projectUtil.getProjectByProjectId(projectId);
+        String isChoiceGpu = projectEntity.getIsChoiceGpu();
+        int currentParallelism = projectMessageDTO.getCurrentParallelism();   // 当前并行度
+        String algorithmId = projectMessageDTO.getAlgorithmId();    // 算法 id
+        String projectPath = linuxTempPath + "project/" + projectId + "/";
+        // -------------------------------- 1 获取任务 json 列表 --------------------------------
+        List<String> taskJsonList = FileUtil.listAbsolutePathByTypeAndLength(projectPath, "json", 37);
+        int taskTotal = taskJsonList.size();
+        projectMessageDTO.setTaskTotal(taskTotal);
+        projectMessageDTO.setTaskCompleted(0);
+        // 设置任务数量之后,获取运行节点,并将项目运行信息放入 redis
+        //1 获取剩余并行度和即将使用的各node的并行度
+        Map<String, Integer> nodeMap0 = projectUtil.getNodeMap();
+        Map<String, Integer> nodeMap = projectUtil.getNodeMapToUse(Math.min(currentParallelism, taskTotal));
+        //2 将指定 node 的并行度减少
+        nodeMap.keySet().forEach(nodeName -> projectUtil.decrementParallelismOfGpuNode(nodeName, nodeMap.get(nodeName)));
+        // 重新设置实际使用的并行度并保存到 redis
+        int realCurrentParallelism = nodeMap.values().stream().mapToInt(parallelism -> parallelism).sum();
+        projectMessageDTO.setCurrentParallelism(realCurrentParallelism);
+        log.info("项目 " + projectId + " 运行在:" + nodeMap);
+        stringRedisTemplate.opsForValue().set(projectRunningKey, JsonUtil.beanToJson(projectMessageDTO));
+        //* -------------------------------- 3 根据算法id查询算法名称 --------------------------------
+        String algorithmDockerImage = algorithmMapper.selectDockerImageById(algorithmId);
+        // -------------------------------- 4 发送任务消息 --------------------------------
+        List<NodeEntity> nodeListToCount = projectUtil.getNodeListToCount(nodeMap);
+        int messageNumber = 0;
+        ApacheKafkaUtil.createTopic(kafkaAdminClient, projectId, realCurrentParallelism, (short) 1);   // 创建主题
+        TimeUnit.SECONDS.sleep(7);
+        // 需要即时启动的任务(并行度的大小)
+        CopyOnWriteArrayList<String> yamlToRunRedisKeyList = new CopyOnWriteArrayList<>();
+        for (String taskJsonPath : taskJsonList) {
+            String taskId = FileUtil.getFilenameWithoutSuffix(taskJsonPath);
+            // 保存运行中的任务信息
+            String taskMessageKey = projectRunningKey + ":task:" + taskId + ":message";
+            String taskJson = FileUtil.read(taskJsonPath);
+            stringRedisTemplate.opsForValue().set(taskMessageKey, taskJson);
+
+            //4-5 将 projectId 作为 topic 名称,根据 parallelism 分散发送 task 信息到 kafka
+            SendResult<String, String> stringStringSendResult = kafkaTemplate.send(projectId, messageNumber % currentParallelism, taskId, taskJson).get();
+            RecordMetadata recordMetadata = stringStringSendResult.getRecordMetadata();
+            String topic = recordMetadata.topic();  // 消息发送到的topic
+            int partition = recordMetadata.partition(); // 消息发送到的分区
+            long offset = recordMetadata.offset();  // 消息在分区内的offset
+            log.info("发送消息成功, 主题 topic 为:" + topic + " 分区 partition 为:" + partition + " 偏移量为:" + offset + " 消息体为:" + taskJson);
+            //4-6 发送成功过的同时创建 pod.yaml 文件并把文件地址存到 redis
+            // 选一个 count 最少的 node
+            String currentNodeName = "";
+            NodeEntity currentNodeEntity = null;
+            int currentCount = Integer.MAX_VALUE;
+            log.info("各节点已经预定的任务个数为:" + nodeListToCount);
+            for (NodeEntity nodeEntity : nodeListToCount) {
+                int tempCount = nodeEntity.getCount();
+                String tempNodeName = nodeEntity.getNodeName();
+                if (tempCount < currentCount) {
+                    currentCount = tempCount;
+                    currentNodeName = tempNodeName;
+                    currentNodeEntity = nodeEntity;
+                }
+            }
+            if (currentNodeEntity == null) {
+                String errorMessage = "挑选节点失败。";
+                log.info(errorMessage);
+                throw new RuntimeException(errorMessage);
+            }
+            currentNodeEntity.setCount(currentNodeEntity.getCount() + 1);
+            Integer cpuOrder = null;
+            if (currentCount == 0) {
+                // 根据各节点剩余并行度,倒序获取 cpu 编号
+                cpuOrder = nodeMap0.get(currentNodeName) - 1;
+                nodeMap0.put(currentNodeName, cpuOrder);
+            }
+            // 只有准备启动(即 currentCount == 0)的时候才指定 cpu 编号
+            log.info("创建任务 " + taskId + " 的 yaml:是否使用 gpu (0是1否)" + isChoiceGpu + ",当前节点已创建 yaml 个数为:" + currentCount + ",当前节点名称为:" + currentNodeName + ",当前 cpu 编号为:" + cpuOrder);
+            String yamlRedisKey = projectManager.createTempYaml(projectId, vehicleConfigId, modelType, algorithmDockerImage, currentNodeName, partition, offset, isChoiceGpu, cpuOrder);
+
+            if (currentCount == 0) {
+                String podName = yamlRedisKey.split(":")[yamlRedisKey.split(":").length - 1];
+                log.info("将 pod 加入到启动列表 " + podName);
+                yamlToRunRedisKeyList.add(yamlRedisKey);
+            }
+            messageNumber++;
+        }
+        TimeUnit.SECONDS.sleep(6);
+        log.info("项目 " + projectId + " 共发送了 " + messageNumber + " 条消息,准备首先启动 " + yamlToRunRedisKeyList);
+        for (String redisKey : yamlToRunRedisKeyList) {
+            projectUtil.createPodBegin(projectId, redisKey);
         }
-      }
-      if (currentNodeEntity == null) {
-        String errorMessage = "挑选节点失败。";
-        log.info(errorMessage);
-        throw new RuntimeException(errorMessage);
-      }
-      currentNodeEntity.setCount(currentNodeEntity.getCount() + 1);
-      Integer cpuOrder = null;
-      if (currentCount == 0) {
-        // 根据各节点剩余并行度,倒序获取 cpu 编号
-        cpuOrder = nodeMap0.get(currentNodeName) - 1;
-        nodeMap0.put(currentNodeName, cpuOrder);
-      }
-      // 只有准备启动(即 currentCount == 0)的时候才指定 cpu 编号
-      log.info("创建任务 " + taskId + " 的 yaml:是否使用 gpu (0是1否)" + isChoiceGpu + ",当前节点已创建 yaml 个数为:" + currentCount + ",当前节点名称为:" + currentNodeName + ",当前 cpu 编号为:" + cpuOrder);
-      String yamlRedisKey = projectManager.createTempYaml(projectId, vehicleConfigId, modelType, algorithmDockerImage, currentNodeName, partition, offset, isChoiceGpu, cpuOrder);
-
-      if (currentCount == 0) {
-        String podName = yamlRedisKey.split(":")[yamlRedisKey.split(":").length - 1];
-        log.info("将 pod 加入到启动列表 " + podName);
-        yamlToRunRedisKeyList.add(yamlRedisKey);
-      }
-      messageNumber++;
+        log.info("项目 " + projectId + " 已经启动 " + yamlToRunRedisKeyList);
     }
-    TimeUnit.SECONDS.sleep(6);
-    log.info("项目 " + projectId + " 共发送了 " + messageNumber + " 条消息,准备首先启动 " + yamlToRunRedisKeyList);
-    for (String redisKey : yamlToRunRedisKeyList) {
-      projectUtil.createPodBegin(projectId, redisKey);
+
+
+    //* -------------------------------- 等待 --------------------------------
+
+    /**
+     * @param projectWaitingKey 项目等待 key
+     * @param projectMessageDTO 项目信息
+     */
+    public void wait(String projectWaitingKey, ProjectMessageDTO projectMessageDTO) {
+        stringRedisTemplate.opsForValue().set(projectWaitingKey, JsonUtil.beanToJson(projectMessageDTO));
     }
-    log.info("项目 " + projectId + " 已经启动 " + yamlToRunRedisKeyList);
-  }
-
-
-  //* -------------------------------- 等待 --------------------------------
-
-  /**
-   * @param projectWaitingKey 项目等待 key
-   * @param projectMessageDTO 项目信息
-   */
-  public void wait(String projectWaitingKey, ProjectMessageDTO projectMessageDTO) {
-    stringRedisTemplate.opsForValue().set(projectWaitingKey, JsonUtil.beanToJson(projectMessageDTO));
-  }
-  //* -------------------------------- 结束 --------------------------------
-
-  /**
-   * {
-   * "projectId": "sadfasdfs",	// 项目 id
-   * "type": "1",	// 项目类型
-   * }
-   */
-  @KafkaListener(groupId = "simulation-resource-scheduler", topics = "${scheduler.stop-topic}")
-  @SneakyThrows
-  public void stopProject(ConsumerRecord<String, String> stopRecord) {
-    log.info("接收到的项目终止消息为:" + stopRecord);
-    //1 读取 kafka 的项目停止信息
-    String json = stopRecord.value();
-    ObjectMapper objectMapper = new ObjectMapper();
-    JsonNode jsonNode = objectMapper.readTree(json);
-    // 将项目状态修改为终止中
-    String projectId = jsonNode.path("projectId").asText();
-    String type = jsonNode.path("type").asText();
-    if (DictConstants.PROJECT_TYPE_MANUAL.equals(type)) {
-      manualProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_TERMINATING, TimeUtil.getNowForMysql());
-    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(type)) {
-      autoSubProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_TERMINATING, TimeUtil.getNowForMysql());
+    //* -------------------------------- 结束 --------------------------------
+
+    /**
+     * {
+     * "projectId": "sadfasdfs",	// 项目 id
+     * "type": "1",	// 项目类型
+     * }
+     */
+    @KafkaListener(groupId = "simulation-resource-scheduler", topics = "${scheduler.stop-topic}")
+    @SneakyThrows
+    public void stopProject(ConsumerRecord<String, String> stopRecord) {
+        log.info("接收到的项目终止消息为:" + stopRecord);
+        JsonNode jsonNode = new ObjectMapper().readTree(stopRecord.value());
+        String projectId = jsonNode.path("projectId").asText();
+        String type = jsonNode.path("type").asText();
+        projectService.stopProject(projectId, type);
     }
-    projectService.stopProject(projectId, type);
-  }
 
 
 }

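A note on the dispatch loop in createTaskAndFixData above: each task message is spread over the topic's partitions with messageNumber % currentParallelism, and the pod slot for the task is reserved on whichever node currently has the fewest bookings (the nodeListToCount scan). Below is a minimal, self-contained sketch of that selection step; the Node class and pickLeastLoaded helper are illustrative names only and are not part of this repository.

    // Sketch only: Node and pickLeastLoaded are illustrative stand-ins for
    // NodeEntity and the selection loop in ProjectConsumer.createTaskAndFixData.
    import java.util.Comparator;
    import java.util.List;

    public final class NodePicker {

        // Stand-in for NodeEntity: a node name plus how many tasks are already booked on it.
        static final class Node {
            final String name;
            int count;

            Node(String name, int count) {
                this.name = name;
                this.count = count;
            }
        }

        // Take the node with the smallest booking count and reserve one more slot on it.
        static Node pickLeastLoaded(List<Node> nodes) {
            Node least = nodes.stream()
                    .min(Comparator.comparingInt(n -> n.count))
                    .orElseThrow(() -> new RuntimeException("no node available"));
            least.count++;
            return least;
        }

        public static void main(String[] args) {
            List<Node> nodes = List.of(new Node("node-1", 2), new Node("node-2", 0));
            Node chosen = pickLeastLoaded(nodes);
            System.out.println(chosen.name + " now holds " + chosen.count + " booked task(s)");
        }
    }
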
+ 28 - 28
simulation-resource-scheduler/src/main/java/com/css/simulation/resource/scheduler/controller/TaskController.java

@@ -15,34 +15,34 @@ import javax.annotation.Resource;
 @RequestMapping("/task")
 public class TaskController {
 
-  @Resource
-  private TaskService taskService;
-
-  // -------------------------------- Comment --------------------------------
-
-  /**
-   * 修改任务状态
-   */
-  @GetMapping("/state")
-  public void taskState(@RequestParam("taskId") String taskId, @RequestParam("state") String state, @RequestParam("podName") String podName) {
-    taskService.taskState(taskId, state, podName);
-  }
-
-  /**
-   * Pod 的心跳接口
-   */
-  @GetMapping("/tick")
-  public void taskTick(@RequestParam("taskId") String taskId) {
-    taskService.taskTick(taskId);
-  }
-
-  /**
-   * 任务执行前调用该接口,确定该任务没有被终止
-   */
-  @GetMapping("/confirm")
-  public Boolean taskConfirm(@RequestParam("taskId") String taskId) {
-    return taskService.taskConfirm(taskId);
-  }
+    @Resource
+    private TaskService taskService;
+
+    // -------------------------------- Comment --------------------------------
+
+    /**
+     * 修改任务状态
+     */
+    @GetMapping("/state")
+    public void taskState(@RequestParam("taskId") String taskId, @RequestParam("state") String state, @RequestParam("podName") String podName) {
+        taskService.taskState(taskId, state, podName);
+    }
+
+    /**
+     * Pod 的心跳接口
+     */
+    @GetMapping("/tick")
+    public void taskTick(@RequestParam("taskId") String taskId) {
+        taskService.taskTick(taskId);
+    }
+
+    /**
+     * 任务执行前调用该接口,确定该任务没有被终止
+     */
+    @GetMapping("/confirm")
+    public Boolean taskConfirm(@RequestParam("taskId") String taskId) {
+        return taskService.taskConfirm(taskId);
+    }
 
 
 }

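For reference, the simulation pods drive these endpoints with plain GET requests: /tick as a heartbeat, /confirm before a task starts (it returns true only while the task is still pending), and /state to report the final state. Below is an illustrative caller using java.net.http; the base URL, port and taskId value are placeholders and are not taken from this repository.

    // Sketch only: base address and taskId are placeholders; the real callers
    // run inside the simulation pods.
    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public final class TaskEndpointClientSketch {

        private static final String BASE = "http://scheduler-host:8080/task"; // placeholder address
        private static final HttpClient CLIENT = HttpClient.newHttpClient();

        private static String get(String pathAndQuery) throws Exception {
            HttpRequest request = HttpRequest.newBuilder(URI.create(BASE + pathAndQuery)).GET().build();
            return CLIENT.send(request, HttpResponse.BodyHandlers.ofString()).body();
        }

        public static void main(String[] args) throws Exception {
            String taskId = "demo-task-id"; // placeholder
            get("/tick?taskId=" + taskId);                    // heartbeat
            String mayRun = get("/confirm?taskId=" + taskId); // "true" only while the task is still pending
            System.out.println("task may run: " + mayRun);
        }
    }
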
+ 6 - 0
simulation-resource-scheduler/src/main/java/com/css/simulation/resource/scheduler/service/ProjectService.java

@@ -291,6 +291,12 @@ public class ProjectService {
      */
     @SneakyThrows
     public void stopProject(String projectId, String projectType) {
+        // 将项目状态修改为终止中
+        if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
+            manualProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_TERMINATING, TimeUtil.getNowForMysql());
+        } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
+            autoSubProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_TERMINATING, TimeUtil.getNowForMysql());
+        }
 
         //1 判断项目是否已经运行
         PrefixEntity redisPrefix = projectUtil.getRedisPrefixByProjectIdAndProjectType(projectId, projectType);

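The added block stamps the project as TERMINATING up front, so the stop request is visible even while cleanup is still running. The whole flow is triggered by the {"projectId", "type"} JSON documented on the stopProject listener in ProjectConsumer. Below is a standalone sketch of publishing that message with the plain Kafka producer API; the broker address, topic name and ids are placeholders, and the services in this repository publish through KafkaTemplate instead.

    // Sketch only: broker, topic and ids are placeholders; the scheduler itself
    // uses KafkaTemplate and the configured ${scheduler.stop-topic}.
    import java.util.Properties;

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    public final class StopMessageSketch {

        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", StringSerializer.class.getName());

            // Same shape as the message documented on the stopProject listener.
            String stopJson = "{\"projectId\":\"demo-project\",\"type\":\"1\"}";
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                // "project-stop" stands in for the configured stop topic name.
                producer.send(new ProducerRecord<>("project-stop", "demo-project", stopJson));
            }
        }
    }
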
+ 0 - 447
simulation-resource-scheduler/src/main/java/com/css/simulation/resource/scheduler/service/TaskManager.java

@@ -1,447 +0,0 @@
-package com.css.simulation.resource.scheduler.service;
-
-import api.common.pojo.constants.DictConstants;
-import api.common.util.*;
-import com.css.simulation.resource.scheduler.configuration.feign.VideoFeignClient;
-import com.css.simulation.resource.scheduler.configuration.kubernetes.KubernetesConfiguration;
-import com.css.simulation.resource.scheduler.entity.*;
-import com.css.simulation.resource.scheduler.mapper.*;
-import com.css.simulation.resource.scheduler.util.*;
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import io.kubernetes.client.openapi.ApiClient;
-import io.minio.MinioClient;
-import lombok.SneakyThrows;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.ibatis.session.ExecutorType;
-import org.apache.ibatis.session.SqlSession;
-import org.apache.ibatis.session.SqlSessionFactory;
-import org.apache.kafka.clients.admin.Admin;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.data.redis.core.StringRedisTemplate;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.Resource;
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.InputStreamReader;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-@Component
-@Slf4j
-public class TaskManager {
-
-  @Value("${scheduler.linux-path.pod-yaml-directory}")
-  private String podYamlDirectory;
-  @Value("${minio.bucket-name}")
-  private String bucketName;
-  @Value("${scheduler.linux-path.score-py}")
-  private String pyPath;
-  @Value("${scheduler.linux-path.temp}")
-  private String linuxTempPath;
-  @Value("${simulation-cloud.client-id}")
-  private String clientId;
-  @Value("${simulation-cloud.client-secret}")
-  private String clientSecret;
-  @Value("${simulation-cloud.token-uri}")
-  private String tokenUri;
-  @Value("${simulation-cloud.evaluation-level-uri}")
-  private String evaluationLevelUri;
-  @Value("${scheduler.minio-path.project-result}")
-  private String resultPathMinio;
-  @Resource
-  private StringRedisTemplate stringRedisTemplate;
-  @Resource
-  private TaskMapper taskMapper;
-  @Resource
-  private MinioClient minioClient;
-  @Resource
-  private ManualProjectMapper manualProjectMapper;
-  @Resource
-  private AutoSubProjectMapper autoSubProjectMapper;
-  @Resource
-  private TaskIndexManager taskIndexManager;
-  @Resource
-  private IndexMapper indexMapper;
-  @Resource
-  private ScoringRulesMapper scoringRulesMapper;
-  @Resource
-  private CloseableHttpClient closeableHttpClient;
-  @Resource
-  private RequestConfig requestConfig;
-  @Resource
-  private ProjectUtil projectUtil;
-  @Resource
-  private VideoFeignClient videoFeignClient;
-  @Resource
-  private SqlSessionFactory sqlSessionFactory;
-  @Resource
-  private KubernetesConfiguration kubernetesConfiguration;
-  @Resource
-  private ApiClient apiClient;
-  @Resource(name = "myKafkaAdmin")
-  private Admin admin;
-
-  public void batchInsertTask(List<TaskEntity> taskEntityList) {
-    try (SqlSession sqlSession = sqlSessionFactory.openSession(ExecutorType.BATCH, false)) {
-      TaskMapper taskMapper1 = sqlSession.getMapper(TaskMapper.class);
-      for (TaskEntity taskEntity : taskEntityList) {
-        taskMapper1.insert(taskEntity);
-      }
-      sqlSession.commit();
-    }
-  }
-
-  /**
-   * 加事务的话高并发情况下会死锁
-   */
-  @SneakyThrows
-  public boolean isProjectCompleted(PrefixEntity redisPrefix, String projectId, String projectType, String maxSimulationTime, String taskId, String state, String podName) {
-    boolean result;
-    String nodeName = projectUtil.getNodeNameOfPod(projectId, podName);
-    if (DictConstants.TASK_RUNNING.equals(state)) {  // 运行中的 pod 无需删除
-      // 将运行中的任务的 pod 名称放入 redis
-      stringRedisTemplate.opsForValue().set(redisPrefix.getTaskPodKey(), podName);
-      taskTick(taskId); // 刷新一下心跳
-      log.info("修改任务 " + taskId + " 的状态为 " + state + ",pod 名称为:" + podName);
-      taskMapper.updateStateWithStartTime(taskId, state, TimeUtil.getNowForMysql());
-      return false;
-    } else { // 结束的 pod 都直接删除,并判断项目是否完成
-      // -------------------------------- 处理状态 --------------------------------
-      log.info("修改任务 {} 的状态为 {} ,pod 名称为 {} ,并删除 pod。", taskId, state, podName);
-      if (DictConstants.TASK_ABORTED.equals(state)) {
-        String minioPathOfErrorLog = resultPathMinio + projectId + "/" + taskId + "error.log";
-        boolean objectExist = MinioUtil.isObjectExist(minioClient, bucketName, minioPathOfErrorLog);
-        String targetEvaluate;
-        if (objectExist) {
-          String errorString = MinioUtil.downloadToString(minioClient, bucketName, minioPathOfErrorLog);
-          String[] lines = errorString.split("\n");
-          StringBuilder errorMessage = new StringBuilder();
-          for (String line : lines) {
-            if (line.startsWith("Original Error")) {
-              errorMessage.append(line).append("\n");
-            }
-            if (line.startsWith("Possible Cause")) {
-              errorMessage.append(line);
-              break;
-            }
-          }
-          targetEvaluate = errorMessage.toString();
-        } else {
-          targetEvaluate = DictConstants.TASK_ERROR_REASON_2;
-        }
-        taskMapper.updateFailStateWithStopTime(taskId, state, TimeUtil.getNowForMysql(), targetEvaluate);
-      } else if (DictConstants.TASK_TERMINATED.equals(state)) {
-        taskMapper.updateFailStateWithStopTime(taskId, state, TimeUtil.getNowForMysql(), DictConstants.TASK_ERROR_REASON_3);
-      } else if (DictConstants.TASK_ANALYSIS.equals(state)) { // 该状态只会获得一次
-        taskMapper.updateSuccessStateWithStopTime(taskId, state, TimeUtil.getNowForMysql());
-        // 查询项目是否使用 gpu 生成视频(0是1否)
-        String isChoiceGpu = projectUtil.getProjectByProjectId(projectId).getIsChoiceGpu();
-        if (DictConstants.VIDEO_GPU.equals(isChoiceGpu)) {
-          log.info("项目 {} 使用 GPU 生成视频。", projectId);
-        } else if (DictConstants.VIDEO_CPU.equals(isChoiceGpu)) {
-          log.info("项目 {} 使用 CPU 生成视频。", projectId);
-          videoFeignClient.generateVideo(projectId, projectType, maxSimulationTime, taskId);
-        } else {
-          throw new RuntimeException("未设置视频生成。");
-        }
-      }
-      // -------------------------------- 判断项目是否结束 --------------------------------
-      result = projectUtil.complete(redisPrefix, projectId);
-      if (!result) {
-        log.info("项目 " + projectId + " 还未运行完成。");
-        projectUtil.createNextPod(projectId, nodeName, podName);
-      } else {
-        //如果项目已完成先把 pod 删除,并归还并行度
-        KubernetesUtil.deletePod2(apiClient, kubernetesConfiguration.getNamespace(), podName);
-        projectUtil.incrementOneParallelismOfGpuNode(nodeName);
-      }
-      RedisUtil.deleteByPrefix(stringRedisTemplate, redisPrefix.getTaskMessageKey());
-      RedisUtil.deleteByPrefix(stringRedisTemplate, redisPrefix.getTaskPodKey());
-    }
-    return result;
-  }
-
-
-  /**
-   * @param userId 项目创建用户的 id
-   */
-  @SneakyThrows
-  public void score(String projectRunningKey, String userId, String projectId, String projectType) {
-    stringRedisTemplate.delete(projectRunningKey);
-    // -------------------------------- 打分 --------------------------------
-    ProjectEntity projectEntity = null;
-    if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
-      projectEntity = manualProjectMapper.selectById(projectId);
-    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
-      projectEntity = autoSubProjectMapper.selectById(projectId);
-    }
-    if (projectEntity == null) {
-      log.error("不存在项目 {}" + projectId);
-      return;
-    }
-    String packageId = projectEntity.getScenePackageId();  // 场景测试包 id,指标的rootId
-    TimeUnit.SECONDS.sleep(10); // 先等一下数据库更新
-    List<TaskEntity> taskList = taskMapper.selectTaskListByProjectId(projectId);  // 所有任务信息
-    if (CollectionUtil.isEmpty(taskList)) {
-      log.error("项目 {} 下没有查询到任务!", projectId);
-      return;
-    }
-    indexMapper.deleteFirstByProjectId(projectId);
-    indexMapper.deleteLastByProjectId(projectId);
-    //1 查询场景包对应指标
-    String allIndexKey = "project:" + projectId + ":package:" + packageId + ":all";
-    String leafIndexKey = "project:" + projectId + ":package:" + packageId + ":leaf";
-    String allIndexTemplateListJson = stringRedisTemplate.opsForValue().get(allIndexKey);
-    String leafIndexTemplateListJson = stringRedisTemplate.opsForValue().get(leafIndexKey);
-    List<IndexTemplateEntity> allIndexTemplateList = JsonUtil.jsonToList(allIndexTemplateListJson, IndexTemplateEntity.class);
-    List<IndexTemplateEntity> leafIndexTemplateList = JsonUtil.jsonToList(leafIndexTemplateListJson, IndexTemplateEntity.class);
-    log.info("共有 " + leafIndexTemplateList.size() + "个叶子节点:" + leafIndexTemplateListJson);
-    int maxLevel = 1; // 用于计算指标得分
-    List<LeafIndexEntity> leafIndexList = new ArrayList<>();
-    for (int i = 0; i < leafIndexTemplateList.size(); i++) {
-      String scoreExplain = null; // 每个叶子指标下的任务的得分说明一样和叶子指标一致
-      IndexTemplateEntity leafIndexTemplate = leafIndexTemplateList.get(i);
-      String indexId = leafIndexTemplate.getIndexId(); // 叶子指标id
-      String parentId = leafIndexTemplate.getParentId(); // 父 id
-      String rootId = leafIndexTemplate.getRootId(); // 包 id
-      String weight = leafIndexTemplate.getWeight(); // 权重
-      Integer packageLevel = leafIndexTemplate.getPackageLevel(); // 几级指标
-      String ruleName = leafIndexTemplate.getRuleName();    // 打分脚本名称,例如 AEB_1-1
-      String ruleDetails = leafIndexTemplate.getRuleDetails();    // 打分脚本内容
-      if (packageLevel > maxLevel) {
-        maxLevel = packageLevel;
-      }
-      log.info("开始执行对第 " + (i + 1) + " 个叶子节点 " + indexId + " 进行打分!");
-      // 根据叶子指标id查询评分规则创建用户id
-      String createUserIdOfRule = scoringRulesMapper.selectCreateUserIdByIndexId(indexId);
-      //1 判断有没有用户目录,没有则复制
-      String scoreDirectoryOfUser = linuxTempPath + "score/" + createUserIdOfRule + "/";
-      if (!new File(scoreDirectoryOfUser + "main.py").exists()) {
-        // 复制 main.py
-        FileUtil.createDirectory(scoreDirectoryOfUser);
-        FileUtil.cpR(pyPath, scoreDirectoryOfUser);
-      }
-      //2 将打分规则保存到script目录
-
-      String ruleFilePath = scoreDirectoryOfUser + "scripts/" + ruleName.split("_")[0] + "/" + ruleName + ".py";
-      FileUtil.writeInputStreamToLocalFile(IoUtil.stringToInputStream(ruleDetails), ruleFilePath);
-      log.info("将叶子节点 " + indexId + " 对应的打分规则保存到临时目录:" + ruleFilePath);
-      List<TaskEntity> taskListOfLeafIndex = taskList.stream().filter(task -> indexId.equals(task.getLastTargetId())).collect(Collectors.toList());
-      log.info("叶子节点 " + indexId + " 包括 " + taskListOfLeafIndex.size() + " 个任务:" + taskListOfLeafIndex);
-      // 计算叶子指标的得分
-      // 使用 stream 流会出现无法进入循环的情况
-      for (TaskEntity taskOfLeaf : taskListOfLeafIndex) {
-        String task2Id = taskOfLeaf.getId();
-
-        String runState = taskOfLeaf.getRunState();
-        log.info("TaskManager--score 任务 " + task2Id + " 的运行状态为:" + runState);
-        if (DictConstants.TASK_ANALYSIS.equals(runState)) {
-          taskMapper.updateSuccessStateWithStopTime(task2Id, DictConstants.TASK_ANALYSING, TimeUtil.getNowForMysql());
-          // 计算每个任务的得分
-          String result1OfMinio = taskOfLeaf.getRunResultFilePath() + "/Ego.csv";
-          String result1OfLinux = linuxTempPath + result1OfMinio;
-          String result2OfMinio = taskOfLeaf.getRunResultFilePath() + "/evaluation.csv";
-          String result2OfLinux = linuxTempPath + result2OfMinio;
-          String scoreCommand = "python3 " + scoreDirectoryOfUser + "main.py " + result1OfLinux + " " + result2OfLinux + " " + taskOfLeaf.getSceneType() + " " + ruleName; // 指定打分脚本
-          String scoreResult;
-          ScoreEntity score = null;
-          log.info("下载 minio 上的结果文件 " + result1OfMinio + " 和 " + result2OfMinio + " 到临时目录:" + linuxTempPath);
-          MinioUtil.downloadToFile(minioClient, bucketName, result1OfMinio, result1OfLinux);  // 也可改成下载到指定ip的服务器上,需要保证和打分脚本在一台机器上。
-          MinioUtil.downloadToFile(minioClient, bucketName, result2OfMinio, result2OfLinux);  // 也可改成下载到指定ip的服务器上,需要保证和打分脚本在一台机器上。
-          log.info("开始执行打分命令:" + scoreCommand);
-          Runtime r = Runtime.getRuntime();
-          Process p = r.exec(scoreCommand, null, new File(scoreDirectoryOfUser));
-          BufferedReader br = new BufferedReader(new InputStreamReader(p.getInputStream()));
-          StringBuilder sb = new StringBuilder();
-          String inline;
-          while (null != (inline = br.readLine())) {
-            sb.append(inline).append("\n");
-          }
-          scoreResult = sb.toString();
-          log.info("项目" + projectId + " 的任务 " + task2Id + " 打分结束,结果为:" + scoreResult);
-          String replace = StringUtil.replace(scoreResult, "'", "\"");
-          try {
-            score = JsonUtil.jsonToBean(replace, ScoreEntity.class);
-          } catch (Exception e) { // 打分失败
-            log.info("项目" + projectId + " 的任务 " + task2Id + " 打分失败:", e);
-          }
-          if (score != null) {
-            taskOfLeaf.setReturnSceneId(score.getUnit_scene_ID());
-            taskOfLeaf.setTargetEvaluate(score.getEvaluate_item());
-            taskOfLeaf.setScoreExplain(score.getScore_description());
-            taskOfLeaf.setModifyUserId(userId);
-            taskOfLeaf.setModifyTime(TimeUtil.getNowForMysql());
-            scoreExplain = score.getScore_description();
-            taskOfLeaf.setRunState(DictConstants.TASK_COMPLETED);
-            double taskScore = score.getUnit_scene_score();
-            if (taskScore == -1.0) {
-              taskOfLeaf.setScore(0.0);
-              taskOfLeaf.setScored(false);
-            } else {
-              taskOfLeaf.setScore(NumberUtil.cut(taskScore, 2));
-              taskOfLeaf.setScored(true);
-            }
-            taskMapper.updateSuccessState(taskOfLeaf, DictConstants.TASK_COMPLETED);
-          } else {
-            //1 修改任务状态为 aborted
-            taskOfLeaf.setRunState(DictConstants.TASK_ABORTED);
-            taskOfLeaf.setScore(0.0);
-            taskOfLeaf.setScored(false);
-            taskMapper.updateFailStateWithStopTime(task2Id, DictConstants.TASK_ABORTED, TimeUtil.getNowForMysql(), DictConstants.TASK_ERROR_REASON_5);
-          }
-        }
-      }
-
-      // 全部参与计算
-      // 计算不合格的任务数(不到100分就是不合格,执行失败的不算)
-      // 计算叶子指标下任务得分总和
-      int errorSceneNumber = 0;   // 仿真失败的任务
-      int notScoredSceneNumber = 0;   // 评分失败的任务
-      int notStandardSceneNumber = 0;
-      int standardSceneNumber = 0;
-      double leafSum = 0.0;
-      for (TaskEntity task : taskListOfLeafIndex) {
-        Double scoreTemp = task.getScore();
-        if (scoreTemp == null) {   // 失败状态的任务是没有分数的,计作 0 分。
-          errorSceneNumber++;
-          scoreTemp = 0.0;
-        } else if (task.getScored() == null || !task.getScored()) {  // 如果评分失败,也计作 0 分
-          notScoredSceneNumber++;
-          scoreTemp = 0.0;
-        } else if (scoreTemp < 100.0) {
-          notStandardSceneNumber++;
-        } else if (scoreTemp == 100.0) {
-          standardSceneNumber++;
-        }
-        // 计算分数总和
-        leafSum += scoreTemp;
-      }
-
-      // 计算任务的个数
-      long taskNumberToScore = taskListOfLeafIndex.size();
-      log.info("项目 " + projectId + " 的叶子指标 " + indexId + " 下参与计算的任务总数为 " + taskNumberToScore + ":仿真异常场景个数 " + errorSceneNumber + "、未达标场景个数 " + notStandardSceneNumber + "、达标场景个数 " + standardSceneNumber);
-
-      // 计算叶子指标得分(任务得分总和 / 任务数量)
-      double leafIndexScore = NumberUtil.cut(leafSum / taskNumberToScore, 2);
-      // 创建叶子指标对象
-      leafIndexTemplate.setTempScore(leafIndexScore);
-
-      LeafIndexEntity leafIndex = LeafIndexEntity.builder().id(StringUtil.getRandomUUID()).pId(projectId).target(leafIndexTemplate.getIndexId()).errorSceneNum(errorSceneNumber).notScoredSceneNum(notScoredSceneNumber).notStandardSceneNum(notStandardSceneNumber).standardSceneNum(standardSceneNumber).score(leafIndexScore).indexId(indexId).parentId(parentId).rootId(rootId).weight(weight).scoreExplain(scoreExplain).packageLevel(packageLevel).build();
-      leafIndex.setCreateUserId(userId);
-      leafIndex.setCreateTime(TimeUtil.getNowForMysql());
-      leafIndex.setModifyUserId(userId);
-      leafIndex.setModifyTime(TimeUtil.getNowForMysql());
-      leafIndex.setIsDeleted("0");
-
-      leafIndexList.add(leafIndex);
-    }
-    // 保存叶子指标得分
-    taskIndexManager.batchInsertLeafIndex(leafIndexList);
-    // 保存一级指标分数
-    log.info("项目 " + projectId + " 的所有任务分数为:" + taskList);
-    computeFirst(leafIndexList, allIndexTemplateList, projectId, maxLevel);
-    log.info("项目 " + projectId + " 打分完成!");
-  }
-
-  public void computeFirst(List<LeafIndexEntity> leafIndexList, List<IndexTemplateEntity> allIndexTemplateList, String projectId, int maxLevel) {
-
-    log.info("计算父指标得分:" + leafIndexList);
-    Iterator<LeafIndexEntity> leafTaskIndexIterator = leafIndexList.iterator();
-    // 把 1 级的指标得分直接保存
-    while (leafTaskIndexIterator.hasNext()) {
-      LeafIndexEntity leafTaskIndex = leafTaskIndexIterator.next();
-      if (leafTaskIndex.getPackageLevel() == 1) {
-        leafTaskIndex.setCreateUserId(leafTaskIndex.getCreateUserId());
-        leafTaskIndex.setCreateTime(TimeUtil.getNowForMysql());
-        leafTaskIndex.setModifyUserId(leafTaskIndex.getModifyUserId());
-        leafTaskIndex.setModifyTime(TimeUtil.getNowForMysql());
-        leafTaskIndex.setIsDeleted("0");
-        indexMapper.insertFirstIndex(leafTaskIndex);
-        leafTaskIndexIterator.remove();
-      }
-    }
-    if (leafIndexList.size() > 0) {
-      List<LeafIndexEntity> nextLevelIndexList = new ArrayList<>();
-      // 找出等级和 maxLevel 不相同的指标暂时不计算
-      leafIndexList.stream().filter(po -> maxLevel != po.getPackageLevel()).forEach(nextLevelIndexList::add);
-      // 找出等级和 maxLevel 相同的指标并根据父指标分组
-      Map<String, List<LeafIndexEntity>> sonTaskIndexMap = leafIndexList.stream().filter(po -> maxLevel == po.getPackageLevel()).collect(Collectors.groupingBy(LeafIndexEntity::getParentId));
-      Set<String> parentIdSet = sonTaskIndexMap.keySet();
-      List<String> parentIdList = CollectionUtil.setToList(parentIdSet);
-
-      List<IndexTemplateEntity> parentIndexTemplateList = allIndexTemplateList.stream().filter(indexTemplate -> parentIdList.contains(indexTemplate.getIndexId())).collect(Collectors.toList());
-      // 计算父指标得分
-      parentIndexTemplateList.forEach(indexTemplate -> {
-        String weight = indexTemplate.getWeight();
-        List<LeafIndexEntity> sonTaskIndexList = sonTaskIndexMap.get(indexTemplate.getIndexId());
-        double parentScore = NumberUtil.cut(sonTaskIndexList.stream().mapToDouble(taskIndex -> taskIndex.getScore() * Double.parseDouble(taskIndex.getWeight()) / 100).sum(), 2);
-        LeafIndexEntity parentTaskIndex = LeafIndexEntity.builder().id(StringUtil.getRandomUUID()).pId(projectId).target(indexTemplate.getIndexId()).score(parentScore).indexId(indexTemplate.getIndexId()).parentId(indexTemplate.getParentId()).rootId(indexTemplate.getRootId()).weight(weight).packageLevel(maxLevel - 1).build();
-        nextLevelIndexList.add(parentTaskIndex);
-      });
-      // 将父指标作为叶子指标递归
-      computeFirst(nextLevelIndexList, allIndexTemplateList, projectId, maxLevel - 1);
-    }
-  }
-
-  @SneakyThrows
-  public void evaluationLevel(String projectId) {
-    String tokenUrl = tokenUri + "?grant_type=client_credentials" + "&client_id=" + clientId + "&client_secret=" + clientSecret;
-    log.info("获取仿真云平台 token:" + tokenUrl);
-    String response = HttpUtil.get(closeableHttpClient, requestConfig, tokenUrl);
-    ObjectMapper objectMapper = new ObjectMapper();
-    JsonNode jsonNode = objectMapper.readTree(response);
-    String accessToken = jsonNode.path("access_token").asText();
-    log.info("仿真云平台 token 为:" + accessToken);
-    Map<String, String> headers = new HashMap<>();
-    headers.put("Authorization", "Bearer " + accessToken);
-    Map<String, String> params = new HashMap<>();
-    params.put("id", projectId);
-    String post = HttpUtil.post(closeableHttpClient, requestConfig, evaluationLevelUri, headers, params);
-    log.info("访问仿真云平台评价等级接口:" + evaluationLevelUri + ",请求头为:" + headers + ",请求体为:" + params + "结果为:" + post);
-  }
-
-
-  public Boolean taskConfirm(String taskId) {
-    // 查询 task 如果不是 pending 则不执行
-    String state = taskMapper.selectStateById(taskId);
-    return DictConstants.TASK_PENDING.equals(state);
-  }
-
-  public void taskTick(String taskId) {
-    log.info("收到任务 " + taskId + " 的心跳。");
-    TaskEntity taskEntity = taskMapper.selectById(taskId);
-    String projectId = taskEntity.getPId();
-    String userId = taskEntity.getCreateUserId();
-    // 刷新 redis 心跳时间
-    PrefixEntity redisPrefix = projectUtil.getRedisPrefixByUserIdAndProjectIdAndTaskId(userId, projectId, taskId);
-    if (RedisUtil.getStringByKey(stringRedisTemplate, redisPrefix.getProjectRunningKey()) != null) {
-      stringRedisTemplate.opsForValue().set(redisPrefix.getTaskTickKey(), TimeUtil.getNowString());
-    }
-
-  }
-
-
-  @SneakyThrows
-  public void done(PrefixEntity redisPrefix, String projectId, String projectType) {
-    // 更新项目状态为已完成
-    if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
-      manualProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_COMPLETED, TimeUtil.getNowForMysql());
-    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
-      autoSubProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_COMPLETED, TimeUtil.getNowForMysql());
-    }
-    // 删除 kafka topic
-    ApacheKafkaUtil.deleteTopic(admin, projectId);
-    // 删除 redis 中的 项目运行信息 键值对
-    RedisUtil.deleteByPrefix(stringRedisTemplate, redisPrefix.getProjectRunningKey());
-    RedisUtil.deleteByPrefix(stringRedisTemplate, "project:" + projectId);
-    // 删除剩余 yaml
-    projectUtil.deleteYamlByProjectId(projectId);
-  }
-
-
-}

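The removed score() method shelled out to a Python scorer (python3 main.py Ego.csv evaluation.csv sceneType ruleName), collected its stdout and parsed it as JSON; this responsibility is now reached through TaskUtil (see the TaskService changes below). Below is a minimal standalone sketch of the same run-and-capture pattern using ProcessBuilder; all paths and arguments are placeholders, and the removed code used Runtime.exec with a user-specific score directory instead.

    // Sketch only: script path, working directory and arguments are placeholders.
    import java.io.BufferedReader;
    import java.io.File;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public final class ScoreProcessSketch {

        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = new ProcessBuilder(
                    "python3", "main.py", "Ego.csv", "evaluation.csv", "sceneType", "AEB_1-1");
            pb.directory(new File("/tmp/score/demo-user")); // placeholder working directory
            pb.redirectErrorStream(true);                   // surface stderr together with stdout

            Process process = pb.start();
            StringBuilder output = new StringBuilder();
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    output.append(line).append('\n');
                }
            }
            int exitCode = process.waitFor();
            System.out.println("exit=" + exitCode + ", raw score output:\n" + output);
        }
    }
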
+ 46 - 44
simulation-resource-scheduler/src/main/java/com/css/simulation/resource/scheduler/service/TaskService.java

@@ -6,6 +6,7 @@ import com.css.simulation.resource.scheduler.entity.ProjectEntity;
 import com.css.simulation.resource.scheduler.entity.TaskEntity;
 import com.css.simulation.resource.scheduler.mapper.TaskMapper;
 import com.css.simulation.resource.scheduler.util.ProjectUtil;
+import com.css.simulation.resource.scheduler.util.TaskUtil;
 import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.stereotype.Service;
@@ -15,58 +16,59 @@ import javax.annotation.Resource;
 @Service
 @Slf4j
 public class TaskService {
-  @Resource
-  private TaskManager taskManager;
-  @Resource
-  private TaskMapper taskMapper;
-  @Resource
-  private ProjectUtil projectUtil;
-  @Resource
-  private CustomRedisClient customRedisClient;
+    @Resource
+    private TaskUtil taskUtil;
+    @Resource
+    private TaskMapper taskMapper;
+    @Resource
+    private ProjectUtil projectUtil;
+    @Resource
+    private CustomRedisClient customRedisClient;
 
 
-  // -------------------------------- Comment --------------------------------
+    // -------------------------------- Comment --------------------------------
 
-  @SneakyThrows
-  public void taskState(String taskId, String state, String podName) {
-    String lock1 = "taskId:" + taskId + ":state:" + state + ":pod-name:" + podName;
-    customRedisClient.lock(lock1, 1L, 30 * 60L);
-    try {
-      // 查询相关信息
-      TaskEntity taskEntity = taskMapper.selectById(taskId);
-      String projectId = taskEntity.getPId(); // 项目 id
-      ProjectEntity projectEntity = projectUtil.getProjectByProjectId(projectId);
-      String projectType = projectEntity.getProjectType();  // 项目类型
-      String maxSimulationTime = projectEntity.getMaxSimulationTime();  // 项目类型
-      String userId = taskEntity.getCreateUserId();   // 用户 id
-      PrefixEntity redisPrefix = projectUtil.getRedisPrefixByUserIdAndProjectIdAndTaskId(userId, projectId, taskId);
-      // 判断是否完成
-      boolean projectCompleted = taskManager.isProjectCompleted(redisPrefix, projectId, projectType, maxSimulationTime, taskId, state, podName);
-      if (projectCompleted) {
+    @SneakyThrows
+    public void taskState(String taskId, String state, String podName) {
+        TaskEntity taskEntity = taskMapper.selectById(taskId);
+        String projectId = taskEntity.getPId(); // 项目 id
+        String lock1 = "taskId:" + taskId + ":state:" + state + ":pod-name:" + podName;
         String lock2 = "project:" + projectId + ":completed-lock";
-        customRedisClient.tryLock(lock2, 10 * 60L);
-        log.info("项目 {} 开始打分。", projectId);
-        taskManager.score(redisPrefix.getProjectRunningKey(), userId, projectId, projectType);
-        log.info("项目 {} 计算评价等级。", projectId);
-        taskManager.evaluationLevel(projectId);
-        log.info("项目 {} 开始释放资源。", projectId);
-        taskManager.done(redisPrefix, projectId, projectType);
-        log.info("项目 {} 运行结束。", projectId);
-      }
-    } finally {
-      customRedisClient.unlock(lock1);
-    }
+        customRedisClient.lock(lock1, 1L, 30 * 60L);
+        try {
+            // 查询相关信息
+            ProjectEntity projectEntity = projectUtil.getProjectByProjectId(projectId);
+            String projectType = projectEntity.getProjectType();  // 项目类型
+            String maxSimulationTime = projectEntity.getMaxSimulationTime();  // 最大仿真时间
+            String userId = taskEntity.getCreateUserId();   // 用户 id
+            PrefixEntity redisPrefix = projectUtil.getRedisPrefixByUserIdAndProjectIdAndTaskId(userId, projectId, taskId);
+            // 判断是否完成
+            boolean projectCompleted = taskUtil.isProjectCompleted(redisPrefix, projectId, projectType, maxSimulationTime, taskId, state, podName);
+            if (projectCompleted) {
+                customRedisClient.tryLock(lock2, 10 * 60L);
+                log.info("项目 {} 开始打分。", projectId);
+                taskUtil.score(redisPrefix.getProjectRunningKey(), userId, projectId, projectType);
+                log.info("项目 {} 计算评价等级。", projectId);
+                taskUtil.evaluationLevel(projectId);
+                log.info("项目 {} 开始释放资源。", projectId);
+                taskUtil.done(redisPrefix, projectId, projectType);
+                log.info("项目 {} 运行结束。", projectId);
+            }
+        } finally {
+            customRedisClient.unlock(lock2);
+            customRedisClient.unlock(lock1);
+        }
 
-  }
+    }
 
 
-  public Boolean taskConfirm(String taskId) {
-    return taskManager.taskConfirm(taskId);
-  }
+    public Boolean taskConfirm(String taskId) {
+        return taskUtil.taskConfirm(taskId);
+    }
 
-  public void taskTick(String taskId) {
-    taskManager.taskTick(taskId);
-  }
+    public void taskTick(String taskId) {
+        taskUtil.taskTick(taskId);
+    }
 
 
 }

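In the reworked taskState, the per-callback lock (lock1) is taken first and the project-level completion lock (lock2) is only acquired once the project is judged complete; the finally block now releases both. Below is a standalone sketch of that two-lock shape, using ReentrantLock as a stand-in for the Redis-backed CustomRedisClient and guarding the second unlock with a flag.

    // Sketch only: ReentrantLock stands in for the distributed lock client used
    // by the service; the flag guards the completion unlock.
    import java.util.concurrent.locks.ReentrantLock;

    public final class TwoLockSketch {

        private final ReentrantLock callbackLock = new ReentrantLock();   // per task-state callback
        private final ReentrantLock completionLock = new ReentrantLock(); // per project completion

        public void onTaskState(boolean projectCompleted, Runnable scoreAndRelease) {
            callbackLock.lock();
            boolean completionLocked = false;
            try {
                if (projectCompleted) {
                    completionLock.lock();     // taken at most once per project
                    completionLocked = true;
                    scoreAndRelease.run();     // score, compute the evaluation level, free resources
                }
            } finally {
                if (completionLocked) {
                    completionLock.unlock();
                }
                callbackLock.unlock();
            }
        }

        public static void main(String[] args) {
            new TwoLockSketch().onTaskState(true, () -> System.out.println("scoring done"));
        }
    }
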
+ 441 - 441
simulation-resource-scheduler/src/main/java/com/css/simulation/resource/scheduler/util/ProjectUtil.java

@@ -6,6 +6,7 @@ import api.common.util.*;
 import com.css.simulation.resource.scheduler.configuration.esmini.EsminiConfiguration;
 import com.css.simulation.resource.scheduler.configuration.kubernetes.KubernetesConfiguration;
 import com.css.simulation.resource.scheduler.configuration.redis.CustomRedisClient;
+import com.css.simulation.resource.scheduler.controller.TaskController;
 import com.css.simulation.resource.scheduler.entity.*;
 import com.css.simulation.resource.scheduler.mapper.AutoSubProjectMapper;
 import com.css.simulation.resource.scheduler.mapper.ClusterMapper;
@@ -33,468 +34,467 @@ import java.util.stream.Collectors;
 @Slf4j
 public class ProjectUtil {
 
-  @Value("${scheduler.linux-path.pod-yaml-directory}")
-  private String podYamlDirectory;
-  @Value("${scheduler.host.hostname}")
-  private String hostname;
-  @Value("${scheduler.host.username}")
-  private String username;
-  @Value("${scheduler.host.password}")
-  private String password;
-
-
-  @Resource
-  private ManualProjectMapper manualProjectMapper;
-  @Resource
-  private AutoSubProjectMapper autoSubProjectMapper;
-  @Resource
-  private UserMapper userMapper;
-  @Resource
-  private ClusterMapper clusterMapper;
-  @Resource
-  private EsminiConfiguration esminiConfiguration;
-  @Resource
-  private KubernetesConfiguration kubernetesConfiguration;
-  @Resource
-  private ApiClient apiClient;
-  @Resource
-  private StringRedisTemplate stringRedisTemplate;
-  @Resource
-  private CustomRedisClient customRedisClient;
-
-
-  @SneakyThrows
-  public void deleteYamlByProjectId(String projectId) {
-    List<String> absolutePathList = FileUtil.listAbsolutePath(podYamlDirectory);
-    for (String absolutePath : absolutePathList) {
-      if (absolutePath.contains(projectId)) {
-        boolean delete = new File(absolutePath).delete();
-      }
+    @Value("${scheduler.linux-path.pod-yaml-directory}")
+    private String podYamlDirectory;
+    @Value("${scheduler.host.hostname}")
+    private String hostname;
+    @Value("${scheduler.host.username}")
+    private String username;
+    @Value("${scheduler.host.password}")
+    private String password;
+
+    @Resource
+    private ManualProjectMapper manualProjectMapper;
+    @Resource
+    private AutoSubProjectMapper autoSubProjectMapper;
+    @Resource
+    private UserMapper userMapper;
+    @Resource
+    private ClusterMapper clusterMapper;
+    @Resource
+    private EsminiConfiguration esminiConfiguration;
+    @Resource
+    private KubernetesConfiguration kubernetesConfiguration;
+    @Resource
+    private ApiClient apiClient;
+    @Resource
+    private StringRedisTemplate stringRedisTemplate;
+    @Resource
+    private CustomRedisClient customRedisClient;
+
+
+    @SneakyThrows
+    public void deleteYamlByProjectId(String projectId) {
+        List<String> absolutePathList = FileUtil.listAbsolutePath(podYamlDirectory);
+        for (String absolutePath : absolutePathList) {
+            if (absolutePath.contains(projectId)) {
+                boolean delete = new File(absolutePath).delete();
+            }
+        }
     }
-  }
-
-
-  public List<NodeEntity> getNodeListToCount(Map<String, Integer> nodeMap) {
-    List<NodeEntity> result = new ArrayList<>();
-    nodeMap.forEach((nodeName, parallelism) -> {
-      for (int i = 0; i < parallelism; i++) {
-        result.add(new NodeEntity(nodeName, 0));
-      }
-    });
-    return result;
-  }
-
-  /**
-   * 判断算法是否已经导入
-   *
-   * @return 算法是否已经导入
-   */
-  @SneakyThrows
-  public boolean isImported(String dockerImageWithoutVersion) {
-    String dockerImageListString = LinuxUtil.execute("docker images");
-    return dockerImageListString.contains(dockerImageWithoutVersion);
-  }
-
-  public String getRandomPodName(String projectId) {
-    return "project-" + projectId + "-" + StringUtil.getRandomEightBitUUID();
-  }
-
-  public String getPodYamlName(String nodeName, String podName) {
-    return nodeName + "#" + podName + ".yaml";
-  }
-
-
-  public void deletePod(String podName) {
-    try {
-      // 先删除 redis key
-      KubernetesUtil.deletePod(apiClient, kubernetesConfiguration.getNamespace(), podName);
-      log.info("等待 pod " + podName + " 的资源释放完成。");
-      TimeUnit.SECONDS.sleep(7);
-    } catch (ApiException apiException) {
-      log.info("pod " + podName + " 已删除。");
-    } catch (Exception e) {
-      e.printStackTrace();
-      log.error("删除 pod " + podName + " 报错。", e);
+
+
+    public List<NodeEntity> getNodeListToCount(Map<String, Integer> nodeMap) {
+        List<NodeEntity> result = new ArrayList<>();
+        nodeMap.forEach((nodeName, parallelism) -> {
+            for (int i = 0; i < parallelism; i++) {
+                result.add(new NodeEntity(nodeName, 0));
+            }
+        });
+        return result;
     }
-  }
 
+    /**
+     * 判断算法是否已经导入
+     *
+     * @return 算法是否已经导入
+     */
+    @SneakyThrows
+    public boolean isImported(String dockerImageWithoutVersion) {
+        String dockerImageListString = LinuxUtil.execute("docker images");
+        return dockerImageListString.contains(dockerImageWithoutVersion);
+    }
 
-  public String getNodeNameOfPod(String projectId, String podName) {
-    String key = "project:" + projectId + ":pod:" + podName + ":node";
-    final String s = stringRedisTemplate.opsForValue().get(key);
-    if (StringUtil.isEmpty(s)) {
-      throw new RuntimeException("无法获取 pod 运行所在节点:" + key);
+    public String getRandomPodName(String projectId) {
+        return "project-" + projectId + "-" + StringUtil.getRandomEightBitUUID();
     }
-    return s;
-  }
-
-
-  /**
-   * 更改一个名字继续启动
-   *
-   * @param projectId   项目 id
-   * @param nodeName    运行 pod 的节点名称
-   * @param lastPodName 即将删除的 pod 名称
-   */
-  @SneakyThrows
-  public void createNextPod(String projectId, String nodeName, String lastPodName) {
-    log.info("删除上一个 pod:projectId={},nodeName={},lastPodName={}", projectId, nodeName, lastPodName);
-    String cpuOrderString = stringRedisTemplate.opsForValue().get("project:" + projectId + ":pod:" + lastPodName + ":cpu");
-    deletePod(lastPodName);
-    RedisUtil.deleteByKey(stringRedisTemplate, "project:" + projectId + ":pod:" + lastPodName + ":cpu");
-    //2 获取新的 yaml 信息
-    final Set<String> yamlPathCacheKeySet = RedisUtil.getKeySetByPrefix(stringRedisTemplate, "project:" + projectId + ":node:" + nodeName + ":yaml");
-    if (CollectionUtil.isEmpty(yamlPathCacheKeySet)) {
-      // 如果当前节点没有下一个yaml,则返回一个并行度。
-      log.info("节点 " + nodeName + " 已经执行完被分配的项目 " + projectId + " 的所有 pod。");
-      incrementOneParallelismOfGpuNode(nodeName);
-    } else {
-      final String yamlPathCacheKey = new ArrayList<>(yamlPathCacheKeySet).get(0);
-      final String absolutePath = stringRedisTemplate.opsForValue().get(yamlPathCacheKey);
-      // 修改 cpu 编号
-      Optional.ofNullable(cpuOrderString).orElseThrow(() -> new RuntimeException("createNextPod2() pod " + lastPodName + " 缓存的 cpu 编号为空。"));
-      final String read = FileUtil.read(absolutePath);
-      final String replace = read.replace("cpu-order", "\"" + cpuOrderString + "\"");
-      FileUtil.writeStringToLocalFile(replace, absolutePath);
-      // 创建 pod
-      createPod3(projectId, yamlPathCacheKey, cpuOrderString);
-      log.info("创建项目 " + projectId + " 在节点 " + nodeName + " 的下一个 pod,使用 cpu 编号为 " + cpuOrderString);
+
+    public String getPodYamlName(String nodeName, String podName) {
+        return nodeName + "#" + podName + ".yaml";
     }
-  }
-
-  /**
-   * @param redisKey yaml 地址的缓存 key
-   */
-  public void createPodBegin(String projectId, String redisKey) {
-    final String podYamlPath = stringRedisTemplate.opsForValue().get(redisKey);
-    if (podYamlPath == null) {
-      throw new RuntimeException("根据缓存 key 获取 yaml 地址为 null:" + redisKey);
+
+
+    public void deletePod(String podName) {
+        try {
+            // 先删除 redis key
+            KubernetesUtil.deletePod(apiClient, kubernetesConfiguration.getNamespace(), podName);
+            log.info("等待 pod " + podName + " 的资源释放完成。");
+            TimeUnit.SECONDS.sleep(7);
+        } catch (ApiException apiException) {
+            log.info("pod " + podName + " 已删除。");
+        } catch (Exception e) {
+            e.printStackTrace();
+            log.error("删除 pod " + podName + " 报错。", e);
+        }
     }
-    stringRedisTemplate.delete(redisKey);
-    String nodeName = new File(podYamlPath).getName().split("#")[0];
-    String podName = podYamlPath.split("#")[1].split("\\.")[0];
-    stringRedisTemplate.opsForValue().set("project:" + projectId + ":pod:" + podName + ":node", nodeName);    // 将 pod 运行在哪个 node 上记录到 redis
-    new Thread(() -> KubernetesUtil.applyYaml(hostname, username, password, podYamlPath), "apply-" + podName).start();
-  }
-
-
-  /**
-   * @param redisKey yaml 地址的缓存 key
-   */
-  public void createPod3(String projectId, String redisKey, String cpuOrderString) {
-    final String podYamlPath = stringRedisTemplate.opsForValue().get(redisKey);
-    if (podYamlPath == null) {
-      throw new RuntimeException("根据缓存 key 获取 yaml 地址为 null:" + redisKey);
+
+
+    public String getNodeNameOfPod(String projectId, String podName) {
+        String key = "project:" + projectId + ":pod:" + podName + ":node";
+        final String s = stringRedisTemplate.opsForValue().get(key);
+        if (StringUtil.isEmpty(s)) {
+            throw new RuntimeException("无法获取 pod 运行所在节点:" + key);
+        }
+        return s;
     }
-    stringRedisTemplate.delete(redisKey);
-    String nodeName = new File(podYamlPath).getName().split("#")[0];
-    String podName = podYamlPath.split("#")[1].split("\\.")[0];
-    stringRedisTemplate.opsForValue().set("project:" + projectId + ":pod:" + podName + ":node", nodeName);    // 将 pod 运行在哪个 node 上记录到 redis
-    stringRedisTemplate.opsForValue().set("project:" + projectId + ":pod:" + podName + ":cpu", cpuOrderString);    // 将 pod 运行在哪个 node 上记录到 redis
-    new Thread(() -> KubernetesUtil.applyYaml(hostname, username, password, podYamlPath), "create-" + podName).start();
-  }
-
-
-  public ProjectEntity getProjectByProjectId(String projectId) {
-    ProjectEntity manualProjectEntity = manualProjectMapper.selectById(projectId);
-    ProjectEntity autoSubProjectEntity = autoSubProjectMapper.selectById(projectId);
-    if (manualProjectEntity != null) {
-      return manualProjectEntity;
-    } else if (autoSubProjectEntity != null) {
-      return autoSubProjectEntity;
+
+
+    /**
+     * 更改一个名字继续启动
+     *
+     * @param projectId   项目 id
+     * @param nodeName    运行 pod 的节点名称
+     * @param lastPodName 即将删除的 pod 名称
+     */
+    @SneakyThrows
+    public void createNextPod(String projectId, String nodeName, String lastPodName) {
+        log.info("删除上一个 pod:projectId={},nodeName={},lastPodName={}", projectId, nodeName, lastPodName);
+        String cpuOrderString = stringRedisTemplate.opsForValue().get("project:" + projectId + ":pod:" + lastPodName + ":cpu");
+        deletePod(lastPodName);
+        RedisUtil.deleteByKey(stringRedisTemplate, "project:" + projectId + ":pod:" + lastPodName + ":cpu");
+        //2 获取新的 yaml 信息
+        final Set<String> yamlPathCacheKeySet = RedisUtil.getKeySetByPrefix(stringRedisTemplate, "project:" + projectId + ":node:" + nodeName + ":yaml");
+        if (CollectionUtil.isEmpty(yamlPathCacheKeySet)) {
+            // 如果当前节点没有下一个yaml,则返回一个并行度。
+            log.info("节点 " + nodeName + " 已经执行完被分配的项目 " + projectId + " 的所有 pod。");
+            incrementOneParallelismOfGpuNode(nodeName);
+        } else {
+            final String yamlPathCacheKey = new ArrayList<>(yamlPathCacheKeySet).get(0);
+            final String absolutePath = stringRedisTemplate.opsForValue().get(yamlPathCacheKey);
+            // 修改 cpu 编号
+            Optional.ofNullable(cpuOrderString).orElseThrow(() -> new RuntimeException("createNextPod2() pod " + lastPodName + " 缓存的 cpu 编号为空。"));
+            final String read = FileUtil.read(absolutePath);
+            final String replace = read.replace("cpu-order", "\"" + cpuOrderString + "\"");
+            FileUtil.writeStringToLocalFile(replace, absolutePath);
+            // 创建 pod
+            createPod3(projectId, yamlPathCacheKey, cpuOrderString);
+            log.info("创建项目 " + projectId + " 在节点 " + nodeName + " 的下一个 pod,使用 cpu 编号为 " + cpuOrderString);
+        }
     }
-    throw new RuntimeException("不存在项目:" + projectId);
-  }
-
-
-  /**
-   * 获取正在运行的项目的并行度总和
-   *
-   * @param clusterRunningPrefix 集群 key 前缀
-   * @return 正在运行的项目的并行度总和
-   */
-  @SneakyThrows
-  public int getCurrentParallelismSum(String clusterRunningPrefix) {
-    int result = 0;
-    Set<String> clusterRunningKeySet = stringRedisTemplate.keys(clusterRunningPrefix + "*");
-    List<String> runningProjectSet; // 运行中的 projectId 列表
-    if (CollectionUtil.isEmpty(clusterRunningKeySet)) {
-      return 0;
+
+    /**
+     * @param redisKey yaml 地址的缓存 key
+     */
+    public void createPodBegin(String projectId, String redisKey) {
+        final String podYamlPath = stringRedisTemplate.opsForValue().get(redisKey);
+        if (podYamlPath == null) {
+            throw new RuntimeException("根据缓存 key 获取 yaml 地址为 null:" + redisKey);
+        }
+        stringRedisTemplate.delete(redisKey);
+        String nodeName = new File(podYamlPath).getName().split("#")[0];
+        String podName = podYamlPath.split("#")[1].split("\\.")[0];
+        stringRedisTemplate.opsForValue().set("project:" + projectId + ":pod:" + podName + ":node", nodeName);    // 将 pod 运行在哪个 node 上记录到 redis
+        new Thread(() -> KubernetesUtil.applyYaml(hostname, username, password, podYamlPath), "apply-" + podName).start();
     }
-    runningProjectSet = getRunningProjectList(clusterRunningKeySet);
-    if (CollectionUtil.isEmpty(runningProjectSet)) {
-      return 0;
+
+
+    /**
+     * @param redisKey yaml 地址的缓存 key
+     */
+    public void createPod3(String projectId, String redisKey, String cpuOrderString) {
+        final String podYamlPath = stringRedisTemplate.opsForValue().get(redisKey);
+        if (podYamlPath == null) {
+            throw new RuntimeException("根据缓存 key 获取 yaml 地址为 null:" + redisKey);
+        }
+        stringRedisTemplate.delete(redisKey);
+        String nodeName = new File(podYamlPath).getName().split("#")[0];
+        String podName = podYamlPath.split("#")[1].split("\\.")[0];
+        stringRedisTemplate.opsForValue().set("project:" + projectId + ":pod:" + podName + ":node", nodeName);    // 将 pod 运行在哪个 node 上记录到 redis
+        stringRedisTemplate.opsForValue().set("project:" + projectId + ":pod:" + podName + ":cpu", cpuOrderString);    // 将 pod 运行在哪个 node 上记录到 redis
+        new Thread(() -> KubernetesUtil.applyYaml(hostname, username, password, podYamlPath), "create-" + podName).start();
     }
-    for (String projectKey : runningProjectSet) {
-      String projectJsonTemp = stringRedisTemplate.opsForValue().get(projectKey);
-      ProjectMessageDTO projectMessageTemp = JsonUtil.jsonToBean(projectJsonTemp, ProjectMessageDTO.class);
-      result += projectMessageTemp.getCurrentParallelism();   // 获取当前正在使用的并行度
+
+
+    public ProjectEntity getProjectByProjectId(String projectId) {
+        ProjectEntity manualProjectEntity = manualProjectMapper.selectById(projectId);
+        ProjectEntity autoSubProjectEntity = autoSubProjectMapper.selectById(projectId);
+        if (manualProjectEntity != null) {
+            return manualProjectEntity;
+        } else if (autoSubProjectEntity != null) {
+            return autoSubProjectEntity;
+        }
+        throw new RuntimeException("不存在项目:" + projectId);
     }
-    return result;
-  }
-
-
-  /**
-   * 节点剩余可用并行度列表
-   *
-   * @return 节点映射(节点名,并行度)
-   */
-  public Map<String, Integer> getNodeMap() {
-    List<GpuNodeEntity> initialNodeList = kubernetesConfiguration.getNodeList(); // 预设并行度的节点列表
-    log.info("预设并行度的节点列表为:" + initialNodeList);
-    Map<String, Integer> resultNodeMap = new HashMap<>();    // 用于执行的节点映射(节点名,并行度)
-    for (GpuNodeEntity kubernetesNodeSource : initialNodeList) {
-      GpuNodeEntity kubernetesNodeCopy = kubernetesNodeSource.clone();
-      String nodeName = kubernetesNodeCopy.getHostname();
-      int maxParallelism = kubernetesNodeCopy.getParallelism();
-      String restParallelismKey = "gpu-node:" + nodeName + ":parallelism";
-      String restParallelismString = stringRedisTemplate.opsForValue().get(restParallelismKey);
-      int restParallelism;
-      if (restParallelismString == null) {    // 如果剩余可用并行度没有值,说明是第一次查询,则重置成最大并行度的预设值
-        restParallelism = maxParallelism;
-        stringRedisTemplate.opsForValue().set(restParallelismKey, restParallelism + "");
-      } else {
-        restParallelism = Integer.parseInt(restParallelismString);
-      }
-      resultNodeMap.put(nodeName, restParallelism);
+
+
+    /**
+     * 获取正在运行的项目的并行度总和
+     *
+     * @param clusterRunningPrefix 集群 key 前缀
+     * @return 正在运行的项目的并行度总和
+     */
+    @SneakyThrows
+    public int getCurrentParallelismSum(String clusterRunningPrefix) {
+        int result = 0;
+        Set<String> clusterRunningKeySet = stringRedisTemplate.keys(clusterRunningPrefix + "*");
+        List<String> runningProjectSet; // 运行中的 projectId 列表
+        if (CollectionUtil.isEmpty(clusterRunningKeySet)) {
+            return 0;
+        }
+        runningProjectSet = getRunningProjectList(clusterRunningKeySet);
+        if (CollectionUtil.isEmpty(runningProjectSet)) {
+            return 0;
+        }
+        for (String projectKey : runningProjectSet) {
+            String projectJsonTemp = stringRedisTemplate.opsForValue().get(projectKey);
+            ProjectMessageDTO projectMessageTemp = JsonUtil.jsonToBean(projectJsonTemp, ProjectMessageDTO.class);
+            result += projectMessageTemp.getCurrentParallelism();   // 获取当前正在使用的并行度
+        }
+        return result;
     }
-    log.info("剩余并行度的节点列表为:" + resultNodeMap);
-    return resultNodeMap;
-  }
-
-
-  /**
-   * 获取集群剩余并行度
-   *
-   * @return 集群剩余并行度
-   */
-  public int getRestParallelism() {
-    List<GpuNodeEntity> initialNodeList = kubernetesConfiguration.getNodeList(); // 预设并行度的节点列表
-    // 遍历所有节点,获取还有剩余并行度的节点
-    List<GpuNodeEntity> restNodeList = new ArrayList<>();    // 剩余并行度的节点列表
-    for (GpuNodeEntity kubernetesNodeSource : initialNodeList) {
-      GpuNodeEntity kubernetesNodeCopy = kubernetesNodeSource.clone();
-      String nodeName = kubernetesNodeCopy.getHostname();   // 节点名称
-      int maxParallelism = kubernetesNodeCopy.getParallelism();
-      String restParallelismString = stringRedisTemplate.opsForValue().get("gpu-node:" + nodeName + ":parallelism");// 获取节点剩余并行度的 key
-      // -------------------------------- Comment --------------------------------
-      int restParallelism;
-      if (restParallelismString == null || Integer.parseInt(restParallelismString) > maxParallelism) {    // 如果剩余可用并行度没有值,说明是第一次查询,则重置成最大并行度的预设值
-        restParallelism = maxParallelism;
-        stringRedisTemplate.opsForValue().set("gpu-node:" + nodeName + ":parallelism", restParallelism + "");
-      } else {
-        restParallelism = Integer.parseInt(restParallelismString);
-        kubernetesNodeCopy.setParallelism(restParallelism);
-      }
-      if (restParallelism > 0) {
-        restNodeList.add(kubernetesNodeCopy);
-      }
+
+
+    /**
+     * 节点剩余可用并行度列表
+     *
+     * @return 节点映射(节点名,并行度)
+     */
+    public Map<String, Integer> getNodeMap() {
+        List<GpuNodeEntity> initialNodeList = kubernetesConfiguration.getNodeList(); // 预设并行度的节点列表
+        log.info("预设并行度的节点列表为:" + initialNodeList);
+        Map<String, Integer> resultNodeMap = new HashMap<>();    // 用于执行的节点映射(节点名,并行度)
+        for (GpuNodeEntity kubernetesNodeSource : initialNodeList) {
+            GpuNodeEntity kubernetesNodeCopy = kubernetesNodeSource.clone();
+            String nodeName = kubernetesNodeCopy.getHostname();
+            int maxParallelism = kubernetesNodeCopy.getParallelism();
+            String restParallelismKey = "gpu-node:" + nodeName + ":parallelism";
+            String restParallelismString = stringRedisTemplate.opsForValue().get(restParallelismKey);
+            int restParallelism;
+            if (restParallelismString == null) {    // 如果剩余可用并行度没有值,说明是第一次查询,则重置成最大并行度的预设值
+                restParallelism = maxParallelism;
+                stringRedisTemplate.opsForValue().set(restParallelismKey, restParallelism + "");
+            } else {
+                restParallelism = Integer.parseInt(restParallelismString);
+            }
+            resultNodeMap.put(nodeName, restParallelism);
+        }
+        log.info("剩余并行度的节点列表为:" + resultNodeMap);
+        return resultNodeMap;
     }
-    log.info("ProjectUtil--getRestParallelism 集群剩余并行度为:" + restNodeList);
-    return restNodeList.size() == 0 ? 0 : restNodeList.stream().mapToInt(GpuNodeEntity::getParallelism).sum();
-  }
-
-  /**
-   * 根据并行度获取用于执行的节点列表
-   * 根据剩余可用并行度降序排序
-   *
-   * @return 节点映射(节点名,并行度)
-   */
-  public Map<String, Integer> getNodeMapToUse(int parallelism) {
-    List<GpuNodeEntity> initialNodeList = kubernetesConfiguration.getNodeList(); // 预设并行度的节点列表
-    log.info("预设并行度的节点列表为:" + initialNodeList);
-    // 遍历所有节点,获取还有剩余并行度的节点
-    List<GpuNodeEntity> restNodeList = new ArrayList<>();    // 剩余并行度的节点列表
-    for (GpuNodeEntity kubernetesNodeSource : initialNodeList) {
-      GpuNodeEntity kubernetesNodeCopy = kubernetesNodeSource.clone();
-      String nodeName = kubernetesNodeCopy.getHostname();   // 节点名称
-      int maxParallelism = kubernetesNodeCopy.getParallelism();
-      String restParallelismString = stringRedisTemplate.opsForValue().get("gpu-node:" + nodeName + ":parallelism");// 获取节点剩余并行度的 key
-      // -------------------------------- Comment --------------------------------
-      int restParallelism;
-      if (restParallelismString == null || Integer.parseInt(restParallelismString) > maxParallelism) {    // 如果剩余可用并行度没有值,说明是第一次查询,则重置成最大并行度的预设值
-        restParallelism = maxParallelism;
-        stringRedisTemplate.opsForValue().set("gpu-node:" + nodeName + ":parallelism", restParallelism + "");
-      } else {
-        restParallelism = Integer.parseInt(restParallelismString);
-        kubernetesNodeCopy.setParallelism(restParallelism);
-      }
-      if (restParallelism > 0) {
-        restNodeList.add(kubernetesNodeCopy);
-      }
+
+
+    /**
+     * 获取集群剩余并行度
+     *
+     * @return 集群剩余并行度
+     */
+    public int getRestParallelism() {
+        List<GpuNodeEntity> initialNodeList = kubernetesConfiguration.getNodeList(); // 预设并行度的节点列表
+        // 遍历所有节点,获取还有剩余并行度的节点
+        List<GpuNodeEntity> restNodeList = new ArrayList<>();    // 剩余并行度的节点列表
+        for (GpuNodeEntity kubernetesNodeSource : initialNodeList) {
+            GpuNodeEntity kubernetesNodeCopy = kubernetesNodeSource.clone();
+            String nodeName = kubernetesNodeCopy.getHostname();   // 节点名称
+            int maxParallelism = kubernetesNodeCopy.getParallelism();
+            String restParallelismString = stringRedisTemplate.opsForValue().get("gpu-node:" + nodeName + ":parallelism");// 获取节点剩余并行度的 key
+            // -------------------------------- Comment --------------------------------
+            int restParallelism;
+            if (restParallelismString == null || Integer.parseInt(restParallelismString) > maxParallelism) {    // 如果剩余可用并行度没有值(第一次查询)或超过预设最大值,则重置成最大并行度的预设值
+                restParallelism = maxParallelism;
+                stringRedisTemplate.opsForValue().set("gpu-node:" + nodeName + ":parallelism", restParallelism + "");
+            } else {
+                restParallelism = Integer.parseInt(restParallelismString);
+                kubernetesNodeCopy.setParallelism(restParallelism);
+            }
+            if (restParallelism > 0) {
+                restNodeList.add(kubernetesNodeCopy);
+            }
+        }
+        log.info("ProjectUtil--getRestParallelism 集群剩余并行度为:" + restNodeList);
+        return restNodeList.size() == 0 ? 0 : restNodeList.stream().mapToInt(GpuNodeEntity::getParallelism).sum();
     }
-    log.info("剩余并行度的节点列表为:" + restNodeList);
-    Map<String, Integer> resultNodeMap = new HashMap<>();    // 用于执行的节点映射(节点名,并行度)
-    if (!CollectionUtil.isEmpty(restNodeList)) {
-      if (restNodeList.size() == 1) {
-        GpuNodeEntity tempNode = restNodeList.get(0);
-        String tempNodeName = tempNode.getHostname();
-        int tempParallelism = tempNode.getParallelism();
-        resultNodeMap.put(tempNodeName, Math.min(tempParallelism, parallelism));
-      }
-      if (restNodeList.size() > 1) {
-        for (int i = 0; i < parallelism; i++) {
-          // 每次降序排序都取剩余并行度最大的一个。
-          restNodeList.sort((o1, o2) -> o2.getParallelism() - o1.getParallelism());
-          GpuNodeEntity tempNode = restNodeList.get(0);
-          String tempNodeName = tempNode.getHostname();
-          int tempParallelism = tempNode.getParallelism();
-          if (tempParallelism > 0) {
-            tempNode.setParallelism(tempParallelism - 1);
-            CollectionUtil.addValueToMap(resultNodeMap, 1, tempNodeName);
-          }
+
+    /**
+     * 根据并行度获取用于执行的节点列表
+     * 根据剩余可用并行度降序排序
+     *
+     * @return 节点映射(节点名,并行度)
+     */
+    public Map<String, Integer> getNodeMapToUse(int parallelism) {
+        List<GpuNodeEntity> initialNodeList = kubernetesConfiguration.getNodeList(); // 预设并行度的节点列表
+        log.info("预设并行度的节点列表为:" + initialNodeList);
+        // 遍历所有节点,获取还有剩余并行度的节点
+        List<GpuNodeEntity> restNodeList = new ArrayList<>();    // 剩余并行度的节点列表
+        for (GpuNodeEntity kubernetesNodeSource : initialNodeList) {
+            GpuNodeEntity kubernetesNodeCopy = kubernetesNodeSource.clone();
+            String nodeName = kubernetesNodeCopy.getHostname();   // 节点名称
+            int maxParallelism = kubernetesNodeCopy.getParallelism();
+            String restParallelismString = stringRedisTemplate.opsForValue().get("gpu-node:" + nodeName + ":parallelism");// 根据 key 获取节点剩余并行度
+            // -------------------------------- Comment --------------------------------
+            int restParallelism;
+            if (restParallelismString == null || Integer.parseInt(restParallelismString) > maxParallelism) {    // 如果剩余可用并行度没有值(第一次查询)或超过预设最大值,则重置成最大并行度的预设值
+                restParallelism = maxParallelism;
+                stringRedisTemplate.opsForValue().set("gpu-node:" + nodeName + ":parallelism", restParallelism + "");
+            } else {
+                restParallelism = Integer.parseInt(restParallelismString);
+                kubernetesNodeCopy.setParallelism(restParallelism);
+            }
+            if (restParallelism > 0) {
+                restNodeList.add(kubernetesNodeCopy);
+            }
+        }
+        log.info("剩余并行度的节点列表为:" + restNodeList);
+        Map<String, Integer> resultNodeMap = new HashMap<>();    // 用于执行的节点映射(节点名,并行度)
+        if (!CollectionUtil.isEmpty(restNodeList)) {
+            if (restNodeList.size() == 1) {
+                GpuNodeEntity tempNode = restNodeList.get(0);
+                String tempNodeName = tempNode.getHostname();
+                int tempParallelism = tempNode.getParallelism();
+                resultNodeMap.put(tempNodeName, Math.min(tempParallelism, parallelism));
+            }
+            if (restNodeList.size() > 1) {
+                for (int i = 0; i < parallelism; i++) {
+                    // 每次降序排序都取剩余并行度最大的一个。
+                    restNodeList.sort((o1, o2) -> o2.getParallelism() - o1.getParallelism());
+                    GpuNodeEntity tempNode = restNodeList.get(0);
+                    String tempNodeName = tempNode.getHostname();
+                    int tempParallelism = tempNode.getParallelism();
+                    if (tempParallelism > 0) {
+                        tempNode.setParallelism(tempParallelism - 1);
+                        CollectionUtil.addValueToMap(resultNodeMap, 1, tempNodeName);
+                    }
+                }
+            }
+        }
+        log.info("即将使用节点的并行度为:" + resultNodeMap);
+        return resultNodeMap;
+    }
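+
+    // Illustrative walkthrough (hypothetical values): with remaining parallelism node-a = 3 and
+    // node-b = 2 and a requested parallelism of 4, each iteration re-sorts the list descending and
+    // takes one slot from the current maximum, so one possible result (ties kept stable by the
+    // sort) is {node-a=2, node-b=2}; with a single remaining node the request is simply capped at
+    // that node's remaining parallelism via Math.min.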
+
+
+    public PrefixEntity getRedisPrefixByUserIdAndProjectIdAndTaskId(String userId, String projectId, String taskId) {
+        //3 获取用户类型(管理员账户、管理员子账户、普通账户、普通子账户)(独占、共享)
+        UserEntity userEntity = userMapper.selectById(userId);
+        String roleCode = userEntity.getRoleCode();
+        String useType = userEntity.getUseType();
+        String clusterId;
+        if (DictConstants.ROLE_CODE_SYSADMIN.equals(roleCode) || DictConstants.ROLE_CODE_ADMIN.equals(roleCode)) {  //3-1 管理员账户和管理员子账户直接执行
+            clusterId = DictConstants.SYSTEM_CLUSTER_ID;
+        } else if (DictConstants.ROLE_CODE_UESR.equals(roleCode)) { //3-2 普通账户,不管是独占还是共享,都在自己的集群里排队,根据自己的独占节点排队
+            clusterId = clusterMapper.selectByUserId(userId).getId();
+        } else if (DictConstants.ROLE_CODE_SUBUESR.equals(roleCode)) {
+            if (DictConstants.USER_TYPE_EXCLUSIVE.equals(useType)) {   //3-3 普通子账户,根据自己的独占节点排队
+                clusterId = clusterMapper.selectByUserId(userId).getId();
+            } else {    //3-4 共享子账户,根据父账户的共享节点排队
+                String parentUserId = userEntity.getCreateUserId();
+                clusterId = clusterMapper.selectByUserId(parentUserId).getId();
+            }
+        } else {
+            throw new RuntimeException("未知账户类型,无法获取集群信息。");
+        }
+        String clusterPrefix = "cluster:" + clusterId;
+        String clusterRunningPrefix = clusterPrefix + ":running";
+        String projectRunningKey = clusterRunningPrefix + ":" + projectId;
+        String taskTickKey = projectRunningKey + ":task:" + taskId + ":tick";
+        String taskPodKey = projectRunningKey + ":task:" + taskId + ":pod";
+        String taskRetryKey = projectRunningKey + ":task:" + taskId + ":retry";
+        String taskMessageKey = projectRunningKey + ":task:" + taskId + ":message";
+
+        return PrefixEntity.builder().clusterPrefix(clusterPrefix).clusterRunningPrefix(clusterRunningPrefix).projectRunningKey(projectRunningKey).taskTickKey(taskTickKey).taskPodKey(taskPodKey).taskRetryKey(taskRetryKey).taskMessageKey(taskMessageKey).build();
+
+    }
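+
+    // Resulting key layout (ids are placeholders):
+    //   cluster:<clusterId>:running:<projectId>                        -> project running info
+    //   cluster:<clusterId>:running:<projectId>:task:<taskId>:tick     -> task heartbeat timestamp
+    //   cluster:<clusterId>:running:<projectId>:task:<taskId>:pod      -> pod name of the task
+    //   cluster:<clusterId>:running:<projectId>:task:<taskId>:retry    -> task retry key
+    //   cluster:<clusterId>:running:<projectId>:task:<taskId>:message  -> task message key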
+
+
+    public PrefixEntity getRedisPrefixByClusterIdAndProjectId(String clusterId, String projectId) {
+        String clusterPrefix = "cluster:" + clusterId;
+        String clusterRunningPrefix = clusterPrefix + ":running";
+        String clusterWaitingPrefix = clusterPrefix + ":waiting";
+        String projectRunningKey = clusterRunningPrefix + ":" + projectId;
+        String projectWaitingKey = clusterWaitingPrefix + ":" + projectId;
+        String projectCheckKey = clusterWaitingPrefix + ":" + projectId + ":check";
+
+        return PrefixEntity.builder().clusterPrefix(clusterPrefix).clusterRunningPrefix(clusterRunningPrefix).clusterWaitingPrefix(clusterWaitingPrefix).projectRunningKey(projectRunningKey).projectWaitingKey(projectWaitingKey).projectCheckKey(projectCheckKey).build();
+
+    }
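+
+    // Resulting key layout for a queued project (ids are placeholders):
+    //   cluster:<clusterId>:waiting:<projectId>        -> project waiting key
+    //   cluster:<clusterId>:waiting:<projectId>:check  -> project check key
+    //   cluster:<clusterId>:running:<projectId>        -> project running key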
+
+
+    /**
+     * 获取运行中项目对应的键列表
+     *
+     * @param clusterRunningKeySet 集群运行前缀下的所有键(既有项目级的键,也有任务级的键)
+     * @return 运行中项目的键列表(形如 cluster:<clusterId>:running:<projectId>)
+     */
+    public List<String> getRunningProjectList(Set<String> clusterRunningKeySet) {
+        return clusterRunningKeySet.stream().filter(key -> StringUtil.countSubString(key, ":") == 3).collect(Collectors.toList());
+    }
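+
+    // For example, "cluster:c1:running:p1" contains exactly three ':' and is kept as a running
+    // project key, while task-level keys such as "cluster:c1:running:p1:task:t1:tick" contain more
+    // and are filtered out (c1, p1, t1 are placeholder ids).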
+
+
+    public PrefixEntity getRedisPrefixByProjectIdAndProjectType(String projectId, String projectType) {
+        String userId;
+        if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
+            userId = manualProjectMapper.selectCreateUserById(projectId);
+        } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
+            userId = autoSubProjectMapper.selectCreateUserById(projectId);
+        } else {
+            throw new RuntimeException("未知的项目类型。");
         }
-      }
+
+        //3 获取用户类型(管理员账户、管理员子账户、普通账户、普通子账户)(独占、共享)
+        UserEntity userEntity = userMapper.selectById(userId);
+        String roleCode = userEntity.getRoleCode();
+        String useType = userEntity.getUseType();
+        String clusterId;
+        if (DictConstants.ROLE_CODE_SYSADMIN.equals(roleCode) || DictConstants.ROLE_CODE_ADMIN.equals(roleCode)) {  //3-1 管理员账户和管理员子账户直接执行
+            clusterId = DictConstants.SYSTEM_CLUSTER_ID;
+        } else if (DictConstants.ROLE_CODE_UESR.equals(roleCode)) { //3-2 普通账户,不管是独占还是共享,都在自己的集群里排队,根据自己的独占节点排队
+            clusterId = clusterMapper.selectByUserId(userId).getId();
+        } else if (DictConstants.ROLE_CODE_SUBUESR.equals(roleCode)) {
+            if (DictConstants.USER_TYPE_EXCLUSIVE.equals(useType)) {   //3-3 普通子账户,根据自己的独占节点排队
+                clusterId = clusterMapper.selectByUserId(userId).getId();
+            } else {    //3-4 共享子账户,根据父账户的共享节点排队
+                String parentUserId = userEntity.getCreateUserId();
+                clusterId = clusterMapper.selectByUserId(parentUserId).getId();
+            }
+        } else {
+            throw new RuntimeException("未知账户类型,无法获取集群信息!");
+        }
+        String clusterPrefix = "cluster:" + clusterId;
+        String clusterRunningPrefix = clusterPrefix + ":running";
+        String clusterWaitingPrefix = clusterPrefix + ":waiting";
+        String projectRunningKey = clusterRunningPrefix + ":" + projectId;
+        String projectWaitingKey = clusterWaitingPrefix + ":" + projectId;
+        String projectCheckKey = projectRunningKey + ":check";
+
+        return PrefixEntity.builder().clusterPrefix(clusterPrefix).clusterRunningPrefix(clusterRunningPrefix).clusterWaitingPrefix(clusterWaitingPrefix).projectRunningKey(projectRunningKey).projectWaitingKey(projectWaitingKey).projectCheckKey(projectCheckKey).build();
+    }
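+
+    // Cluster selection, as implemented above: admin accounts and admin sub-accounts use the system
+    // cluster; ordinary accounts and exclusive sub-accounts use their own cluster; shared
+    // sub-accounts fall back to the cluster of the parent account that created them.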
+
+
+    public void incrementOneParallelismOfGpuNode(String nodeName) {
+        incrementParallelismOfGpuNode(nodeName, 1L);
     }
-    log.info("即将使用节点的并行度为:" + resultNodeMap);
-    return resultNodeMap;
-  }
-
-
-  public PrefixEntity getRedisPrefixByUserIdAndProjectIdAndTaskId(String userId, String projectId, String taskId) {
-    //3 获取用户类型(管理员账户、管理员子账户、普通账户、普通子账户)(独占、共享)
-    UserEntity userEntity = userMapper.selectById(userId);
-    String roleCode = userEntity.getRoleCode();
-    String useType = userEntity.getUseType();
-    String clusterId;
-    if (DictConstants.ROLE_CODE_SYSADMIN.equals(roleCode) || DictConstants.ROLE_CODE_ADMIN.equals(roleCode)) {  //3-1 管理员账户和管理员子账户直接执行
-      clusterId = DictConstants.SYSTEM_CLUSTER_ID;
-    } else if (DictConstants.ROLE_CODE_UESR.equals(roleCode)) { //3-2 普通账户,不管是独占还是共享,都在自己的集群里排队,根据自己的独占节点排队
-      clusterId = clusterMapper.selectByUserId(userId).getId();
-    } else if (DictConstants.ROLE_CODE_SUBUESR.equals(roleCode)) {
-      if (DictConstants.USER_TYPE_EXCLUSIVE.equals(useType)) {   //3-3 普通子账户,根据自己的独占节点排队
-        clusterId = clusterMapper.selectByUserId(userId).getId();
-      } else {    //3-4 共享子账户,根据父账户的共享节点排队
-        String parentUserId = userEntity.getCreateUserId();
-        clusterId = clusterMapper.selectByUserId(parentUserId).getId();
-      }
-    } else {
-      throw new RuntimeException("未知账户类型,无法获取集群信息。");
+
+    public void incrementParallelismOfGpuNode(String nodeName, long number) {
+        String key = "gpu-node:" + nodeName + ":parallelism";
+        customRedisClient.increment(key, number);
+        log.info("归还节点 {} 的 {} 个 GPU 并行度。", nodeName, number);
     }
-    String clusterPrefix = "cluster:" + clusterId;
-    String clusterRunningPrefix = clusterPrefix + ":running";
-    String projectRunningKey = clusterRunningPrefix + ":" + projectId;
-    String taskTickKey = projectRunningKey + ":task:" + taskId + ":tick";
-    String taskPodKey = projectRunningKey + ":task:" + taskId + ":pod";
-    String taskRetryKey = projectRunningKey + ":task:" + taskId + ":retry";
-    String taskMessageKey = projectRunningKey + ":task:" + taskId + ":message";
-
-    return PrefixEntity.builder().clusterPrefix(clusterPrefix).clusterRunningPrefix(clusterRunningPrefix).projectRunningKey(projectRunningKey).taskTickKey(taskTickKey).taskPodKey(taskPodKey).taskRetryKey(taskRetryKey).taskMessageKey(taskMessageKey).build();
-
-  }
-
-
-  public PrefixEntity getRedisPrefixByClusterIdAndProjectId(String clusterId, String projectId) {
-    String clusterPrefix = "cluster:" + clusterId;
-    String clusterRunningPrefix = clusterPrefix + ":running";
-    String clusterWaitingPrefix = clusterPrefix + ":waiting";
-    String projectRunningKey = clusterRunningPrefix + ":" + projectId;
-    String projectWaitingKey = clusterWaitingPrefix + ":" + projectId;
-    String projectCheckKey = clusterWaitingPrefix + ":" + projectId + ":check";
-
-    return PrefixEntity.builder().clusterPrefix(clusterPrefix).clusterRunningPrefix(clusterRunningPrefix).clusterWaitingPrefix(clusterWaitingPrefix).projectRunningKey(projectRunningKey).projectWaitingKey(projectWaitingKey).projectCheckKey(projectCheckKey).build();
-
-  }
-
-
-  /**
-   * 获取 projectId 列表
-   *
-   * @param clusterRunningKeySet 集群下的所有键值对(包括运行中的项目和等待中的项目)
-   * @return projectId 列表
-   */
-  public List<String> getRunningProjectList(Set<String> clusterRunningKeySet) {
-    return clusterRunningKeySet.stream().filter(key -> StringUtil.countSubString(key, ":") == 3).collect(Collectors.toList());
-  }
-
-
-  public PrefixEntity getRedisPrefixByProjectIdAndProjectType(String projectId, String projectType) {
-    String userId;
-    if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
-      userId = manualProjectMapper.selectCreateUserById(projectId);
-    } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
-      userId = autoSubProjectMapper.selectCreateUserById(projectId);
-    } else {
-      throw new RuntimeException("未知的项目类型。");
+
+    public void decrementParallelismOfGpuNode(String nodeName, long number) {
+        String key = "gpu-node:" + nodeName + ":parallelism";
+        customRedisClient.decrement(key, number);
+        log.info("获取节点 {} 的 {} 个 GPU 并行度。", nodeName, number);
     }
 
-    //3 获取用户类型(管理员账户、管理员子账户、普通账户、普通子账户)(独占、共享)
-    UserEntity userEntity = userMapper.selectById(userId);
-    String roleCode = userEntity.getRoleCode();
-    String useType = userEntity.getUseType();
-    String clusterId;
-    if (DictConstants.ROLE_CODE_SYSADMIN.equals(roleCode) || DictConstants.ROLE_CODE_ADMIN.equals(roleCode)) {  //3-1 管理员账户和管理员子账户直接执行
-      clusterId = DictConstants.SYSTEM_CLUSTER_ID;
-    } else if (DictConstants.ROLE_CODE_UESR.equals(roleCode)) { //3-2 普通账户,不管是独占还是共享,都在自己的集群里排队,根据自己的独占节点排队
-      clusterId = clusterMapper.selectByUserId(userId).getId();
-    } else if (DictConstants.ROLE_CODE_SUBUESR.equals(roleCode)) {
-      if (DictConstants.USER_TYPE_EXCLUSIVE.equals(useType)) {   //3-3 普通子账户,根据自己的独占节点排队
-        clusterId = clusterMapper.selectByUserId(userId).getId();
-      } else {    //3-4 共享子账户,根据父账户的共享节点排队
-        String parentUserId = userEntity.getCreateUserId();
-        clusterId = clusterMapper.selectByUserId(parentUserId).getId();
-      }
-    } else {
-      throw new RuntimeException("未知账户类型,无法获取集群信息!");
+
+    public List<String> getWaitingProjectMessageKeys() {
+        final Set<String> keys = stringRedisTemplate.keys("*");
+        if (CollectionUtil.isEmpty(keys)) {
+            return new ArrayList<>();
+        } else {
+            return keys.stream().filter(key -> key.contains("waiting") && key.contains("message")).collect(Collectors.toList());
+        }
     }
-    String clusterPrefix = "cluster:" + clusterId;
-    String clusterRunningPrefix = clusterPrefix + ":running";
-    String clusterWaitingPrefix = clusterPrefix + ":waiting";
-    String projectRunningKey = clusterRunningPrefix + ":" + projectId;
-    String projectWaitingKey = clusterWaitingPrefix + ":" + projectId;
-    String projectCheckKey = projectRunningKey + ":check";
-
-    return PrefixEntity.builder().clusterPrefix(clusterPrefix).clusterRunningPrefix(clusterRunningPrefix).clusterWaitingPrefix(clusterWaitingPrefix).projectRunningKey(projectRunningKey).projectWaitingKey(projectWaitingKey).projectCheckKey(projectCheckKey).build();
-  }
-
-
-  public void incrementOneParallelismOfGpuNode(String nodeName) {
-    incrementParallelismOfGpuNode(nodeName, 1L);
-  }
-
-  public void incrementParallelismOfGpuNode(String nodeName, long number) {
-    String key = "gpu-node:" + nodeName + ":parallelism";
-    customRedisClient.increment(key, 1L);
-    log.info("归还节点 {} 的 {} 个 GPU 并行度。", nodeName, number);
-  }
-
-  public void decrementParallelismOfGpuNode(String nodeName, long number) {
-    String key = "gpu-node:" + nodeName + ":parallelism";
-    customRedisClient.decrement(key, number);
-    log.info("获取节点 {} 的 {} 个 GPU 并行度。", nodeName, number);
-  }
-
-
-  public List<String> getWaitingProjectMessageKeys() {
-    final Set<String> keys = stringRedisTemplate.keys("*");
-    if (CollectionUtil.isEmpty(keys)) {
-      return new ArrayList<>();
-    } else {
-      return keys.stream().filter(key -> key.contains("waiting") && key.contains("message")).collect(Collectors.toList());
+
+    public void resetNodeParallelism() {
+        kubernetesConfiguration.getNodeList().forEach((node) -> customRedisClient.set("gpu-node:" + node.getHostname() + ":parallelism", node.getParallelism() + ""));
+        esminiConfiguration.getNodeList().forEach((node) -> customRedisClient.set("cpu-node:" + node.getHostname() + ":parallelism", node.getParallelism() + ""));
     }
-  }
-
-  public void resetNodeParallelism() {
-    kubernetesConfiguration.getNodeList().forEach((node) -> customRedisClient.set("gpu-node:" + node.getHostname() + ":parallelism", node.getParallelism() + ""));
-    esminiConfiguration.getNodeList().forEach((node) -> customRedisClient.set("cpu-node:" + node.getHostname() + ":parallelism", node.getParallelism() + ""));
-  }
-
-  /**
-   * 判断项目完成度
-   */
-  @Synchronized
-  public boolean complete(PrefixEntity redisPrefix, String projectId) {
-    boolean result = false;
-    ProjectMessageDTO projectMessageDTO = JsonUtil.jsonToBean(customRedisClient.get(redisPrefix.getProjectRunningKey()), ProjectMessageDTO.class);
-    int taskTotal = projectMessageDTO.getTaskTotal();
-    int taskCompleted = projectMessageDTO.getTaskCompleted();
-    log.info("complete() 项目 " + projectId + " 完成进度为:" + (taskCompleted + 1) + "/" + taskTotal);
-    if (taskCompleted + 1 == taskTotal) {
-      result = true;
-    } else {    // 项目没有完成
-      projectMessageDTO.setTaskCompleted(taskCompleted + 1);  // 增加已完成任务数
-      stringRedisTemplate.opsForValue().set(redisPrefix.getProjectRunningKey(), JsonUtil.beanToJson(projectMessageDTO));
+
+    /**
+     * 判断项目完成度
+     */
+    @Synchronized
+    public boolean complete(PrefixEntity redisPrefix, String projectId) {
+        boolean result = false;
+        ProjectMessageDTO projectMessageDTO = JsonUtil.jsonToBean(customRedisClient.get(redisPrefix.getProjectRunningKey()), ProjectMessageDTO.class);
+        int taskTotal = projectMessageDTO.getTaskTotal();
+        int taskCompleted = projectMessageDTO.getTaskCompleted();
+        log.info("complete() 项目 " + projectId + " 完成进度为:" + (taskCompleted + 1) + "/" + taskTotal);
+        if (taskCompleted + 1 == taskTotal) {
+            result = true;
+        } else {    // 项目没有完成
+            projectMessageDTO.setTaskCompleted(taskCompleted + 1);  // 增加已完成任务数
+            stringRedisTemplate.opsForValue().set(redisPrefix.getProjectRunningKey(), JsonUtil.beanToJson(projectMessageDTO));
+        }
+        return result;
     }
-    return result;
-  }
 }

+ 449 - 0
simulation-resource-scheduler/src/main/java/com/css/simulation/resource/scheduler/util/TaskUtil.java

@@ -0,0 +1,449 @@
+package com.css.simulation.resource.scheduler.util;
+
+import api.common.pojo.constants.DictConstants;
+import api.common.util.*;
+import com.css.simulation.resource.scheduler.configuration.feign.VideoFeignClient;
+import com.css.simulation.resource.scheduler.configuration.kubernetes.KubernetesConfiguration;
+import com.css.simulation.resource.scheduler.entity.*;
+import com.css.simulation.resource.scheduler.mapper.*;
+import com.css.simulation.resource.scheduler.service.TaskIndexManager;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.kubernetes.client.openapi.ApiClient;
+import io.minio.MinioClient;
+import lombok.SneakyThrows;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.ibatis.session.ExecutorType;
+import org.apache.ibatis.session.SqlSession;
+import org.apache.ibatis.session.SqlSessionFactory;
+import org.apache.kafka.clients.admin.Admin;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.data.redis.core.StringRedisTemplate;
+import org.springframework.stereotype.Component;
+
+import javax.annotation.Resource;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStreamReader;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+@Component
+@Slf4j
+public class TaskUtil {
+
+ @Value("${minio.bucket-name}")
+ private String bucketName;
+ @Value("${scheduler.linux-path.score-py}")
+ private String pyPath;
+ @Value("${scheduler.linux-path.temp}")
+ private String linuxTempPath;
+ @Value("${simulation-cloud.client-id}")
+ private String clientId;
+ @Value("${simulation-cloud.client-secret}")
+ private String clientSecret;
+ @Value("${simulation-cloud.token-uri}")
+ private String tokenUri;
+ @Value("${simulation-cloud.evaluation-level-uri}")
+ private String evaluationLevelUri;
+ @Value("${scheduler.minio-path.project-result}")
+ private String resultPathMinio;
+ @Resource
+ private StringRedisTemplate stringRedisTemplate;
+ @Resource
+ private TaskMapper taskMapper;
+ @Resource
+ private MinioClient minioClient;
+ @Resource
+ private ManualProjectMapper manualProjectMapper;
+ @Resource
+ private AutoSubProjectMapper autoSubProjectMapper;
+ @Resource
+ private TaskIndexManager taskIndexManager;
+ @Resource
+ private IndexMapper indexMapper;
+ @Resource
+ private ScoringRulesMapper scoringRulesMapper;
+ @Resource
+ private CloseableHttpClient closeableHttpClient;
+ @Resource
+ private RequestConfig requestConfig;
+ @Resource
+ private ProjectUtil projectUtil;
+ @Resource
+ private VideoFeignClient videoFeignClient;
+ @Resource
+ private SqlSessionFactory sqlSessionFactory;
+ @Resource
+ private KubernetesConfiguration kubernetesConfiguration;
+ @Resource
+ private ApiClient apiClient;
+ @Resource(name = "myKafkaAdmin")
+ private Admin admin;
+
+ public void batchInsertTask(List<TaskEntity> taskEntityList) {
+  try (SqlSession sqlSession = sqlSessionFactory.openSession(ExecutorType.BATCH, false)) {
+   TaskMapper taskMapper1 = sqlSession.getMapper(TaskMapper.class);
+   for (TaskEntity taskEntity : taskEntityList) {
+    taskMapper1.insert(taskEntity);
+   }
+   sqlSession.commit();
+  }
+ }
+
+ public boolean handleErrorTask(PrefixEntity redisPrefix, String projectId, String projectType, String maxSimulationTime, String taskId, String state, String podName) {
+  return isProjectCompleted(redisPrefix, projectId, projectType, maxSimulationTime, taskId, state, podName);
+ }
+
+ /**
+  * 加事务的话高并发情况下会死锁
+  */
+ @SneakyThrows
+ public boolean isProjectCompleted(PrefixEntity redisPrefix, String projectId, String projectType, String maxSimulationTime, String taskId, String state, String podName) {
+  boolean result;
+  String nodeName = projectUtil.getNodeNameOfPod(projectId, podName);
+  if (DictConstants.TASK_RUNNING.equals(state)) {  // 运行中的 pod 无需删除
+   // 将运行中的任务的 pod 名称放入 redis
+   stringRedisTemplate.opsForValue().set(redisPrefix.getTaskPodKey(), podName);
+   taskTick(taskId); // 刷新一下心跳
+   log.info("修改任务 " + taskId + " 的状态为 " + state + ",pod 名称为:" + podName);
+   taskMapper.updateStateWithStartTime(taskId, state, TimeUtil.getNowForMysql());
+   return false;
+  } else { // 结束的 pod 都直接删除,并判断项目是否完成
+   // -------------------------------- 处理状态 --------------------------------
+   log.info("修改任务 {} 的状态为 {} ,pod 名称为 {} ,并删除 pod。", taskId, state, podName);
+   if (DictConstants.TASK_ABORTED.equals(state)) {
+    String minioPathOfErrorLog = resultPathMinio + projectId + "/" + taskId + "/error.log";
+    boolean objectExist = MinioUtil.isObjectExist(minioClient, bucketName, minioPathOfErrorLog);
+    String targetEvaluate;
+    if (objectExist) {
+     String errorString = MinioUtil.downloadToString(minioClient, bucketName, minioPathOfErrorLog);
+     String[] lines = errorString.split("\n");
+     StringBuilder errorMessage = new StringBuilder();
+     for (String line : lines) {
+      if (line.startsWith("Original Error")) {
+       errorMessage.append(line).append("\n");
+      }
+      if (line.startsWith("Possible Cause")) {
+       errorMessage.append(line);
+       break;
+      }
+     }
+     targetEvaluate = errorMessage.toString();
+    } else {
+     targetEvaluate = DictConstants.TASK_ERROR_REASON_2;
+    }
+    taskMapper.updateFailStateWithStopTime(taskId, state, TimeUtil.getNowForMysql(), targetEvaluate);
+   } else if (DictConstants.TASK_TERMINATED.equals(state)) {
+    taskMapper.updateFailStateWithStopTime(taskId, state, TimeUtil.getNowForMysql(), DictConstants.TASK_ERROR_REASON_3);
+   } else if (DictConstants.TASK_ANALYSIS.equals(state)) { // 该状态只会获得一次
+    taskMapper.updateSuccessStateWithStopTime(taskId, state, TimeUtil.getNowForMysql());
+    // 查询项目是否使用 gpu 生成视频(0是1否)
+    String isChoiceGpu = projectUtil.getProjectByProjectId(projectId).getIsChoiceGpu();
+    if (DictConstants.VIDEO_GPU.equals(isChoiceGpu)) {
+     log.info("项目 {} 使用 GPU 生成视频。", projectId);
+    } else if (DictConstants.VIDEO_CPU.equals(isChoiceGpu)) {
+     log.info("项目 {} 使用 CPU 生成视频。", projectId);
+     videoFeignClient.generateVideo(projectId, projectType, maxSimulationTime, taskId);
+    } else {
+     throw new RuntimeException("未设置视频生成。");
+    }
+   }
+   // -------------------------------- 判断项目是否结束 --------------------------------
+   result = projectUtil.complete(redisPrefix, projectId);
+   if (!result) {
+    log.info("项目 " + projectId + " 还未运行完成。");
+    projectUtil.createNextPod(projectId, nodeName, podName);
+   } else {
+    //如果项目已完成先把 pod 删除,并归还并行度
+    KubernetesUtil.deletePod2(apiClient, kubernetesConfiguration.getNamespace(), podName);
+    projectUtil.incrementOneParallelismOfGpuNode(nodeName);
+   }
+   RedisUtil.deleteByPrefix(stringRedisTemplate, redisPrefix.getTaskMessageKey());
+   RedisUtil.deleteByPrefix(stringRedisTemplate, redisPrefix.getTaskPodKey());
+  }
+  return result;
+ }
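+
+ // State handling above, in brief: a Running pod is left alive and only the heartbeat and pod name
+ // are recorded; an Aborted task gets the "Original Error"/"Possible Cause" lines of error.log from
+ // MinIO (or a default reason) as its failure evaluation; a Terminated task is marked failed; the
+ // Analysis state marks success and, for projects configured to render video on CPU, triggers video
+ // generation. Any finished task then either schedules the next pod or, when the whole project is
+ // complete, deletes the pod and returns one unit of GPU parallelism to the node.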
+
+
+ /**
+  * @param userId 项目创建用户的 id
+  */
+ @SneakyThrows
+ public void score(String projectRunningKey, String userId, String projectId, String projectType) {
+  stringRedisTemplate.delete(projectRunningKey);
+  // -------------------------------- 打分 --------------------------------
+  ProjectEntity projectEntity = null;
+  if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
+   projectEntity = manualProjectMapper.selectById(projectId);
+  } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
+   projectEntity = autoSubProjectMapper.selectById(projectId);
+  }
+  if (projectEntity == null) {
+   log.error("不存在项目 {}", projectId);
+   return;
+  }
+  String packageId = projectEntity.getScenePackageId();  // 场景测试包 id,指标的rootId
+  TimeUnit.SECONDS.sleep(10); // 先等一下数据库更新
+  List<TaskEntity> taskList = taskMapper.selectTaskListByProjectId(projectId);  // 所有任务信息
+  if (CollectionUtil.isEmpty(taskList)) {
+   log.error("项目 {} 下没有查询到任务!", projectId);
+   return;
+  }
+  indexMapper.deleteFirstByProjectId(projectId);
+  indexMapper.deleteLastByProjectId(projectId);
+  //1 查询场景包对应指标
+  String allIndexKey = "project:" + projectId + ":package:" + packageId + ":all";
+  String leafIndexKey = "project:" + projectId + ":package:" + packageId + ":leaf";
+  String allIndexTemplateListJson = stringRedisTemplate.opsForValue().get(allIndexKey);
+  String leafIndexTemplateListJson = stringRedisTemplate.opsForValue().get(leafIndexKey);
+  List<IndexTemplateEntity> allIndexTemplateList = JsonUtil.jsonToList(allIndexTemplateListJson, IndexTemplateEntity.class);
+  List<IndexTemplateEntity> leafIndexTemplateList = JsonUtil.jsonToList(leafIndexTemplateListJson, IndexTemplateEntity.class);
+  log.info("共有 " + leafIndexTemplateList.size() + "个叶子节点:" + leafIndexTemplateListJson);
+  int maxLevel = 1; // 用于计算指标得分
+  List<LeafIndexEntity> leafIndexList = new ArrayList<>();
+  for (int i = 0; i < leafIndexTemplateList.size(); i++) {
+   String scoreExplain = null; // 每个叶子指标下的任务的得分说明一样和叶子指标一致
+   IndexTemplateEntity leafIndexTemplate = leafIndexTemplateList.get(i);
+   String indexId = leafIndexTemplate.getIndexId(); // 叶子指标id
+   String parentId = leafIndexTemplate.getParentId(); // 父 id
+   String rootId = leafIndexTemplate.getRootId(); // 包 id
+   String weight = leafIndexTemplate.getWeight(); // 权重
+   Integer packageLevel = leafIndexTemplate.getPackageLevel(); // 几级指标
+   String ruleName = leafIndexTemplate.getRuleName();    // 打分脚本名称,例如 AEB_1-1
+   String ruleDetails = leafIndexTemplate.getRuleDetails();    // 打分脚本内容
+   if (packageLevel > maxLevel) {
+    maxLevel = packageLevel;
+   }
+   log.info("开始执行对第 " + (i + 1) + " 个叶子节点 " + indexId + " 进行打分!");
+   // 根据叶子指标id查询评分规则创建用户id
+   String createUserIdOfRule = scoringRulesMapper.selectCreateUserIdByIndexId(indexId);
+   //1 判断有没有用户目录,没有则复制
+   String scoreDirectoryOfUser = linuxTempPath + "score/" + createUserIdOfRule + "/";
+   if (!new File(scoreDirectoryOfUser + "main.py").exists()) {
+    // 复制 main.py
+    FileUtil.createDirectory(scoreDirectoryOfUser);
+    FileUtil.cpR(pyPath, scoreDirectoryOfUser);
+   }
+   //2 将打分规则保存到script目录
+
+   String ruleFilePath = scoreDirectoryOfUser + "scripts/" + ruleName.split("_")[0] + "/" + ruleName + ".py";
+   FileUtil.writeInputStreamToLocalFile(IoUtil.stringToInputStream(ruleDetails), ruleFilePath);
+   log.info("将叶子节点 " + indexId + " 对应的打分规则保存到临时目录:" + ruleFilePath);
+   List<TaskEntity> taskListOfLeafIndex = taskList.stream().filter(task -> indexId.equals(task.getLastTargetId())).collect(Collectors.toList());
+   log.info("叶子节点 " + indexId + " 包括 " + taskListOfLeafIndex.size() + " 个任务:" + taskListOfLeafIndex);
+   // 计算叶子指标的得分
+   // 使用 stream 流会出现无法进入循环的情况
+   for (TaskEntity taskOfLeaf : taskListOfLeafIndex) {
+    String task2Id = taskOfLeaf.getId();
+
+    String runState = taskOfLeaf.getRunState();
+    log.info("TaskManager--score 任务 " + task2Id + " 的运行状态为:" + runState);
+    if (DictConstants.TASK_ANALYSIS.equals(runState)) {
+     taskMapper.updateSuccessStateWithStopTime(task2Id, DictConstants.TASK_ANALYSING, TimeUtil.getNowForMysql());
+     // 计算每个任务的得分
+     String result1OfMinio = taskOfLeaf.getRunResultFilePath() + "/Ego.csv";
+     String result1OfLinux = linuxTempPath + result1OfMinio;
+     String result2OfMinio = taskOfLeaf.getRunResultFilePath() + "/evaluation.csv";
+     String result2OfLinux = linuxTempPath + result2OfMinio;
+     String scoreCommand = "python3 " + scoreDirectoryOfUser + "main.py " + result1OfLinux + " " + result2OfLinux + " " + taskOfLeaf.getSceneType() + " " + ruleName; // 指定打分脚本
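+     // e.g. "python3 <linuxTempPath>score/<createUserIdOfRule>/main.py <linuxTempPath><runResultFilePath>/Ego.csv
+     //       <linuxTempPath><runResultFilePath>/evaluation.csv <sceneType> AEB_1-1" (placeholders stand for the values concatenated above)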
+     String scoreResult;
+     ScoreEntity score = null;
+     log.info("下载 minio 上的结果文件 " + result1OfMinio + " 和 " + result2OfMinio + " 到临时目录:" + linuxTempPath);
+     MinioUtil.downloadToFile(minioClient, bucketName, result1OfMinio, result1OfLinux);  // 也可改成下载到指定ip的服务器上,需要保证和打分脚本在一台机器上。
+     MinioUtil.downloadToFile(minioClient, bucketName, result2OfMinio, result2OfLinux);  // 也可改成下载到指定ip的服务器上,需要保证和打分脚本在一台机器上。
+     log.info("开始执行打分命令:" + scoreCommand);
+     Runtime r = Runtime.getRuntime();
+     Process p = r.exec(scoreCommand, null, new File(scoreDirectoryOfUser));
+     BufferedReader br = new BufferedReader(new InputStreamReader(p.getInputStream()));
+     StringBuilder sb = new StringBuilder();
+     String inline;
+     while (null != (inline = br.readLine())) {
+      sb.append(inline).append("\n");
+     }
+     scoreResult = sb.toString();
+     log.info("项目" + projectId + " 的任务 " + task2Id + " 打分结束,结果为:" + scoreResult);
+     String replace = StringUtil.replace(scoreResult, "'", "\"");
+     try {
+      score = JsonUtil.jsonToBean(replace, ScoreEntity.class);
+     } catch (Exception e) { // 打分失败
+      log.info("项目" + projectId + " 的任务 " + task2Id + " 打分失败:", e);
+     }
+     if (score != null) {
+      taskOfLeaf.setReturnSceneId(score.getUnit_scene_ID());
+      taskOfLeaf.setTargetEvaluate(score.getEvaluate_item());
+      taskOfLeaf.setScoreExplain(score.getScore_description());
+      taskOfLeaf.setModifyUserId(userId);
+      taskOfLeaf.setModifyTime(TimeUtil.getNowForMysql());
+      scoreExplain = score.getScore_description();
+      taskOfLeaf.setRunState(DictConstants.TASK_COMPLETED);
+      double taskScore = score.getUnit_scene_score();
+      if (taskScore == -1.0) {
+       taskOfLeaf.setScore(0.0);
+       taskOfLeaf.setScored(false);
+      } else {
+       taskOfLeaf.setScore(NumberUtil.cut(taskScore, 2));
+       taskOfLeaf.setScored(true);
+      }
+      taskMapper.updateSuccessState(taskOfLeaf, DictConstants.TASK_COMPLETED);
+     } else {
+      //1 修改任务状态为 aborted
+      taskOfLeaf.setRunState(DictConstants.TASK_ABORTED);
+      taskOfLeaf.setScore(0.0);
+      taskOfLeaf.setScored(false);
+      taskMapper.updateFailStateWithStopTime(task2Id, DictConstants.TASK_ABORTED, TimeUtil.getNowForMysql(), DictConstants.TASK_ERROR_REASON_5);
+     }
+    }
+   }
+
+   // 全部参与计算
+   // 计算不合格的任务数(不到100分就是不合格,执行失败的不算)
+   // 计算叶子指标下任务得分总和
+   int errorSceneNumber = 0;   // 仿真失败的任务
+   int notScoredSceneNumber = 0;   // 评分失败的任务
+   int notStandardSceneNumber = 0;
+   int standardSceneNumber = 0;
+   double leafSum = 0.0;
+   for (TaskEntity task : taskListOfLeafIndex) {
+    Double scoreTemp = task.getScore();
+    if (scoreTemp == null) {   // 失败状态的任务是没有分数的,计作 0 分。
+     errorSceneNumber++;
+     scoreTemp = 0.0;
+    } else if (task.getScored() == null || !task.getScored()) {  // 如果评分失败,也计作 0 分
+     notScoredSceneNumber++;
+     scoreTemp = 0.0;
+    } else if (scoreTemp < 100.0) {
+     notStandardSceneNumber++;
+    } else if (scoreTemp == 100.0) {
+     standardSceneNumber++;
+    }
+    // 计算分数总和
+    leafSum += scoreTemp;
+   }
+
+   // 计算任务的个数
+   long taskNumberToScore = taskListOfLeafIndex.size();
+   log.info("项目 " + projectId + " 的叶子指标 " + indexId + " 下参与计算的任务总数为 " + taskNumberToScore + ":仿真异常场景个数 " + errorSceneNumber + "、未达标场景个数 " + notStandardSceneNumber + "、达标场景个数 " + standardSceneNumber);
+
+   // 计算叶子指标得分(任务得分总和 / 任务数量)
+   double leafIndexScore = NumberUtil.cut(leafSum / taskNumberToScore, 2);
+   // 创建叶子指标对象
+   leafIndexTemplate.setTempScore(leafIndexScore);
+
+   LeafIndexEntity leafIndex = LeafIndexEntity.builder().id(StringUtil.getRandomUUID()).pId(projectId).target(leafIndexTemplate.getIndexId()).errorSceneNum(errorSceneNumber).notScoredSceneNum(notScoredSceneNumber).notStandardSceneNum(notStandardSceneNumber).standardSceneNum(standardSceneNumber).score(leafIndexScore).indexId(indexId).parentId(parentId).rootId(rootId).weight(weight).scoreExplain(scoreExplain).packageLevel(packageLevel).build();
+   leafIndex.setCreateUserId(userId);
+   leafIndex.setCreateTime(TimeUtil.getNowForMysql());
+   leafIndex.setModifyUserId(userId);
+   leafIndex.setModifyTime(TimeUtil.getNowForMysql());
+   leafIndex.setIsDeleted("0");
+
+   leafIndexList.add(leafIndex);
+  }
+  // 保存叶子指标得分
+  taskIndexManager.batchInsertLeafIndex(leafIndexList);
+  // 保存一级指标分数
+  log.info("项目 " + projectId + " 的所有任务分数为:" + taskList);
+  computeFirst(leafIndexList, allIndexTemplateList, projectId, maxLevel);
+  log.info("项目 " + projectId + " 打分完成!");
+ }
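+
+ // Leaf-score example (hypothetical numbers): three tasks under one leaf index scoring 100, 80 and
+ // one aborted task counted as 0 give leafSum = 180 over taskNumberToScore = 3, so the leaf index
+ // score becomes 60.0; the 100-point task counts as standard, the 80-point task as not standard and
+ // the aborted one as an error scene.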
+
+ public void computeFirst(List<LeafIndexEntity> leafIndexList, List<IndexTemplateEntity> allIndexTemplateList, String projectId, int maxLevel) {
+
+  log.info("计算父指标得分:" + leafIndexList);
+  Iterator<LeafIndexEntity> leafTaskIndexIterator = leafIndexList.iterator();
+  // 把 1 级的指标得分直接保存
+  while (leafTaskIndexIterator.hasNext()) {
+   LeafIndexEntity leafTaskIndex = leafTaskIndexIterator.next();
+   if (leafTaskIndex.getPackageLevel() == 1) {
+    leafTaskIndex.setCreateTime(TimeUtil.getNowForMysql());
+    leafTaskIndex.setModifyTime(TimeUtil.getNowForMysql());
+    leafTaskIndex.setIsDeleted("0");
+    indexMapper.insertFirstIndex(leafTaskIndex);
+    leafTaskIndexIterator.remove();
+   }
+  }
+  if (leafIndexList.size() > 0) {
+   List<LeafIndexEntity> nextLevelIndexList = new ArrayList<>();
+   // 找出等级和 maxLevel 不相同的指标暂时不计算
+   leafIndexList.stream().filter(po -> maxLevel != po.getPackageLevel()).forEach(nextLevelIndexList::add);
+   // 找出等级和 maxLevel 相同的指标并根据父指标分组
+   Map<String, List<LeafIndexEntity>> sonTaskIndexMap = leafIndexList.stream().filter(po -> maxLevel == po.getPackageLevel()).collect(Collectors.groupingBy(LeafIndexEntity::getParentId));
+   Set<String> parentIdSet = sonTaskIndexMap.keySet();
+   List<String> parentIdList = CollectionUtil.setToList(parentIdSet);
+
+   List<IndexTemplateEntity> parentIndexTemplateList = allIndexTemplateList.stream().filter(indexTemplate -> parentIdList.contains(indexTemplate.getIndexId())).collect(Collectors.toList());
+   // 计算父指标得分
+   parentIndexTemplateList.forEach(indexTemplate -> {
+    String weight = indexTemplate.getWeight();
+    List<LeafIndexEntity> sonTaskIndexList = sonTaskIndexMap.get(indexTemplate.getIndexId());
+    double parentScore = NumberUtil.cut(sonTaskIndexList.stream().mapToDouble(taskIndex -> taskIndex.getScore() * Double.parseDouble(taskIndex.getWeight()) / 100).sum(), 2);
+    LeafIndexEntity parentTaskIndex = LeafIndexEntity.builder().id(StringUtil.getRandomUUID()).pId(projectId).target(indexTemplate.getIndexId()).score(parentScore).indexId(indexTemplate.getIndexId()).parentId(indexTemplate.getParentId()).rootId(indexTemplate.getRootId()).weight(weight).packageLevel(maxLevel - 1).build();
+    nextLevelIndexList.add(parentTaskIndex);
+   });
+   // 将父指标作为叶子指标递归
+   computeFirst(nextLevelIndexList, allIndexTemplateList, projectId, maxLevel - 1);
+  }
+ }
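+
+ // Parent-score example (hypothetical numbers): two level-2 children of one parent scoring 80 and
+ // 100 with weights "50" and "50" yield 80 * 0.5 + 100 * 0.5 = 90.0 for the parent, which re-enters
+ // the recursion as a level-1 index and is finally persisted through insertFirstIndex.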
+
+ @SneakyThrows
+ public void evaluationLevel(String projectId) {
+  String tokenUrl = tokenUri + "?grant_type=client_credentials" + "&client_id=" + clientId + "&client_secret=" + clientSecret;
+  log.info("获取仿真云平台 token:" + tokenUrl);
+  String response = HttpUtil.get(closeableHttpClient, requestConfig, tokenUrl);
+  ObjectMapper objectMapper = new ObjectMapper();
+  JsonNode jsonNode = objectMapper.readTree(response);
+  String accessToken = jsonNode.path("access_token").asText();
+  log.info("仿真云平台 token 为:" + accessToken);
+  Map<String, String> headers = new HashMap<>();
+  headers.put("Authorization", "Bearer " + accessToken);
+  Map<String, String> params = new HashMap<>();
+  params.put("id", projectId);
+  String post = HttpUtil.post(closeableHttpClient, requestConfig, evaluationLevelUri, headers, params);
+  log.info("访问仿真云平台评价等级接口:" + evaluationLevelUri + ",请求头为:" + headers + ",请求体为:" + params + "结果为:" + post);
+ }
+
+
+ public Boolean taskConfirm(String taskId) {
+  // 查询 task 如果不是 pending 则不执行
+  String state = taskMapper.selectStateById(taskId);
+  return DictConstants.TASK_PENDING.equals(state);
+ }
+
+ public void taskTick(String taskId) {
+  log.info("收到任务 " + taskId + " 的心跳。");
+  TaskEntity taskEntity = taskMapper.selectById(taskId);
+  String projectId = taskEntity.getPId();
+  String userId = taskEntity.getCreateUserId();
+  // 刷新 redis 心跳时间
+  PrefixEntity redisPrefix = projectUtil.getRedisPrefixByUserIdAndProjectIdAndTaskId(userId, projectId, taskId);
+  if (RedisUtil.getStringByKey(stringRedisTemplate, redisPrefix.getProjectRunningKey()) != null) {
+   stringRedisTemplate.opsForValue().set(redisPrefix.getTaskTickKey(), TimeUtil.getNowString());
+  }
+
+ }
+
+
+ @SneakyThrows
+ public void done(PrefixEntity redisPrefix, String projectId, String projectType) {
+  // 更新项目状态为已完成
+  if (DictConstants.PROJECT_TYPE_MANUAL.equals(projectType)) {
+   manualProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_COMPLETED, TimeUtil.getNowForMysql());
+  } else if (DictConstants.PROJECT_TYPE_AUTO_SUB.equals(projectType)) {
+   autoSubProjectMapper.updateProjectState(projectId, DictConstants.PROJECT_COMPLETED, TimeUtil.getNowForMysql());
+  }
+  // 删除 kafka topic
+  ApacheKafkaUtil.deleteTopic(admin, projectId);
+  // 删除 redis 中的 项目运行信息 键值对
+  RedisUtil.deleteByPrefix(stringRedisTemplate, redisPrefix.getProjectRunningKey());
+  RedisUtil.deleteByPrefix(stringRedisTemplate, "project:" + projectId);
+  // 删除剩余 yaml
+  projectUtil.deleteYamlByProjectId(projectId);
+ }
+
+
+}