@@ -1,161 +1,210 @@
+package com.css.simulation.resource.scheduler.scheduler;
+
+import api.common.pojo.constants.DictConstants;
+import api.common.util.CollectionUtil;
+import api.common.util.SshUtil;
+import api.common.util.StringUtil;
+import api.common.util.TimeUtil;
+import com.css.simulation.resource.scheduler.mapper.ManualProjectMapper;
+import com.css.simulation.resource.scheduler.mapper.TaskMapper;
+import com.css.simulation.resource.scheduler.pojo.po.TaskPO;
+import com.css.simulation.resource.scheduler.service.TaskService;
+import com.css.simulation.resource.scheduler.util.KubernetesUtil;
+import io.kubernetes.client.openapi.ApiClient;
+import io.kubernetes.client.openapi.ApiException;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.sshd.client.SshClient;
+import org.apache.sshd.client.session.ClientSession;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.data.redis.core.StringRedisTemplate;
+import org.springframework.kafka.core.KafkaTemplate;
+import org.springframework.scheduling.annotation.Scheduled;
+import org.springframework.stereotype.Component;
+
+import java.io.IOException;
+import java.util.*;
+import java.util.stream.Collectors;
+
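+/**
+ * Scheduled housekeeping for manual simulation projects: re-sends Kafka task messages
+ * whose pods have disappeared, aborts tasks whose tick timestamp has gone stale, and
+ * restarts Kubernetes jobs that are still marked as running but have no pods left.
+ */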
+@Component
+@Slf4j
+public class TickScheduler {
+
+    @Value("${scheduler.manual-project.topic}")
+    String manualProjectTopic;
+    @Autowired
+    StringRedisTemplate redisTemplate;
+
+    @Autowired
+    TaskService taskService;
+
+    @Autowired
+    TaskMapper taskMapper;
+    @Autowired
+    ManualProjectMapper projectMapper;
+    @Value("${scheduler.manual-project.job-yaml}")
+    String jobYaml;
+
+    @Value("${scheduler.score.hostname}")
+    String hostnameScore;
+    @Value("${scheduler.score.username}")
+    String usernameScore;
+    @Value("${scheduler.score.password}")
+    String passwordScore;
+
+    @Autowired
+    ApiClient apiClient;
+
+    @Autowired
+    KafkaTemplate<String, String> kafkaTemplate;
+
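+    /**
+     * Runs every minute. Compares the pod names recorded under "manualProject:*" Redis keys
+     * with the pods actually present in the cluster; for every recorded pod that no longer
+     * exists, if the task has been retried fewer than 3 times, its task message is re-sent
+     * to the project's Kafka topic.
+     */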
+    @Scheduled(fixedDelay = 60 * 1000)
+    public void retry() throws ApiException {
+
+        Set<String> keys = redisTemplate.keys("manualProject:*");
+
+        assert keys != null;
+        List<String> podKeyList = keys.stream().filter(key -> key.contains("pod")).collect(Collectors.toList());
+        Map<String, String> podNameMapShouldBe = new HashMap<>();
+        podKeyList.forEach(podKey -> {
+            String podName = redisTemplate.opsForValue().get(podKey);
+            podNameMapShouldBe.put(podKey, podName);
+        });
+
+        List<String> podNameListReally = KubernetesUtil.getPod(apiClient, "all");
+
+        podNameMapShouldBe.forEach((podKeyShouldBe, podNameShouldBe) -> {
+            if (!podNameListReally.contains(podNameShouldBe)) {
+
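+                // a pod key has the form <manualProjectTopic>:<projectId>:<taskId>:pod,
+                // so the project id and task id can be recovered from it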
+                String[] split = podKeyShouldBe.split(":");
+                String projectId = split[1];
+                String taskId = split[2];
+
+                String retry = redisTemplate.opsForValue().get(manualProjectTopic + ":" + projectId + ":" + taskId + ":retry");
+                assert retry != null;
+                int retryNumber = Integer.parseInt(retry);
+                if (retryNumber < 3) {
+                    String taskJson = redisTemplate.opsForValue().get(manualProjectTopic + ":" + projectId + ":" + taskId + ":message");
+                    kafkaTemplate.send(projectId, taskJson).addCallback(success -> {
+                        assert success != null;
+                        String topic = success.getRecordMetadata().topic();
+                        int partition = success.getRecordMetadata().partition();
+                        long offset = success.getRecordMetadata().offset();
+                        log.info("------- TickScheduler--retry message sent successfully:\n"
+                                + "topic: " + topic + "\n"
+                                + "partition: " + partition + "\n"
+                                + "offset: " + offset + "\n"
+                                + "message body: " + taskJson);
+                    }, failure -> {
+                        log.error("------- TickScheduler--retry failed to send message: " + failure.getMessage());
+                    });
+                }
+            }
+        });
+    }
+
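+    /**
+     * Runs every minute. For every task that is still marked as executing, reads its tick
+     * timestamp from Redis; if the timestamp is more than two minutes old, deletes the
+     * task's pod via kubectl over SSH and marks the task as Aborted.
+     */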
+    @Scheduled(fixedDelay = 60 * 1000)
+    public void tick() throws IOException {
+
+        SshClient client = SshUtil.getClient();
+        ClientSession session = SshUtil.getSession(client, hostnameScore, usernameScore, passwordScore);
+
+        ArrayList<TaskPO> executingTaskList = taskMapper.selectExecuting();
+        if (!CollectionUtil.isEmpty(executingTaskList)) {
+            for (TaskPO task : executingTaskList) {
+                String taskId = task.getId();
+                String projectId = task.getPId();
+                String s = redisTemplate.opsForValue().get(manualProjectTopic + ":" + projectId + ":" + taskId + ":tick");
+                assert s != null;
+                long tickTime = Long.parseLong(s);
+                long timeout = 2 * 60 * 1000L;
+                long now = TimeUtil.getNow();
+                long difference = now - tickTime;
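+                // if the tick timestamp has not been refreshed for more than 2 minutes,
+                // treat the task's pod as dead and abort the task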
+                if (difference > timeout) {
+                    String podName = redisTemplate.opsForValue().get(manualProjectTopic + ":" + projectId + ":" + taskId + ":pod");
+                    String podDeleteCommand = "kubectl delete pod " + podName;
+                    if (podName != null) {
+                        log.info("TickScheduler--tick task " + taskId + " has timed out; setting its state to Aborted. Pod name: " + podName
+                                + ", executing pod delete command: " + podDeleteCommand);
+                        SshUtil.execute(session, podDeleteCommand);
+                        taskMapper.updateFailStateWithStopTime(taskId, DictConstants.TASK_ABORTED, TimeUtil.getNowForMysql(), DictConstants.TASK_ERROR_REASON_1);
+                        redisTemplate.delete("podName:" + taskId);
+                    }
+                }
+            }
+        }
+        session.close();
+        client.stop();
+    }
+
+
+    /**
+     * If a job is still running but all of its pods have already stopped, the job needs
+     * to be restarted.
+     */
+    @Scheduled(fixedDelay = 30 * 1000)
+    public void checkProject() throws IOException {
+        SshClient client = SshUtil.getClient();
+        ClientSession session = SshUtil.getSession(client, hostnameScore, usernameScore, passwordScore);
+
+        List<String> projectIdList = projectMapper.selectIdByState("20");
+        log.info("TickScheduler-------checkProject projects currently running: " + projectIdList);
+
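+        // for each running project: if no "project-<projectId>" pods are found, record a timestamp
+        // in Redis; if a later pass still finds no pods more than 2 minutes after that timestamp,
+        // delete the Kubernetes job and re-apply its YAML to restart it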
+        projectIdList.forEach(projectId -> {
+            try {
+                String key = manualProjectTopic + ":" + projectId + ":check";
+                String lastNowString = redisTemplate.opsForValue().get(key);
+                String podList = SshUtil.execute(session, "kubectl get pod | grep project-" + projectId);
+                log.info("TickScheduler-------checkProject pods currently running for project " + projectId + ":\n" + podList);
+                int taskNumber = StringUtil.countSubString(podList, "project");
+                if (StringUtil.isEmpty(lastNowString) && taskNumber == 0) {
+                    redisTemplate.opsForValue().set(key, TimeUtil.getNowString());
+                }
+
+                if (StringUtil.isNotEmpty(lastNowString) && taskNumber == 0) {
+                    long lastNow = Long.parseLong(lastNowString);
+                    long now = Long.parseLong(TimeUtil.getNowString());
+
+                    if (now - lastNow > 120 * 1000L) {
+                        redisTemplate.opsForValue().set(key, TimeUtil.getNowString());
+                        SshUtil.execute(session, "kubectl delete job project-" + projectId);
+                        Thread.sleep(15000);
+                        while (true) {
+                            log.info("TickScheduler-------checkProject preparing to restart project " + projectId);
+                            String podList2 = SshUtil.execute(session, "kubectl get pod | grep project-" + projectId);
+                            log.info("TickScheduler-------checkProject remaining pods for project " + projectId + ":\n" + podList2);
+                            int taskNumber2 = StringUtil.countSubString(podList2, "project");
+                            if (taskNumber2 == 0) {
+                                break;
+                            }
+                        }
+                        Thread.sleep(15000);
+                        log.info("TickScheduler-------checkProject re-running project " + projectId);
+                        String jobTemplateYamlPathTarget = jobYaml + "project-" + projectId + ".yaml";
+                        SshUtil.execute(session, "kubectl apply -f " + jobTemplateYamlPathTarget);
+                    }
+                }
+            } catch (IOException | InterruptedException e) {
+                log.error("TickScheduler-------checkProject failed to check project " + projectId, e);
+            }
+        });
+
+        session.close();
+        client.stop();
+    }
+}