LingxinMeng 1 year ago
parent
commit
046778f74f

+ 1 - 0
pom.xml

@@ -17,6 +17,7 @@
         <module>simulation-oauth-server</module>
         <module>simulation-oauth-client</module>
         <module>simulation-resource-scheduler</module>
+        <module>simulation-resource-scheduler-agent</module>
         <module>simulation-resource-server</module>
         <module>simulation-resource-common</module>
         <module>simulation-resource-monitor</module>

+ 20 - 0
simulation-resource-scheduler-agent/src/main/java/com/css/simulation/resource/scheduler/agent/SimulationResourceSchedulerAgentApplication.java

@@ -0,0 +1,20 @@
+package com.css.simulation.resource.scheduler.agent;
+
+import org.springframework.boot.SpringApplication;
+import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.cloud.client.discovery.EnableDiscoveryClient;
+import org.springframework.cloud.openfeign.EnableFeignClients;
+import org.springframework.scheduling.annotation.EnableScheduling;
+
+
+@SpringBootApplication
+@EnableFeignClients
+@EnableDiscoveryClient
+@EnableScheduling
+public class SimulationResourceSchedulerAgentApplication {
+
+    public static void main(String[] args) {
+        SpringApplication.run(SimulationResourceSchedulerAgentApplication.class, args);
+    }
+
+}

+ 44 - 0
simulation-resource-scheduler-agent/src/main/java/com/css/simulation/resource/scheduler/agent/adapter/controller/TaskController.java

@@ -0,0 +1,44 @@
+package com.css.simulation.resource.scheduler.agent.adapter.controller;
+
+import com.css.simulation.resource.scheduler.agent.app.service.TaskAppService;
+import org.springframework.http.MediaType;
+import org.springframework.web.bind.annotation.*;
+import org.springframework.web.multipart.MultipartFile;
+
+import javax.annotation.Resource;
+
+@RestController
+@RequestMapping("/task")
+public class TaskController {
+
+  @Resource
+  private TaskAppService taskAppService;
+
+  // -------------------------------- Endpoints --------------------------------
+
+  /**
+   * Save a task's docker-compose yaml file.
+   */
+  @PostMapping(value = "/save", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
+  public void saveDockerComposeYamlOfTask(@RequestPart("file") MultipartFile file) {
+    taskAppService.saveDockerComposeYamlOfTask(file);
+  }
+
+  /**
+   * Bring up a task's yaml file with Docker Compose.
+   */
+  @GetMapping("/up")
+  public void upDockerComposeYamlOfOneTaskOfProject(@RequestParam("projectId") String projectId) {
+    taskAppService.upDockerComposeYamlOfOneTaskOfProject(projectId);
+  }
+
+  /**
+   * Bring down a task's yaml file with Docker Compose.
+   */
+  @GetMapping("/down")
+  public void downDockerComposeYamlOfOneTaskOfProject(@RequestParam("yamlName") String yamlName) {
+    taskAppService.downDockerComposeYamlOfOneTaskOfProject(yamlName);
+  }
+
+
+}
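For reference, a minimal sketch of how a caller (for example the main scheduler) might reach these agent endpoints through OpenFeign. The interface, the service name, and the multipart setup below are assumptions for illustration only, not part of this commit; the path prefix is taken from the context-path in bootstrap-dev.yaml.

```java
// Hypothetical Feign client for this agent's /task endpoints; the service name and
// interface are assumptions. Multipart uploads may need an extra Feign form encoder.
import org.springframework.cloud.openfeign.FeignClient;
import org.springframework.http.MediaType;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RequestPart;
import org.springframework.web.multipart.MultipartFile;

@FeignClient(name = "simulation-resource-scheduler-agent",
        path = "/simulation/resource/scheduler/task")
public interface TaskAgentClient {

    // Upload a task's docker-compose yaml to the agent.
    @PostMapping(value = "/save", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
    void save(@RequestPart("file") MultipartFile file);

    // Bring up one task of the given project.
    @GetMapping("/up")
    void up(@RequestParam("projectId") String projectId);

    // Tear down the compose stack described by the given yaml.
    @GetMapping("/down")
    void down(@RequestParam("yamlName") String yamlName);
}
```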

+ 37 - 0
simulation-resource-scheduler-agent/src/main/java/com/css/simulation/resource/scheduler/agent/app/service/TaskAppService.java

@@ -0,0 +1,37 @@
+package com.css.simulation.resource.scheduler.agent.app.service;
+
+import api.common.util.FileUtil;
+import api.common.util.OsUtil;
+import api.common.util.SchedulerUtil;
+import lombok.SneakyThrows;
+import org.springframework.stereotype.Service;
+import org.springframework.web.multipart.MultipartFile;
+
+import java.io.File;
+
+@Service
+public class TaskAppService {
+
+  private static final String rootPath = "/opt/data/";
+  private static final String upCommandOfDockerCompose = "docker-compose -f filename up -d";
+  private static final String downCommandOfDockerCompose = "docker-compose -f filename down";
+
+  public void saveDockerComposeYamlOfTask(MultipartFile file) {
+    //1 Parse the file name to get the project ID.
+    final String fileName = file.getOriginalFilename();
+    final String projectId = SchedulerUtil.getProjectIdFromYamlName(fileName);
+    //2 Save the file.
+    FileUtil.writeMultipartFileToLocalFile(file, rootPath + projectId + File.separator + fileName);
+  }
+
+  @SneakyThrows
+  public void upDockerComposeYamlOfOneTaskOfProject(String projectId) {
+    final File file = FileUtil.getRandomFileOfDirectory(rootPath + projectId);
+    OsUtil.exec(upCommandOfDockerCompose.replace("filename", file.getAbsolutePath()));
+  }
+
+  @SneakyThrows
+  public void downDockerComposeYamlOfOneTaskOfProject(String yamlName) {
+    OsUtil.exec(downCommandOfDockerCompose.replace("filename", rootPath + yamlName));
+  }
+}
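TaskAppService leans on api.common.util helpers whose sources are not in this diff. The sketch below is only a guess at their behavior, to make the flow above readable; the real implementations may differ, and the yaml naming convention assumed here is hypothetical.

```java
// Illustrative guesses at the api.common.util helpers used by TaskAppService.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

final class UtilSketch {

    // Assumes yaml names look like "<projectId>_<taskId>.yaml"; the real parsing rule is unknown.
    static String getProjectIdFromYamlName(String yamlName) {
        return yamlName.substring(0, yamlName.indexOf('_'));
    }

    // Runs a shell command and returns its output, roughly what OsUtil.exec is expected to do.
    static String exec(String command) throws Exception {
        Process process = new ProcessBuilder("/bin/sh", "-c", command)
                .redirectErrorStream(true)
                .start();
        StringBuilder output = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                output.append(line).append('\n');
            }
        }
        process.waitFor();
        return output.toString();
    }
}
```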

+ 210 - 0
simulation-resource-scheduler-agent/src/main/resources/bootstrap-dev.yaml

@@ -0,0 +1,210 @@
+server:
+  port: 8004
+  servlet:
+    context-path: /simulation/resource/scheduler
+
+simulation-cloud:
+  client-id: simulation-oauth-client
+  client-secret: hPT7zVteEXvRzS41NhJXoQYqtGmai3W0
+  token-uri: http://10.14.85.241/simulation/oauth/server/token
+  evaluation-level-uri: http://10.14.85.241/simulation/resource/server/simulationProject/saveEvaluationLevel
+
+algorithm-platform:
+  appid: 2af6f44d98104dc5adcbfb49809ff9d5
+  secret: db129a741fde1e9f474199dea24f3901
+  token-uri: https://open.zoogooy.com.cn/cgi-bin/token/token
+  algorithm-addr-uri: https://open.zoogooy.com.cn/cgi-bin/api/icv-algorithm-agg/simulation/download
+
+minio:
+  endpoint: http://10.14.85.242:9000/
+  endpoint-without-http: 10.14.85.242:9000
+  access-key: minioadmin
+  secret-key: 1qaz2wsx!
+  bucket-name: simulation-cloud
+
+spring:
+  mvc:
+    async:
+      request-timeout: 30000
+  servlet:
+    multipart:
+      max-request-size: 10240MB
+      max-file-size: 10240MB
+  datasource:
+    druid:
+      url: jdbc:mysql://10.14.85.240:3306/simulation?characterEncoding=utf8&connectTimeout=60000&socketTimeout=60000&useSSL=false
+      username: root
+      password: 1qaz2wsx!
+      driver-class-name: com.mysql.jdbc.Driver
+      initial-size: 200
+      min-idle: 200
+      max-active: 200
+      max-wait: 300000
+      filter:
+        stat:
+          enabled: true
+        wall:
+          enabled: true
+        slf4j:
+          enabled: true
+      stat-view-servlet:
+        enabled: true
+        url-pattern: /druid/*
+        login-username: druid
+        login-password: 1qaz2wsx!
+  #* -------------------------------- Kafka message queue --------------------------------
+  kafka:
+    hostname: 10.14.85.239
+    username: root
+    password: Ubuntu_cicv
+    delete-command: /mnt/disk001/kafka_2.13-3.1.0/bin/kafka-topics.sh --bootstrap-server 10.14.85.239:9092 --delete --topic topicName
+    bootstrap-servers: 10.14.85.239:9092    # Kafka cluster to connect to.
+    listener:
+      missing-topics-fatal: false
+    consumer:
+      group-id: simulation-resource-scheduler
+      enable-auto-commit: true
+      auto-commit-interval: 1000
+      auto-offset-reset: latest
+      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
+      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
+      properties:
+        session:
+          timeout:
+            ms: 12000
+        request:
+          timeout:
+            ms: 180000
+    producer:
+      retries: 3
+      acks: 1
+      batch-size: 16384
+      buffer-memory: 33554432
+      key-serializer: org.apache.kafka.common.serialization.StringSerializer
+      value-serializer: org.apache.kafka.common.serialization.StringSerializer
+      properties:
+        linger:
+          ms: 0
+  cache:
+    type: redis
+  redis:
+    host: 10.14.85.240
+    port: 6379
+    password: 1qaz2wsx!
+    connect-timeout: 10000
+    timeout: 10000
+    database: 2
+    lettuce:
+      pool:
+        max-active: 8
+        max-idle: 8
+        min-idle: 0
+        max-wait: 1000
+
+kubernetes:
+  namespace: simulation
+  pod-timeout: 120000
+  image-vtd-gpu: 10.14.85.237:5000/vtd.run.perception.release:latest
+  command-vtd-gpu: /Controller/config/docker_cloud.ini
+  command-vtd-carsim-gpu: /Controller/config/docker_cloud_carsim.ini
+  image-vtd-nogpu: 10.14.85.237:5000/vtd.run.control.release:latest
+  command-vtd-nogpu: /Controller/config/docker_cloud_noig.ini
+  command-vtd-carsim-nogpu: /Controller/config/docker_cloud_noig_carsim.ini
+  carsim-image: 10.14.85.237:5000/carsim:latest
+  carsim-command: /root/VTD_CarSim
+  node-list:
+    - hostname: cicv-node-1
+      parallelism: 2
+    - hostname: cicv-node-2
+      parallelism: 2
+    - hostname: cicv-node-3
+      parallelism: 2
+    - hostname: cicv-node-4
+      parallelism: 2
+    - hostname: cicv-node-5
+      parallelism: 2
+    - hostname: cicv-node-6
+      parallelism: 2
+    - hostname: cicv-node-7
+      parallelism: 2
+    - hostname: cicv-node-8
+      parallelism: 2
+
+
+git:
+  name: gitlab-441-442-443
+  url: 10.14.85.241:441
+
+docker:
+  registry: 10.14.85.237:5000
+  registry-volume: /mnt/disk001/simulation-cloud/docker/registry-5000/data/docker/registry/v2/repositories
+  max-algorithm-image: 30
+  min-algorithm-image: 10
+  node-list:
+    - name: gpu001
+      hostname: 10.14.85.237
+      username: root
+      password: Ubuntu_cicv
+    - name: gpu002
+      hostname: 10.14.85.236
+      username: root
+      password: Ubuntu_cicv
+    - name: gpu003
+      hostname: 10.14.85.238
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-1
+      hostname: 20.7.1.101
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-2
+      hostname: 20.7.1.102
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-3
+      hostname: 20.7.1.103
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-4
+      hostname: 20.7.1.104
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-5
+      hostname: 20.7.1.105
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-6
+      hostname: 20.7.1.106
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-7
+      hostname: 20.7.1.107
+      username: root
+      password: Ubuntu_cicv
+    - name: cicv-node-8
+      hostname: 20.7.1.108
+      username: root
+      password: Ubuntu_cicv
+
+scheduler:
+  simulation-cloud-ip: 10.14.85.241
+  host:
+    hostname: 10.14.85.237
+    username: root
+    password: Ubuntu_cicv
+  start-topic: project
+  stop-topic: stopProject
+  linux-path:
+    temp: /mnt/disk001/simulation-cloud/simulation-resource-scheduler-8004/temp/
+    pod-template-yaml: /mnt/disk001/simulation-cloud/simulation-resource-scheduler-8004/pod-template/pod-template.yaml
+    vtd-pod-template-yaml: /mnt/disk001/simulation-cloud/simulation-resource-scheduler-8004/pod-template/vtd-pod-template.yaml
+    carsim-pod-template-yaml: /mnt/disk001/simulation-cloud/simulation-resource-scheduler-8004/pod-template/carsim-pod-template.yaml
+    pod-yaml-directory: /mnt/disk001/simulation-cloud/simulation-resource-scheduler-8004/pod-yaml/
+    score-py: /SimulationCloud/Evaluate
+  minio-path:
+    project-result: /project/
+
+esmini:
+  node-list:
+    - hostname: simulation003
+      parallelism: 1

+ 13 - 0
simulation-resource-scheduler-agent/src/main/resources/bootstrap-private.yaml

@@ -0,0 +1,13 @@
+# GuoQi private cloud
+spring:
+  cloud:
+    nacos:
+      discovery:
+        server-addr: 10.14.85.241:8848
+        namespace: 3698bfc2-a612-487a-b2a2-aaad16cd9d9d
+        group: private
+      config:
+        server-addr: 10.14.85.241:8848
+        namespace: 3698bfc2-a612-487a-b2a2-aaad16cd9d9d
+        file-extension: yaml
+        group: private

+ 10 - 0
simulation-resource-scheduler-agent/src/main/resources/bootstrap-test.yaml

@@ -0,0 +1,10 @@
+spring:
+  cloud:
+    nacos:
+      discovery:
+        server-addr: 47.93.135.21:8848
+        namespace: 3698bfc2-a612-487a-b2a2-aaad16cd9d9d
+      config:
+        server-addr: 47.93.135.21:8848
+        namespace: 3698bfc2-a612-487a-b2a2-aaad16cd9d9d
+        file-extension: yaml

+ 9 - 0
simulation-resource-scheduler-agent/src/main/resources/bootstrap.yaml

@@ -0,0 +1,9 @@
+spring:
+  servlet:
+    multipart:
+      max-file-size: 10GB
+      max-request-size: 10GB
+  application:
+    name: simulation-resource-scheduler-agent
+  profiles:
+    active: dev

+ 30 - 0
simulation-resource-scheduler-agent/src/main/resources/docker-compose/vtd-template.yaml

@@ -0,0 +1,30 @@
+version: '3.3'
+
+services:
+
+  vtd:
+    image: vtd-image
+    pull_policy: always
+    restart: "no"
+    command: /Controller/VTDController vtd-command kafka-topic
+    environment:
+      PodName: pod-name
+      LM_LICENSE_FILE: "27500@10.14.85.247"
+      SIMULATION_CLOUD_IP: simulation-cloud-ip
+      KAFKA_IP: kafka-ip
+      MINIO_IP: minio-ip
+      MINIO_ACCESS_KEY: minio-access-key
+      MINIO_SECRET_KEY: minio-secret-key
+      KAFKA_PARTITION: kafka-partition
+      KAFKA_OFFSET: kafka-offset
+      CPU_ORDER: cpu-order
+    devices:
+      - "/dev/nvidia0:/dev/nvidia0"
+      - "/dev/nvidiactl:/dev/nvidiactl"
+
+  algorithm:
+    image: algorithm-image
+    pull_policy: always
+    restart: "no"
+    command: /run.sh
+    network_mode: service:vtd
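vtd-template.yaml is a token template: vtd-image, algorithm-image, kafka-ip, minio-ip, pod-name and the other placeholders are plain strings meant to be replaced before the rendered file is shipped to the agent's /task/save endpoint. A minimal rendering sketch, assuming simple string substitution; the concrete values below are examples (some drawn from bootstrap-dev.yaml), not taken from this commit.

```java
// Hypothetical rendering step: replace the placeholder tokens in vtd-template.yaml
// with concrete values, then hand the result to the agent via /task/save.
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;

final class ComposeTemplateRenderer {

    static String render(Path template, Map<String, String> values) throws Exception {
        String yaml = Files.readString(template, StandardCharsets.UTF_8);
        for (Map.Entry<String, String> entry : values.entrySet()) {
            yaml = yaml.replace(entry.getKey(), entry.getValue());
        }
        return yaml;
    }

    public static void main(String[] args) throws Exception {
        // Example values only; real ones come from configuration and the task being scheduled.
        String rendered = render(Path.of("vtd-template.yaml"), Map.of(
                "vtd-image", "10.14.85.237:5000/vtd.run.perception.release:latest",
                "algorithm-image", "10.14.85.237:5000/algorithm:latest",
                "kafka-ip", "10.14.85.239:9092",
                "minio-ip", "10.14.85.242:9000",
                "pod-name", "project-123-task-456"));
        Files.writeString(Path.of("project-123_task-456.yaml"), rendered, StandardCharsets.UTF_8);
    }
}
```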

File diff suppressed because it is too large
+ 18 - 0
simulation-resource-scheduler-agent/src/main/resources/kubernetes/config


+ 50 - 0
simulation-resource-scheduler-agent/src/main/resources/kubernetes/template/job/job-template.yaml

@@ -0,0 +1,50 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: job-cloud-simulation
+  namespace: default
+  labels:
+    user: EY
+spec:
+  completions: completions-number
+  parallelism: parallelism-number
+  template:
+    metadata:
+      name: pod-cloud-simulation
+    spec:
+      nodeName: node-name
+      hostAliases:
+        - ip: 172.17.0.184
+          hostnames:
+            - cicvtest002
+      containers:
+        - name: vtd-container
+          image: vtd.run.perception:latest
+          imagePullPolicy: Never
+          command: [ "/Controller/VTDController", "/Controller/config/docker_cloud.ini", "projectId"]
+          env:
+            - name: PodName
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: LM_LICENSE_FILE
+              value: 27500@172.14.1.103
+          volumeMounts:
+            - name: nvidia0
+              mountPath: /dev/nvidia0
+            - name: nvidiactl
+              mountPath: /dev/nvidiactl
+          securityContext:
+            privileged: true
+        - name: algorithm-container
+          image: algorithm-image
+          imagePullPolicy: Never
+          command: [ "/bin/sh", "-c", "run.sh; touch /tmp/hello.txt;while true;do /bin/echo $(date +%T) >> /tmp/hello.txt; sleep 5; done;" ]
+      restartPolicy: Always
+      volumes:
+        - name: nvidia0
+          hostPath:
+            path: /dev/nvidia0
+        - name: nvidiactl
+          hostPath:
+            path: /dev/nvidiactl

+ 72 - 0
simulation-resource-scheduler-agent/src/main/resources/kubernetes/template/pod/pod-template.yaml

@@ -0,0 +1,72 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: pod-name
+  namespace: namespace-name
+  labels:
+    user: CICV
+spec:
+  nodeName: node-name
+  dnsPolicy: None
+  dnsConfig:
+    nameservers:
+      - 223.6.6.6
+      - 8.8.8.8
+  hostAliases:
+    - ip: 172.17.0.184
+      hostnames:
+        - cicvtest002
+    - ip: 172.17.0.188
+      hostnames:
+        - scgpu001
+  containers:
+    - name: vtd-container
+      image: vtd-image
+      imagePullPolicy: Always
+      command: [ "/Controller/VTDController", "vtd-command", "kafkaTopic" ]
+      env:
+        - name: PodName
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: LM_LICENSE_FILE
+          value: 27500@172.14.1.103
+        - name: KAFKA_IP
+          value: 172.17.0.184:9092
+        - name: MINIO_IP
+          value: 172.17.0.184:9000
+        - name: SIMULATION_CLOUD_IP
+          value: 172.17.0.185
+        - name: KAFKA_PARTITION
+          value: kafka-partition
+        - name: KAFKA_OFFSET
+          value: kafka-offset
+      volumeMounts:
+        - name: nvidia0
+          mountPath: /dev/nvidia0
+        - name: nvidiactl
+          mountPath: /dev/nvidiactl
+      securityContext:
+        privileged: true
+      resources:
+        limits:
+          cpu: "4"
+        requests:
+          cpu: "4"
+    - name: algorithm-container
+      image: algorithm-image
+      imagePullPolicy: Always
+      command: [ "/bin/sh", "-c", "/run.sh; touch /tmp/hello.txt;while true;do /bin/echo $(date +%T) >> /tmp/hello.txt; sleep 5; done;" ]
+      resources:
+        limits:
+          cpu: "3"
+        requests:
+          cpu: "3"
+  restartPolicy: Never
+  volumes:
+    - name: nvidia0
+      hostPath:
+        path: /dev/nvidia0
+    - name: nvidiactl
+      hostPath:
+        path: /dev/nvidiactl
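pod-template.yaml follows the same placeholder convention (pod-name, namespace-name, node-name, vtd-image, ...). A hedged guess at how a rendered copy might be applied, reusing the substitution idea above; `kubectl apply -f` is standard, while the paths (from the linux-path entries in bootstrap-dev.yaml) and the overall flow are assumptions.

```java
// Hypothetical: render the pod template, write it under the pod-yaml directory from
// bootstrap-dev.yaml, then apply it with kubectl (other placeholders omitted for brevity).
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;

final class PodTemplateApply {
    public static void main(String[] args) throws Exception {
        String yaml = Files.readString(Path.of(
                        "/mnt/disk001/simulation-cloud/simulation-resource-scheduler-8004/pod-template/pod-template.yaml"),
                        StandardCharsets.UTF_8)
                .replace("pod-name", "project-123-task-456")
                .replace("namespace-name", "simulation")
                .replace("node-name", "cicv-node-1");
        Path out = Path.of(
                "/mnt/disk001/simulation-cloud/simulation-resource-scheduler-8004/pod-yaml/project-123-task-456.yaml");
        Files.writeString(out, yaml, StandardCharsets.UTF_8);
        // Apply the rendered pod spec; assumes kubectl is configured on the host.
        new ProcessBuilder("kubectl", "apply", "-f", out.toString()).inheritIO().start().waitFor();
    }
}
```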

+ 125 - 0
simulation-resource-scheduler-agent/src/main/resources/logback-spring.xml

@@ -0,0 +1,125 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<configuration debug="true">
+    <!-- Project name -->
+    <property name="PROJECT_NAME" value="simulation-gateway"/>
+
+    <!-- Define the log file storage path for each environment; do not use relative paths in Logback configuration -->
+    <springProfile name="private">
+        <property name="LOG_HOME" value="./log"/>
+    </springProfile>
+    <springProfile name="aliyun">
+        <property name="LOG_HOME" value="./log"/>
+    </springProfile>
+    <springProfile name="test">
+        <property name="LOG_HOME" value="./log"/>
+    </springProfile>
+
+    <!-- Console output -->
+    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
+        <!-- This appender is for development; it is configured with the lowest level, and the console prints logs at or above that level -->
+        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
+            <level>debug</level>
+        </filter>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %highlight(%-5level) %cyan(%logger{5}).%M\(%F:%L\) %highlight(%msg) %n</pattern>
+            <charset>utf-8</charset>
+        </encoder>
+    </appender>
+
+    <!-- DEBUG log file -->
+    <appender name="debug" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+            <!-- Log file name pattern -->
+            <FileNamePattern>${LOG_HOME}/debug.%d{yyyy-MM-dd}.%i.log</FileNamePattern>
+            <!-- Number of days to keep log files -->
+            <MaxHistory>15</MaxHistory>
+            <!-- Maximum size of a log file -->
+            <MaxFileSize>10MB</MaxFileSize>
+        </rollingPolicy>
+        <append>true</append>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %highlight(%-5level) %cyan(%logger{5}).%M\(%F:%L\) %highlight(%msg) %n</pattern>
+            <charset>utf-8</charset>
+        </encoder>
+        <filter class="ch.qos.logback.classic.filter.LevelFilter"><!-- Only DEBUG logs -->
+            <level>DEBUG</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+    </appender>
+
+    <!-- INFO log file -->
+    <appender name="info" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+            <!-- Log file name pattern -->
+            <FileNamePattern>${LOG_HOME}/info.%d{yyyy-MM-dd}.%i.log</FileNamePattern>
+            <!-- Number of days to keep log files -->
+            <MaxHistory>15</MaxHistory>
+            <!-- Maximum size of a log file -->
+            <MaxFileSize>10MB</MaxFileSize>
+        </rollingPolicy>
+        <append>true</append>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %highlight(%-5level) %cyan(%logger{5}).%M\(%F:%L\) %highlight(%msg) %n</pattern>
+            <charset>utf-8</charset>
+        </encoder>
+        <filter class="ch.qos.logback.classic.filter.LevelFilter"><!-- Only INFO logs -->
+            <level>INFO</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+    </appender>
+
+    <!-- ERROR log file -->
+    <appender name="error" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+            <!-- Log file name pattern -->
+            <FileNamePattern>${LOG_HOME}/error.%d{yyyy-MM-dd}.%i.log</FileNamePattern>
+            <!-- Number of days to keep log files -->
+            <MaxHistory>15</MaxHistory>
+            <!-- Maximum size of a log file -->
+            <MaxFileSize>10MB</MaxFileSize>
+        </rollingPolicy>
+        <append>true</append>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %highlight(%-5level) %cyan(%logger{5}).%M\(%F:%L\) %highlight(%msg) %n</pattern>
+            <charset>utf-8</charset>
+        </encoder>
+        <filter class="ch.qos.logback.classic.filter.LevelFilter"><!-- Only ERROR logs -->
+            <level>ERROR</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+    </appender>
+
+    <!-- WARN log file -->
+    <appender name="warn" class="ch.qos.logback.core.rolling.RollingFileAppender">
+        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
+            <!-- Log file name pattern -->
+            <FileNamePattern>${LOG_HOME}/warn.%d{yyyy-MM-dd}.%i.log</FileNamePattern>
+            <!-- Number of days to keep log files -->
+            <MaxHistory>15</MaxHistory>
+            <!-- Maximum size of a log file -->
+            <MaxFileSize>10MB</MaxFileSize>
+        </rollingPolicy>
+        <append>true</append>
+        <encoder>
+            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %highlight(%-5level) %cyan(%logger{5}).%M\(%F:%L\) %highlight(%msg) %n</pattern>
+            <charset>utf-8</charset>
+        </encoder>
+        <filter class="ch.qos.logback.classic.filter.LevelFilter"><!-- Only WARN logs -->
+            <level>WARN</level>
+            <onMatch>ACCEPT</onMatch>
+            <onMismatch>DENY</onMismatch>
+        </filter>
+    </appender>
+
+    <!-- Attach the appenders to the root logger -->
+    <root level="info">
+        <appender-ref ref="console"/>
+        <appender-ref ref="debug"/>
+        <appender-ref ref="info"/>
+        <appender-ref ref="error"/>
+        <appender-ref ref="warn"/>
+    </root>
+</configuration>

Some files were not shown because too many files changed in this diff