feat: 2.6.0

1. Improve handling of dropped workflow executions
byteblogs168 2024-01-25 14:01:46 +08:00
parent 5d266bdb9a
commit 58e5c10048
10 changed files with 89 additions and 90 deletions

View File

@@ -2,6 +2,7 @@ package com.aizuda.easy.retry.server.job.task.support.dispatch;
import akka.actor.AbstractActor;
import akka.actor.ActorRef;
import cn.hutool.core.util.RandomUtil;
import com.aizuda.easy.retry.common.core.constant.SystemConstants;
import com.aizuda.easy.retry.common.core.enums.StatusEnum;
import com.aizuda.easy.retry.common.log.EasyRetryLog;
@@ -21,7 +22,6 @@ import com.aizuda.easy.retry.server.job.task.support.WorkflowTaskConverter;
import com.aizuda.easy.retry.template.datasource.persistence.mapper.GroupConfigMapper;
import com.aizuda.easy.retry.template.datasource.persistence.mapper.WorkflowMapper;
import com.aizuda.easy.retry.template.datasource.persistence.po.GroupConfig;
import com.aizuda.easy.retry.template.datasource.persistence.po.Job;
import com.aizuda.easy.retry.template.datasource.persistence.po.Workflow;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.extension.plugins.pagination.PageDTO;
@@ -32,9 +32,11 @@ import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import java.math.RoundingMode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
/**
@@ -75,6 +77,7 @@ public class ScanWorkflowTaskActor extends AbstractActor {
long now = DateUtils.toNowMilli();
for (PartitionTask partitionTask : partitionTasks) {
WorkflowPartitionTaskDTO workflowPartitionTaskDTO = (WorkflowPartitionTaskDTO) partitionTask;
log.warn("监控时间. workflowId:[{}] now:[{}], dbnextTriggerAt:[{}]", workflowPartitionTaskDTO.getId(), now, workflowPartitionTaskDTO.getNextTriggerAt());
processWorkflow(workflowPartitionTaskDTO, waitUpdateJobs, waitExecWorkflows, now);
}
@@ -82,6 +85,8 @@ public class ScanWorkflowTaskActor extends AbstractActor {
workflowMapper.updateBatchNextTriggerAtById(waitUpdateJobs);
for (final WorkflowTaskPrepareDTO waitExecTask : waitExecWorkflows) {
log.warn("监控时间. workflowId:[{}] now:[{}], nextTriggerAt:[{}]", waitExecTask.getWorkflowId(), now, waitExecTask.getNextTriggerAt());
// 执行预处理阶段
ActorRef actorRef = ActorGenerator.workflowTaskPrepareActor();
waitExecTask.setTaskExecutorScene(JobTaskExecutorSceneEnum.AUTO_WORKFLOW.getType());
@@ -93,13 +98,14 @@ public class ScanWorkflowTaskActor extends AbstractActor {
List<WorkflowTaskPrepareDTO> waitExecJobs, long now) {
CacheConsumerGroup.addOrUpdate(partitionTask.getGroupName(), partitionTask.getNamespaceId());
Workflow workflow = new Workflow();
workflow.setId(partitionTask.getId());
// Update the next trigger time
Long nextTriggerAt = calculateNextTriggerTime(partitionTask, now);
Workflow workflow = new Workflow();
workflow.setId(partitionTask.getId());
workflow.setNextTriggerAt(nextTriggerAt);
waitUpdateWorkflows.add(workflow);
waitExecJobs.add(WorkflowTaskConverter.INSTANCE.toWorkflowTaskPrepareDTO(partitionTask));
}
@@ -108,7 +114,8 @@ public class ScanWorkflowTaskActor extends AbstractActor {
long nextTriggerAt = partitionTask.getNextTriggerAt();
if ((nextTriggerAt + DateUtils.toEpochMilli(SystemConstants.SCHEDULE_PERIOD)) < now) {
nextTriggerAt = now;
long randomMs = (long) (RandomUtil.randomDouble(0, 4, 2, RoundingMode.UP) * 1000);
nextTriggerAt = now + randomMs;
partitionTask.setNextTriggerAt(nextTriggerAt);
}
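
The key fix in this file: an overdue workflow used to be re-triggered at exactly `now`, so after a scheduling stall every overdue workflow fired in the same instant, presumably overwhelming the dispatcher and causing executions to be dropped. The new code spreads re-triggers over a random 0-4 s window. A minimal standalone sketch of the same idea using only the JDK (ThreadLocalRandom stands in for hutool's RandomUtil; names are illustrative, not the project's):

// --- illustrative sketch, not part of the commit ---
import java.util.concurrent.ThreadLocalRandom;

public class TriggerJitterSketch {

    /** Spread overdue triggers over [now, now + 4s) instead of firing them all at once. */
    public static long nextTriggerAt(long dbNextTriggerAt, long schedulePeriodMs, long now) {
        if (dbNextTriggerAt + schedulePeriodMs < now) {
            // Overdue: re-trigger soon, with up to 4s of random jitter (mirrors
            // RandomUtil.randomDouble(0, 4, 2, RoundingMode.UP) * 1000 above).
            return now + ThreadLocalRandom.current().nextLong(0, 4_000);
        }
        return dbNextTriggerAt; // still on schedule
    }
}
// --- end sketch ---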

View File

@@ -35,6 +35,7 @@ import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@@ -62,7 +63,9 @@ public class WorkflowExecutorActor extends AbstractActor {
return receiveBuilder().match(WorkflowNodeTaskExecuteDTO.class, taskExecute -> {
log.info("工作流开始执行. [{}]", JsonUtil.toJsonString(taskExecute));
try {
doExecutor(taskExecute);
} catch (Exception e) {
EasyRetryLog.LOCAL.error("workflow executor exception. [{}]", taskExecute, e);
handlerTaskBatch(taskExecute, JobTaskBatchStatusEnum.FAIL.getStatus(), JobOperationReasonEnum.TASK_EXECUTION_ERROR.getReason());
@@ -77,6 +80,8 @@ public class WorkflowExecutorActor extends AbstractActor {
WorkflowTaskBatch workflowTaskBatch = workflowTaskBatchMapper.selectById(taskExecute.getWorkflowTaskBatchId());
Assert.notNull(workflowTaskBatch, () -> new EasyRetryServerException("Task not found"));
handlerTaskBatch(taskExecute, JobTaskBatchStatusEnum.RUNNING.getStatus(), JobOperationReasonEnum.NONE.getReason());
// Fetch the DAG graph
String flowInfo = workflowTaskBatch.getFlowInfo();
MutableGraph<Long> graph = MutableGraphCache.getOrDefault(workflowTaskBatch.getId(), flowInfo);
@@ -168,8 +173,10 @@ public class WorkflowExecutorActor extends AbstractActor {
jobTaskBatch.setExecutionAt(DateUtils.toNowMilli());
jobTaskBatch.setTaskBatchStatus(taskStatus);
jobTaskBatch.setOperationReason(operationReason);
jobTaskBatch.setUpdateDt(LocalDateTime.now());
Assert.isTrue(1 == workflowTaskBatchMapper.updateById(jobTaskBatch),
() -> new EasyRetryServerException("更新任务失败"));
}
}

View File

@@ -215,10 +215,10 @@ public class WorkflowBatchHandler {
// No successor nodes left, so check whether the entire DAG has finished executing
List<JobTaskBatch> jobTaskBatches = jobTaskBatchMapper.selectList(new LambdaQueryWrapper<JobTaskBatch>()
.eq(JobTaskBatch::getWorkflowTaskBatchId, workflowTaskBatchId)
.in(JobTaskBatch::getWorkflowNodeId, graph.nodes())
.in(JobTaskBatch::getWorkflowNodeId, graph.nodes()).orderByDesc(JobTaskBatch::getId)
);
Map<Long, JobTaskBatch> jobTaskBatchMap = jobTaskBatches.stream().collect(Collectors.toMap(JobTaskBatch::getWorkflowNodeId, i -> i));
Map<Long, JobTaskBatch> jobTaskBatchMap = jobTaskBatches.stream().collect(Collectors.toMap(JobTaskBatch::getWorkflowNodeId, i -> i, (i,j) -> i));
checkWorkflowExecutor(SystemConstants.ROOT, workflowTaskBatchId, graph, jobTaskBatchMap);
}
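
Worth noting about this hunk: without its third argument, Collectors.toMap throws IllegalStateException on duplicate keys, and a workflow node can own several task batches, which would abort the whole completion check. Ordering by id descending plus the `(i, j) -> i` merge function keeps the newest batch per node. A self-contained sketch (the Batch record is hypothetical, not the project's entity):

// --- illustrative sketch, not part of the commit ---
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ToMapMergeSketch {

    record Batch(long id, long nodeId) {}

    public static void main(String[] args) {
        // Rows as orderByDesc(id) would return them: node 7 has two batches.
        List<Batch> batches = List.of(new Batch(12, 7), new Batch(5, 7), new Batch(6, 8));

        // Without the merge function this would throw IllegalStateException (duplicate key 7).
        Map<Long, Batch> newestPerNode = batches.stream()
                .collect(Collectors.toMap(Batch::nodeId, b -> b, (first, later) -> first));

        System.out.println(newestPerNode.get(7L).id()); // 12 -> the newest batch wins
    }
}
// --- end sketch ---
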
@@ -257,6 +257,7 @@ public class WorkflowBatchHandler {
jobTaskPrepare.setTaskExecutorScene(JobTaskExecutorSceneEnum.AUTO_WORKFLOW.getType());
jobTaskPrepare.setNextTriggerAt(DateUtils.toNowMilli() + DateUtils.toNowMilli() % 1000);
jobTaskPrepare.setWorkflowTaskBatchId(workflowTaskBatchId);
jobTaskPrepare.setWorkflowNodeId(successor);
jobTaskPrepare.setParentWorkflowNodeId(parentId);
// Execute the pre-processing stage
ActorRef actorRef = ActorGenerator.jobTaskPrepareActor();

View File

@@ -28,7 +28,7 @@ public class JobTimerWheel implements Lifecycle {
private static final String THREAD_NAME_PREFIX = "job-task-timer-wheel-";
private static HashedWheelTimer timer = null;
private static final ThreadPoolExecutor executor =
new ThreadPoolExecutor(16, 16, 10, TimeUnit.SECONDS,
new ThreadPoolExecutor(32, 32, 10, TimeUnit.SECONDS,
new LinkedBlockingQueue<>(), new CustomizableThreadFactory(THREAD_NAME_PREFIX));
private static final TimerIdempotent idempotent = new TimerIdempotent();
@@ -44,8 +44,8 @@ public class JobTimerWheel implements Lifecycle {
public static void register(Integer taskType, Long uniqueId, TimerTask task, long delay, TimeUnit unit) {
if (!isExisted(taskType, uniqueId)) {
log.info("加入时间轮. delay:[{}ms] taskType:[{}] uniqueId:[{}]", delay, taskType, uniqueId);
delay = delay < 0 ? 0 : delay;
log.info("加入时间轮. delay:[{}ms] uniqueId:[{}]", delay, uniqueId);
try {
timer.newTimeout(task, delay, unit);
idempotent.set(uniqueId, uniqueId);
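
Two changes in this file: the worker pool doubles from 16 to 32 threads, and a negative delay (an already-overdue task) is now clamped to 0 so it fires on the next wheel tick rather than relying on Netty's handling of negative values. A minimal sketch of the registration path against Netty's HashedWheelTimer API (the wrapper class and method names are illustrative):

// --- illustrative sketch, not part of the commit ---
import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
import java.util.concurrent.TimeUnit;

public class TimerWheelSketch {

    private static final HashedWheelTimer TIMER = new HashedWheelTimer();

    /** Register a task; overdue tasks (negative delay) fire on the next tick. */
    public static Timeout register(TimerTask task, long delayMs) {
        long clamped = Math.max(delayMs, 0); // same guard as "delay = delay < 0 ? 0 : delay"
        return TIMER.newTimeout(task, clamped, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) {
        register(timeout -> System.out.println("fired"), -500); // runs almost immediately
    }
}
// --- end sketch ---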

View File

@@ -1,18 +1,10 @@
package com.aizuda.easy.retry.server.job.task.support.timer;
import akka.actor.ActorRef;
import cn.hutool.core.lang.Assert;
import com.aizuda.easy.retry.common.core.constant.SystemConstants;
import com.aizuda.easy.retry.common.core.context.SpringContext;
import com.aizuda.easy.retry.common.core.enums.JobOperationReasonEnum;
import com.aizuda.easy.retry.common.core.enums.JobTaskBatchStatusEnum;
import com.aizuda.easy.retry.server.common.akka.ActorGenerator;
import com.aizuda.easy.retry.server.common.exception.EasyRetryServerException;
import com.aizuda.easy.retry.server.common.util.DateUtils;
import com.aizuda.easy.retry.server.job.task.dto.WorkflowNodeTaskExecuteDTO;
import com.aizuda.easy.retry.server.job.task.dto.WorkflowTimerTaskDTO;
import com.aizuda.easy.retry.template.datasource.persistence.mapper.WorkflowTaskBatchMapper;
import com.aizuda.easy.retry.template.datasource.persistence.po.WorkflowTaskBatch;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
import lombok.AllArgsConstructor;
@@ -38,10 +30,6 @@ public class WorkflowTimerTask implements TimerTask {
try {
int taskStatus = JobTaskBatchStatusEnum.RUNNING.getStatus();
int operationReason = JobOperationReasonEnum.NONE.getReason();
handlerTaskBatch(workflowTimerTaskDTO.getWorkflowTaskBatchId(), taskStatus, operationReason);
WorkflowNodeTaskExecuteDTO taskExecuteDTO = new WorkflowNodeTaskExecuteDTO();
taskExecuteDTO.setWorkflowTaskBatchId(workflowTimerTaskDTO.getWorkflowTaskBatchId());
taskExecuteDTO.setWorkflowId(workflowTimerTaskDTO.getWorkflowId());
@@ -55,16 +43,4 @@ public class WorkflowTimerTask implements TimerTask {
}
}
private void handlerTaskBatch(Long workflowTaskBatchId, int taskStatus, int operationReason) {
WorkflowTaskBatch jobTaskBatch = new WorkflowTaskBatch();
jobTaskBatch.setId(workflowTaskBatchId);
jobTaskBatch.setExecutionAt(DateUtils.toNowMilli());
jobTaskBatch.setTaskBatchStatus(taskStatus);
jobTaskBatch.setOperationReason(operationReason);
Assert.isTrue(1 == SpringContext.getBeanByType(WorkflowTaskBatchMapper.class).updateById(jobTaskBatch),
() -> new EasyRetryServerException("更新任务失败"));
}
}

View File

@@ -6,7 +6,7 @@ akka {
thread-pool-executor {
core-pool-size-min = 8
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -17,7 +17,7 @@ akka {
thread-pool-executor {
core-pool-size-min = 8
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -28,7 +28,7 @@ akka {
thread-pool-executor {
core-pool-size-min = 16
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -39,7 +39,7 @@ akka {
thread-pool-executor {
core-pool-size-min = 32
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -48,9 +48,9 @@ akka {
type = "Dispatcher"
executor = "thread-pool-executor"
thread-pool-executor {
core-pool-size-min = 8
core-pool-size-min = 32
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -59,9 +59,9 @@ akka {
type = "Dispatcher"
executor = "thread-pool-executor"
thread-pool-executor {
core-pool-size-min = 32
core-pool-size-min = 64
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -70,9 +70,9 @@ akka {
type = "Dispatcher"
executor = "thread-pool-executor"
thread-pool-executor {
core-pool-size-min = 32
core-pool-size-min = 64
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -81,9 +81,9 @@ akka {
type = "Dispatcher"
executor = "thread-pool-executor"
thread-pool-executor {
core-pool-size-min = 32
core-pool-size-min = 64
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -92,9 +92,9 @@ akka {
type = "Dispatcher"
executor = "thread-pool-executor"
thread-pool-executor {
core-pool-size-min = 8
core-pool-size-min = 64
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -103,9 +103,9 @@ akka {
type = "Dispatcher"
executor = "thread-pool-executor"
thread-pool-executor {
core-pool-size-min = 32
core-pool-size-min = 64
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 128
}
throughput = 10
}
@@ -114,9 +114,9 @@ akka {
type = "Dispatcher"
executor = "thread-pool-executor"
thread-pool-executor {
core-pool-size-min = 32
core-pool-size-min = 128
core-pool-size-factor = 2.0
core-pool-size-max = 64
core-pool-size-max = 256
}
throughput = 10
}
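
For context on these numbers: with Akka's thread-pool-executor, the pool size is ceil(availableProcessors * core-pool-size-factor) clamped to [core-pool-size-min, core-pool-size-max], so raising the max from 64 to 128 (or 256) only takes effect when the min or the core count pushes the computed size past the old cap. A quick sketch of the arithmetic (this mirrors Akka's documented sizing rule, not its source):

// --- illustrative sketch, not part of the commit ---
public class DispatcherPoolSizeSketch {

    /** Akka thread-pool-executor sizing: ceil(cores * factor), clamped to [min, max]. */
    static int poolSize(int cores, double factor, int min, int max) {
        return Math.min(Math.max((int) Math.ceil(cores * factor), min), max);
    }

    public static void main(String[] args) {
        System.out.println(poolSize(16, 2.0, 8, 64));    // 32  - the old cap was never reached
        System.out.println(poolSize(16, 2.0, 64, 128));  // 64  - the raised min dominates
        System.out.println(poolSize(96, 2.0, 64, 128));  // 128 - the raised max now matters
    }
}
// --- end sketch ---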

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -5,8 +5,8 @@
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Easy Retry</title>
<script type="module" crossorigin src="./assets/D95t2zJ0.js"></script>
<link rel="stylesheet" crossorigin href="./assets/i0Vr07RR.css">
<script type="module" crossorigin src="./assets/nqxcaVpR.js"></script>
<link rel="stylesheet" crossorigin href="./assets/eHB570AZ.css">
</head>
<body>

View File

@@ -81,6 +81,7 @@ export default {
interval: null,
startId: 0,
fromIndex: 0,
controller: new AbortController(),
indicator: <a-icon type="loading" style="font-size: 24px; color: '#d9d9d9'" spin/>,
LevelEnum: {
DEBUG: {
@@ -104,46 +105,53 @@ export default {
},
mounted () {
this.getLogList()
this.interval = setInterval(() => {
this.getLogList()
}, 1000)
},
beforeDestroy () {
this.stopLog()
},
methods: {
onCancel () {
clearInterval(this.interval)
this.stopLog()
this.$emit('update:open', false)
},
stopLog () {
this.finished = true
this.controller.abort()
clearTimeout(this.interval)
this.interval = undefined
},
getLogList () {
if (!this.finished) {
request(
{
url: '/job/log/list',
method: 'get',
params: {
taskBatchId: this.record.taskBatchId,
jobId: this.record.jobId,
taskId: this.record.id,
startId: this.startId,
fromIndex: this.fromIndex,
size: 50
}
request(
{
url: '/job/log/list',
method: 'get',
params: {
taskBatchId: this.record.taskBatchId,
jobId: this.record.jobId,
taskId: this.record.id,
startId: this.startId,
fromIndex: this.fromIndex,
size: 50
},
signal: this.controller.signal
}
)
.then((res) => {
this.finished = res.data.finished
this.startId = res.data.nextStartId
this.fromIndex = res.data.fromIndex
if (res.data.message) {
this.logList.push(...res.data.message)
this.logList.sort((a, b) => a.time_stamp - b.time_stamp)
}
)
.then((res) => {
this.finished = res.data.finished
this.startId = res.data.nextStartId
this.fromIndex = res.data.fromIndex
if (res.data.message) {
this.logList.push(...res.data.message)
this.logList.sort((a, b) => a.time_stamp - b.time_stamp)
}
})
.catch(() => {
this.finished = true
})
} else {
clearInterval(this.interval)
}
if (!this.finished) {
clearTimeout(this.interval)
this.interval = setTimeout(this.getLogList, 1000)
}
})
.catch(() => {
this.finished = true
})
},
timestampToDate (timestamp) {
const date = new Date(Number.parseInt(timestamp.toString()))