feat:2.4.0
1. Fix the 404 on the retry detail pages 2. Improve how the next trigger time is computed
This commit is contained in:
parent
171951f09a
commit
efe7a5d13b
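
The scheduling half of this commit replaces the hard-coded 10-second scan window with SystemConstants.SCHEDULE_PERIOD and moves the stale-trigger clamp out of the wait strategies and into the scan actors. A minimal standalone sketch of that clamp, as it appears in ScanJobTaskActor and ScanRetryTaskActor below (the wrapper class and helper name are illustrative, not from the project):

import java.time.LocalDateTime;

// Illustrative sketch of the nextTriggerAt clamp added in ScanJobTaskActor and
// ScanRetryTaskActor; class and method names are not from the project.
public final class NextTriggerClampSketch {

    // Mirrors SystemConstants.SCHEDULE_PERIOD (seconds) introduced in this commit.
    static final long SCHEDULE_PERIOD = 10L;

    // If the stored trigger time lags more than one scheduling period behind
    // "now", reset it to "now" so the task does not replay every missed period.
    static LocalDateTime clamp(LocalDateTime nextTriggerAt, LocalDateTime now) {
        if (nextTriggerAt.plusSeconds(SCHEDULE_PERIOD).isBefore(now)) {
            return now;
        }
        return nextTriggerAt;
    }

    public static void main(String[] args) {
        LocalDateTime now = LocalDateTime.now();
        System.out.println(clamp(now.minusMinutes(5), now)); // stale -> clamped to now
        System.out.println(clamp(now.plusSeconds(5), now));  // due soon -> unchanged
    }
}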
@@ -86,4 +86,14 @@ public interface SystemConstants {
     String JOB_SHARDING_VALUE_SEPARATOR = "#=@";
 
     String JOB_SHARDING_ARGS_SEPARATOR = "#;@";
+
+    /**
+     * Scheduling period
+     */
+    Long SCHEDULE_PERIOD = 10L;
+
+    /**
+     * Delay 30s so that, as far as possible, all cluster nodes have finished starting before the rebalance
+     */
+    Long SCHEDULE_INITIAL_DELAY = 30L;
 }

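The two new constants feed the DispatchService change further down: tick every SCHEDULE_PERIOD seconds, but wait SCHEDULE_INITIAL_DELAY seconds first so the cluster is up before rebalancing. A sketch of how such a pair typically drives a fixed-rate scheduler (hypothetical wiring; the project's actual executor setup is not shown in this diff):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Hypothetical wiring of SCHEDULE_PERIOD / SCHEDULE_INITIAL_DELAY into a fixed-rate loop.
public final class DispatchLoopSketch {

    static final long SCHEDULE_PERIOD = 10L;        // SystemConstants.SCHEDULE_PERIOD
    static final long SCHEDULE_INITIAL_DELAY = 30L; // SystemConstants.SCHEDULE_INITIAL_DELAY

    public static void main(String[] args) {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // First run after 30s (lets cluster nodes finish starting), then every 10s.
        scheduler.scheduleAtFixedRate(
            () -> System.out.println("dispatch tick"),
            SCHEDULE_INITIAL_DELAY,
            SCHEDULE_PERIOD,
            TimeUnit.SECONDS);
    }
}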
@@ -78,7 +78,7 @@ public class JdbcLockProvider extends AbstractLockProvider {
             distributedLock.setUpdateDt(now);
             return distributedLockMapper.insert(distributedLock) > 0;
         } catch (DuplicateKeyException | ConcurrencyFailureException | TransactionSystemException e) {
-            LogUtils.warn(log,"Duplicate key. lockName:[{}]", lockConfig.getLockName());
+            // LogUtils.warn(log,"Duplicate key. lockName:[{}]", lockConfig.getLockName());
             return false;
         } catch (DataIntegrityViolationException | BadSqlGrammarException | UncategorizedSQLException e) {
             LogUtils.error(log,"Unexpected exception. lockName:[{}]", lockConfig.getLockName(), e);

@@ -2,7 +2,10 @@ package com.aizuda.easy.retry.server.job.task.support.dispatch;
 
 import akka.actor.AbstractActor;
 import akka.actor.ActorRef;
+import cn.hutool.core.date.DateUnit;
+import cn.hutool.core.date.DateUtil;
+import cn.hutool.core.lang.Assert;
 import com.aizuda.easy.retry.common.core.constant.SystemConstants;
 import com.aizuda.easy.retry.common.core.enums.StatusEnum;
 import com.aizuda.easy.retry.common.core.log.LogUtils;
 import com.aizuda.easy.retry.server.common.akka.ActorGenerator;

@@ -125,7 +128,14 @@ public class ScanJobTaskActor extends AbstractActor {
         WaitStrategyContext waitStrategyContext = new WaitStrategyContext();
         waitStrategyContext.setTriggerType(partitionTask.getTriggerType());
         waitStrategyContext.setTriggerInterval(partitionTask.getTriggerInterval());
-        waitStrategyContext.setNextTriggerAt(partitionTask.getNextTriggerAt());
+
+        LocalDateTime now = LocalDateTime.now();
+        LocalDateTime nextTriggerAt = partitionTask.getNextTriggerAt();
+        if (nextTriggerAt.plusSeconds(SystemConstants.SCHEDULE_PERIOD).isBefore(now)) {
+            nextTriggerAt = now;
+        }
+
+        waitStrategyContext.setNextTriggerAt(nextTriggerAt);
 
         return waitStrategy.computeRetryTime(waitStrategyContext);
     }

@@ -136,7 +146,7 @@ public class ScanJobTaskActor extends AbstractActor {
         new LambdaQueryWrapper<Job>()
             .eq(Job::getJobStatus, StatusEnum.YES.getStatus())
             .in(Job::getBucketIndex, scanTask.getBuckets())
-            .le(Job::getNextTriggerAt, LocalDateTime.now().plusSeconds(10))
+            .le(Job::getNextTriggerAt, LocalDateTime.now().plusSeconds(SystemConstants.SCHEDULE_PERIOD))
             .eq(Job::getDeleted, StatusEnum.NO.getStatus())
             .ge(Job::getId, startId)
         ).getRecords();

@@ -122,12 +122,8 @@ public class WaitStrategies {
         public LocalDateTime computeRetryTime(WaitStrategyContext context) {
             long triggerInterval = Long.parseLong(context.triggerInterval);
 
-            LocalDateTime nextTriggerAt = context.getNextTriggerAt();
-            if (nextTriggerAt.isBefore(LocalDateTime.now())) {
-                nextTriggerAt = LocalDateTime.now();
-            }
 
-            return nextTriggerAt.plusSeconds(triggerInterval);
+            return context.nextTriggerAt.plusSeconds(triggerInterval);
         }
     }

@@ -139,14 +135,9 @@ public class WaitStrategies {
         @Override
         public LocalDateTime computeRetryTime(WaitStrategyContext context) {
-
-            LocalDateTime nextTriggerAt = context.getNextTriggerAt();
-            if (nextTriggerAt.isBefore(LocalDateTime.now())) {
-                nextTriggerAt = LocalDateTime.now();
-            }
 
             Date nextValidTime;
             try {
-                ZonedDateTime zdt = nextTriggerAt.atZone(ZoneOffset.ofHours(8));
+                ZonedDateTime zdt = context.nextTriggerAt.atZone(ZoneOffset.ofHours(8));
                 nextValidTime = new CronExpression(context.getTriggerInterval()).getNextValidTimeAfter(Date.from(zdt.toInstant()));
             } catch (ParseException e) {
                 throw new EasyRetryServerException("解析CRON表达式异常 [{}]", context.getTriggerInterval(), e);

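The CRON strategy keeps the fixed UTC+8 conversion when asking CronExpression for the next fire time; only the clamping is dropped. A runnable sketch of that LocalDateTime-to-Date round trip, assuming the Quartz dependency that the CronExpression/ParseException usage above implies:

import java.text.ParseException;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.Date;
import org.quartz.CronExpression;

// Sketch of the CRON next-trigger computation:
// LocalDateTime -> Date -> CronExpression -> Date -> LocalDateTime.
public final class CronNextTriggerSketch {

    static LocalDateTime nextTrigger(String cron, LocalDateTime nextTriggerAt) throws ParseException {
        // The diff pins the zone to UTC+8 when converting to an Instant.
        ZonedDateTime zdt = nextTriggerAt.atZone(ZoneOffset.ofHours(8));
        Date nextValidTime = new CronExpression(cron).getNextValidTimeAfter(Date.from(zdt.toInstant()));
        // Convert back using the same fixed offset.
        return LocalDateTime.ofInstant(nextValidTime.toInstant(), ZoneOffset.ofHours(8));
    }

    public static void main(String[] args) throws ParseException {
        // Quartz cron: every 30 seconds.
        System.out.println(nextTrigger("0/30 * * * * ?", LocalDateTime.now()));
    }
}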
@@ -1,6 +1,7 @@
 package com.aizuda.easy.retry.server.retry.task.support.dispatch.actor.scan;
 
 import akka.actor.AbstractActor;
+import com.aizuda.easy.retry.common.core.constant.SystemConstants;
 import com.aizuda.easy.retry.common.core.enums.RetryStatusEnum;
 import com.aizuda.easy.retry.common.core.log.LogUtils;
 import com.aizuda.easy.retry.server.common.IdempotentStrategy;

@@ -89,7 +90,7 @@ public abstract class AbstractScanGroup extends AbstractActor {
 
         // Compute how many pull loops to run
         if (preCostTime.get() > 0) {
-            long loopCount = Math.max((10 * 1000) / preCostTime.get(), 1);
+            long loopCount = Math.max((SystemConstants.SCHEDULE_PERIOD * 1000) / preCostTime.get(), 1);
             // TODO: make the maximum pull count configurable
             loopCount = Math.min(loopCount, 10);
             pullCount.set(loopCount);

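The loop count above packs as many pulls as the previous pull's cost allows into one scheduling period, floored at 1 and capped at 10. The same arithmetic in isolation (class and method names are illustrative, not from the project):

// Sketch of the pull-loop sizing in AbstractScanGroup, using the diff's formula.
public final class PullLoopSketch {

    static final long SCHEDULE_PERIOD = 10L; // seconds, SystemConstants.SCHEDULE_PERIOD
    static final long MAX_PULL_COUNT = 10L;  // hard-coded cap (TODO in the diff: make configurable)

    // preCostTimeMillis: how long the previous pull took, in milliseconds.
    static long computePullCount(long preCostTimeMillis) {
        if (preCostTimeMillis <= 0) {
            return 1; // nothing measured yet; pull once
        }
        // How many pulls of the previous duration fit into one scheduling period,
        // at least 1 and at most MAX_PULL_COUNT.
        long loopCount = Math.max((SCHEDULE_PERIOD * 1000) / preCostTimeMillis, 1);
        return Math.min(loopCount, MAX_PULL_COUNT);
    }

    public static void main(String[] args) {
        System.out.println(computePullCount(3_000)); // three 3s pulls fit in 10s
        System.out.println(computePullCount(500));   // 20 would fit, capped to 10
    }
}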
@@ -158,7 +159,7 @@ public abstract class AbstractScanGroup extends AbstractActor {
         new LambdaQueryWrapper<RetryTask>()
             .eq(RetryTask::getRetryStatus, RetryStatusEnum.RUNNING.getStatus())
             .eq(RetryTask::getGroupName, groupName).eq(RetryTask::getTaskType, taskType)
-            .le(RetryTask::getNextTriggerAt, LocalDateTime.now().plusSeconds(10))
+            .le(RetryTask::getNextTriggerAt, LocalDateTime.now().plusSeconds(SystemConstants.SCHEDULE_PERIOD))
             .gt(RetryTask::getId, lastId)
             .orderByAsc(RetryTask::getId))
         .getRecords();

@@ -1,5 +1,6 @@
 package com.aizuda.easy.retry.server.retry.task.support.dispatch.actor.scan;
 
+import com.aizuda.easy.retry.common.core.constant.SystemConstants;
 import com.aizuda.easy.retry.server.common.akka.ActorGenerator;
 import com.aizuda.easy.retry.server.retry.task.dto.RetryPartitionTask;
 import com.aizuda.easy.retry.server.retry.task.support.WaitStrategy;

@@ -57,7 +58,14 @@ public class ScanRetryTaskActor extends AbstractScanGroup {
             .getSceneConfigByGroupNameAndSceneName(partitionTask.getGroupName(), partitionTask.getSceneName());
 
         WaitStrategyContext waitStrategyContext = new WaitStrategyContext();
-        waitStrategyContext.setNextTriggerAt(partitionTask.getNextTriggerAt());
+
+        LocalDateTime now = LocalDateTime.now();
+        LocalDateTime nextTriggerAt = partitionTask.getNextTriggerAt();
+        if (nextTriggerAt.plusSeconds(SystemConstants.SCHEDULE_PERIOD).isBefore(now)) {
+            nextTriggerAt = now;
+        }
+
+        waitStrategyContext.setNextTriggerAt(nextTriggerAt);
         waitStrategyContext.setTriggerInterval(sceneConfig.getTriggerInterval());
         waitStrategyContext.setTriggerCount(partitionTask.getRetryCount() + 1);
         // Update the trigger time; the task enters the time wheel

@@ -172,17 +172,6 @@ public class WaitStrategies {
 
         @Override
         public LocalDateTime computeRetryTime(WaitStrategyContext retryContext) {
-            // if (TaskTypeEnum.CALLBACK.getType().equals(retryTask.getTaskType())) {
-            //     // Failed callbacks are retried every 15 minutes by default
-            //     SystemProperties systemProperties = SpringContext.CONTEXT.getBean(SystemProperties.class);
-            //     triggerInterval = systemProperties.getCallback().getTriggerInterval();
-            // } else {
-            //     AccessTemplate accessTemplate = SpringContext.CONTEXT.getBean(AccessTemplate.class);
-            //     SceneConfig sceneConfig =
-            //         accessTemplate.getSceneConfigAccess().getSceneConfigByGroupNameAndSceneName(retryTask.getGroupName(), retryTask.getSceneName());
-            //     triggerInterval = Integer.parseInt(sceneConfig.getTriggerInterval());
-            // }
-
             return retryContext.getNextTriggerAt().plusSeconds(Integer.parseInt(retryContext.getTriggerInterval()));
         }
     }

@@ -1,6 +1,7 @@
 package com.aizuda.easy.retry.server.starter.dispatch;
 
 import akka.actor.ActorRef;
+import com.aizuda.easy.retry.common.core.constant.SystemConstants;
 import com.aizuda.easy.retry.common.core.log.LogUtils;
 import com.aizuda.easy.retry.server.common.Lifecycle;
 import com.aizuda.easy.retry.server.common.akka.ActorGenerator;

@@ -35,12 +36,12 @@ public class DispatchService implements Lifecycle {
     /**
      * Scheduling period
      */
-    public static final Long PERIOD = 10L;
+    public static final Long PERIOD = SystemConstants.SCHEDULE_PERIOD;
 
     /**
      * Delay 30s so that, as far as possible, all cluster nodes have finished starting before the rebalance
      */
-    public static final Long INITIAL_DELAY = 30L;
+    public static final Long INITIAL_DELAY = SystemConstants.SCHEDULE_INITIAL_DELAY;
 
     @Override
     public void start() {

@@ -56,7 +56,7 @@ export const asyncRouterMap = [
     name: 'RetryTask',
     component: RouteView,
     redirect: '/retry/list',
-    meta: { title: '重试任务管理', icon: 'schedule', hideChildrenInMenu: true, keepAlive: true, permission: ['retryTask'] },
+    meta: { title: '重试任务管理', icon: 'schedule', permission: ['retryTask'] },
     children: [
       {
         path: '/retry/scene/list',

@@ -223,8 +223,6 @@ export default {
   methods: {
     loadData (record) {
       const foundItem = this.logData.filter(item => item.taskId === record.id)
-      console.log(record)
-      console.log(foundItem)
       return foundItem
     },
     handleChange (value) {

@@ -263,7 +263,7 @@ export default {
       this.advanced = !this.advanced
     },
     handleInfo (record) {
-      this.$router.push({ path: '/retry-dead-letter/info', query: { id: record.id, groupName: record.groupName } })
+      this.$router.push({ path: '/retry/dead-letter/info', query: { id: record.id, groupName: record.groupName } })
     },
     onClick ({ key }) {
       if (key === '1') {

@@ -253,7 +253,8 @@ export default {
       this.advanced = !this.advanced
     },
     handleInfo (record) {
-      this.$router.push({ path: '/retry-log/info', query: { id: record.id } })
+      console.log(record)
+      this.$router.push({ path: '/retry/log/info', query: { id: record.id } })
     }
   }
 }

@@ -85,6 +85,7 @@ export default {
     }
   },
   created () {
+    console.log('111')
     const id = this.$route.query.id
     const groupName = this.$route.query.groupName
     if (id && groupName) {

@@ -97,7 +98,7 @@ export default {
         this.$refs.retryTaskLogMessageListRef.refreshTable(this.queryParam)
       })
     } else {
-      this.$router.push({ path: '/404' })
+      // this.$router.push({ path: '/404' })
     }
   },
   methods: {

@@ -339,7 +339,7 @@ export default {
       this.advanced = !this.advanced
     },
     handleInfo (record) {
-      this.$router.push({ path: '/retry-task/info', query: { id: record.id, groupName: record.groupName } })
+      this.$router.push({ path: '/retry/info', query: { id: record.id, groupName: record.groupName } })
     },
     handleOk (record) {},
     handleSuspend (record) {