feat: 达梦支持

This commit is contained in:
dhb52 2024-06-01 22:53:14 +08:00 committed by opensnail
parent e6f6189d0e
commit ac128a9ba5
25 changed files with 1518 additions and 47 deletions

View File

@ -6,6 +6,7 @@ volumes:
postgres: { }
sqlserver: { }
mariadb: { }
dm8: { }
services:
mysql:
@ -84,3 +85,26 @@ services:
- mariadb:/var/lib/mysql/
# 注入初始化脚本, mysql
- ../sql/snail_job_mysql.sql:/docker-entrypoint-initdb.d/init.sql:ro
dm8:
# wget https://download.dameng.com/eco/dm8/dm8_20230808_rev197096_x86_rh6_64_single.tar
# docker load -i dm8_20230808_rev197096_x86_rh6_64_single.tar
image: dm8_single:dm8_20230808_rev197096_x86_rh6_64
restart: unless-stopped
environment:
PAGE_SIZE: 16
LD_LIBRARY_PATH: /opt/dmdbms/bin
EXTENT_SIZE: 32
BLANK_PAD_MODE: 1
LOG_SIZE: 1024
UNICODE_FLAG: 1
LENGTH_IN_CHAR: 1
INSTANCE_NAME: dm8_test
ports:
- "5236:5236"
volumes:
- dm8:/opt/dmdbms/data
# docker compose exec dm8 bash -c "exec /opt/dmdbms/bin/disql SYSDBA/SYSDBA001 \`/tmp/schema.sql"
- ../sql/snail_job_dm8.sql:/tmp/schema.sql:ro

821
doc/sql/snail_job_dm8.sql Normal file
View File

@ -0,0 +1,821 @@
/*
SnailJob Database Transfer Tool
Source Server Type : MySQL
Target Server Type : DM8
Date: 2024-06-01 00:26:12
*/
-- sj_namespace: tenant namespace registry; every other table scopes rows by namespace_id.
-- Note: IDENTITY makes id auto-increment in DM8.
CREATE TABLE sj_namespace
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
name varchar(64) NULL,
unique_id varchar(64) NULL,
description varchar(256) DEFAULT '' NULL,
deleted smallint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_namespace_01 ON sj_namespace (name);
COMMENT ON COLUMN sj_namespace.id IS '主键';
COMMENT ON COLUMN sj_namespace.name IS '名称';
COMMENT ON COLUMN sj_namespace.unique_id IS '唯一id';
COMMENT ON COLUMN sj_namespace.description IS '描述';
COMMENT ON COLUMN sj_namespace.deleted IS '逻辑删除 1、删除';
COMMENT ON COLUMN sj_namespace.create_dt IS '创建时间';
COMMENT ON COLUMN sj_namespace.update_dt IS '修改时间';
COMMENT ON TABLE sj_namespace IS '命名空间';
-- Seed the default namespace; its unique_id is the DEFAULT used by namespace_id columns elsewhere.
INSERT INTO sj_namespace (name, unique_id, create_dt, update_dt, deleted)
VALUES ('Default', '764d604ec6fc45f68cd92514c40e9e1a', sysdate, sysdate, 0);
-- sj_group_config: per-group configuration; (namespace_id, group_name) is unique.
CREATE TABLE sj_group_config
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) DEFAULT '' NULL,
description varchar(256) DEFAULT '' NULL,
token varchar(64) DEFAULT 'SJ_cKqBTPzCsWA3VyuCfFoccmuIEGXjr5KT' NULL,
group_status smallint DEFAULT 0 NOT NULL,
version int NOT NULL,
group_partition int NOT NULL,
id_generator_mode smallint DEFAULT 1 NOT NULL,
init_scene smallint DEFAULT 0 NOT NULL,
bucket_index int DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_group_config_01 ON sj_group_config (namespace_id, group_name);
COMMENT ON COLUMN sj_group_config.id IS '主键';
COMMENT ON COLUMN sj_group_config.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_group_config.group_name IS '组名称';
COMMENT ON COLUMN sj_group_config.description IS '组描述';
COMMENT ON COLUMN sj_group_config.token IS 'token';
COMMENT ON COLUMN sj_group_config.group_status IS '组状态 0、未启用 1、启用';
COMMENT ON COLUMN sj_group_config.version IS '版本号';
COMMENT ON COLUMN sj_group_config.group_partition IS '分区';
COMMENT ON COLUMN sj_group_config.id_generator_mode IS '唯一id生成模式 默认号段模式';
COMMENT ON COLUMN sj_group_config.init_scene IS '是否初始化场景 0:否 1:是';
COMMENT ON COLUMN sj_group_config.bucket_index IS 'bucket';
COMMENT ON COLUMN sj_group_config.create_dt IS '创建时间';
COMMENT ON COLUMN sj_group_config.update_dt IS '修改时间';
COMMENT ON TABLE sj_group_config IS '组配置';
-- sj_notify_config: alert-notification rules per business object (job / workflow / retry scene).
CREATE TABLE sj_notify_config
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
business_id varchar(64) NULL,
system_task_type smallint DEFAULT 3 NOT NULL,
notify_status smallint DEFAULT 0 NOT NULL,
recipient_ids varchar(128) NULL,
notify_threshold int DEFAULT 0 NOT NULL,
notify_scene smallint DEFAULT 0 NOT NULL,
rate_limiter_status smallint DEFAULT 0 NOT NULL,
rate_limiter_threshold int DEFAULT 0 NOT NULL,
description varchar(256) DEFAULT '' NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_notify_config_01 ON sj_notify_config (namespace_id, group_name, business_id);
COMMENT ON COLUMN sj_notify_config.id IS '主键';
COMMENT ON COLUMN sj_notify_config.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_notify_config.group_name IS '组名称';
COMMENT ON COLUMN sj_notify_config.business_id IS '业务id ( job_id或workflow_id或scene_name ) ';
COMMENT ON COLUMN sj_notify_config.system_task_type IS '任务类型 1. 重试任务 2. 重试回调 3、JOB任务 4、WORKFLOW任务';
COMMENT ON COLUMN sj_notify_config.notify_status IS '通知状态 0、未启用 1、启用';
COMMENT ON COLUMN sj_notify_config.recipient_ids IS '接收人id列表';
COMMENT ON COLUMN sj_notify_config.notify_threshold IS '通知阈值';
COMMENT ON COLUMN sj_notify_config.notify_scene IS '通知场景';
COMMENT ON COLUMN sj_notify_config.rate_limiter_status IS '限流状态 0、未启用 1、启用';
COMMENT ON COLUMN sj_notify_config.rate_limiter_threshold IS '每秒限流阈值';
COMMENT ON COLUMN sj_notify_config.description IS '描述';
COMMENT ON COLUMN sj_notify_config.create_dt IS '创建时间';
COMMENT ON COLUMN sj_notify_config.update_dt IS '修改时间';
COMMENT ON TABLE sj_notify_config IS '通知配置';
-- sj_notify_recipient: alert recipients; notify_attribute holds channel-specific config (referenced by sj_notify_config.recipient_ids).
CREATE TABLE sj_notify_recipient
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
recipient_name varchar(64) NULL,
notify_type smallint DEFAULT 0 NOT NULL,
notify_attribute varchar(512) NULL,
description varchar(256) DEFAULT '' NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_notify_recipient_01 ON sj_notify_recipient (namespace_id);
COMMENT ON COLUMN sj_notify_recipient.id IS '主键';
COMMENT ON COLUMN sj_notify_recipient.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_notify_recipient.recipient_name IS '接收人名称';
COMMENT ON COLUMN sj_notify_recipient.notify_type IS '通知类型 1、钉钉 2、邮件 3、企业微信 4 飞书 5 webhook';
COMMENT ON COLUMN sj_notify_recipient.notify_attribute IS '配置属性';
COMMENT ON COLUMN sj_notify_recipient.description IS '描述';
COMMENT ON COLUMN sj_notify_recipient.create_dt IS '创建时间';
COMMENT ON COLUMN sj_notify_recipient.update_dt IS '修改时间';
COMMENT ON TABLE sj_notify_recipient IS '告警通知接收人';
-- sj_retry_dead_letter_0: dead-letter queue for retries that exhausted their attempts.
-- The _0 suffix is a partition shard number (cf. sj_group_config.group_partition).
CREATE TABLE sj_retry_dead_letter_0
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
unique_id varchar(64) NULL,
group_name varchar(64) NULL,
scene_name varchar(64) NULL,
idempotent_id varchar(64) NULL,
biz_no varchar(64) DEFAULT '' NULL,
executor_name varchar(512) DEFAULT '' NULL,
args_str text NULL,
ext_attrs text NULL,
task_type smallint DEFAULT 1 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_retry_dead_letter_0_01 ON sj_retry_dead_letter_0 (namespace_id, group_name, unique_id);
CREATE INDEX idx_sj_retry_dead_letter_0_01 ON sj_retry_dead_letter_0 (namespace_id, group_name, scene_name);
CREATE INDEX idx_sj_retry_dead_letter_0_02 ON sj_retry_dead_letter_0 (idempotent_id);
CREATE INDEX idx_sj_retry_dead_letter_0_03 ON sj_retry_dead_letter_0 (biz_no);
CREATE INDEX idx_sj_retry_dead_letter_0_04 ON sj_retry_dead_letter_0 (create_dt);
COMMENT ON COLUMN sj_retry_dead_letter_0.id IS '主键';
COMMENT ON COLUMN sj_retry_dead_letter_0.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_retry_dead_letter_0.unique_id IS '同组下id唯一';
COMMENT ON COLUMN sj_retry_dead_letter_0.group_name IS '组名称';
COMMENT ON COLUMN sj_retry_dead_letter_0.scene_name IS '场景名称';
COMMENT ON COLUMN sj_retry_dead_letter_0.idempotent_id IS '幂等id';
COMMENT ON COLUMN sj_retry_dead_letter_0.biz_no IS '业务编号';
COMMENT ON COLUMN sj_retry_dead_letter_0.executor_name IS '执行器名称';
COMMENT ON COLUMN sj_retry_dead_letter_0.args_str IS '执行方法参数';
COMMENT ON COLUMN sj_retry_dead_letter_0.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_retry_dead_letter_0.task_type IS '任务类型 1、重试数据 2、回调数据';
COMMENT ON COLUMN sj_retry_dead_letter_0.create_dt IS '创建时间';
COMMENT ON TABLE sj_retry_dead_letter_0 IS '死信队列表';
-- sj_retry_task_0: active retry tasks (shard 0); rows move to sj_retry_dead_letter_0 after max retries.
CREATE TABLE sj_retry_task_0
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
unique_id varchar(64) NULL,
group_name varchar(64) NULL,
scene_name varchar(64) NULL,
idempotent_id varchar(64) NULL,
biz_no varchar(64) DEFAULT '' NULL,
executor_name varchar(512) DEFAULT '' NULL,
args_str text NULL,
ext_attrs text NULL,
next_trigger_at datetime NOT NULL,
retry_count int DEFAULT 0 NOT NULL,
retry_status smallint DEFAULT 0 NOT NULL,
task_type smallint DEFAULT 1 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_retry_task_0_01 ON sj_retry_task_0 (namespace_id, group_name, unique_id);
CREATE INDEX idx_sj_retry_task_0_01 ON sj_retry_task_0 (namespace_id, group_name, scene_name);
CREATE INDEX idx_sj_retry_task_0_02 ON sj_retry_task_0 (namespace_id, group_name, task_type);
CREATE INDEX idx_sj_retry_task_0_03 ON sj_retry_task_0 (namespace_id, group_name, retry_status);
CREATE INDEX idx_sj_retry_task_0_04 ON sj_retry_task_0 (idempotent_id);
CREATE INDEX idx_sj_retry_task_0_05 ON sj_retry_task_0 (biz_no);
CREATE INDEX idx_sj_retry_task_0_06 ON sj_retry_task_0 (create_dt);
COMMENT ON COLUMN sj_retry_task_0.id IS '主键';
COMMENT ON COLUMN sj_retry_task_0.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_retry_task_0.unique_id IS '同组下id唯一';
COMMENT ON COLUMN sj_retry_task_0.group_name IS '组名称';
COMMENT ON COLUMN sj_retry_task_0.scene_name IS '场景名称';
COMMENT ON COLUMN sj_retry_task_0.idempotent_id IS '幂等id';
COMMENT ON COLUMN sj_retry_task_0.biz_no IS '业务编号';
COMMENT ON COLUMN sj_retry_task_0.executor_name IS '执行器名称';
COMMENT ON COLUMN sj_retry_task_0.args_str IS '执行方法参数';
COMMENT ON COLUMN sj_retry_task_0.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_retry_task_0.next_trigger_at IS '下次触发时间';
COMMENT ON COLUMN sj_retry_task_0.retry_count IS '重试次数';
COMMENT ON COLUMN sj_retry_task_0.retry_status IS '重试状态 0、重试中 1、成功 2、最大重试次数';
COMMENT ON COLUMN sj_retry_task_0.task_type IS '任务类型 1、重试数据 2、回调数据';
COMMENT ON COLUMN sj_retry_task_0.create_dt IS '创建时间';
COMMENT ON COLUMN sj_retry_task_0.update_dt IS '修改时间';
COMMENT ON TABLE sj_retry_task_0 IS '任务表';
-- sj_retry_task_log: immutable per-task log header; detail messages live in sj_retry_task_log_message.
CREATE TABLE sj_retry_task_log
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
unique_id varchar(64) NULL,
group_name varchar(64) NULL,
scene_name varchar(64) NULL,
idempotent_id varchar(64) NULL,
biz_no varchar(64) DEFAULT '' NULL,
executor_name varchar(512) DEFAULT '' NULL,
args_str text NULL,
ext_attrs text NULL,
retry_status smallint DEFAULT 0 NOT NULL,
task_type smallint DEFAULT 1 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_retry_task_log_01 ON sj_retry_task_log (namespace_id, group_name, scene_name);
CREATE INDEX idx_sj_retry_task_log_02 ON sj_retry_task_log (retry_status);
CREATE INDEX idx_sj_retry_task_log_03 ON sj_retry_task_log (idempotent_id);
CREATE INDEX idx_sj_retry_task_log_04 ON sj_retry_task_log (unique_id);
CREATE INDEX idx_sj_retry_task_log_05 ON sj_retry_task_log (biz_no);
CREATE INDEX idx_sj_retry_task_log_06 ON sj_retry_task_log (create_dt);
COMMENT ON COLUMN sj_retry_task_log.id IS '主键';
COMMENT ON COLUMN sj_retry_task_log.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_retry_task_log.unique_id IS '同组下id唯一';
COMMENT ON COLUMN sj_retry_task_log.group_name IS '组名称';
COMMENT ON COLUMN sj_retry_task_log.scene_name IS '场景名称';
COMMENT ON COLUMN sj_retry_task_log.idempotent_id IS '幂等id';
COMMENT ON COLUMN sj_retry_task_log.biz_no IS '业务编号';
COMMENT ON COLUMN sj_retry_task_log.executor_name IS '执行器名称';
COMMENT ON COLUMN sj_retry_task_log.args_str IS '执行方法参数';
COMMENT ON COLUMN sj_retry_task_log.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_retry_task_log.retry_status IS '重试状态 0、重试中 1、成功 2、最大次数';
COMMENT ON COLUMN sj_retry_task_log.task_type IS '任务类型 1、重试数据 2、回调数据';
COMMENT ON COLUMN sj_retry_task_log.create_dt IS '创建时间';
COMMENT ON COLUMN sj_retry_task_log.update_dt IS '修改时间';
COMMENT ON TABLE sj_retry_task_log IS '任务日志基础信息表';
-- sj_retry_task_log_message: batched log lines reported by clients, keyed back to sj_retry_task_log via (namespace_id, group_name, unique_id).
CREATE TABLE sj_retry_task_log_message
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
unique_id varchar(64) NULL,
message text NULL,
log_num int DEFAULT 1 NOT NULL,
real_time bigint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_retry_task_log_message_01 ON sj_retry_task_log_message (namespace_id, group_name, unique_id);
CREATE INDEX idx_sj_retry_task_log_message_02 ON sj_retry_task_log_message (create_dt);
COMMENT ON COLUMN sj_retry_task_log_message.id IS '主键';
COMMENT ON COLUMN sj_retry_task_log_message.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_retry_task_log_message.group_name IS '组名称';
COMMENT ON COLUMN sj_retry_task_log_message.unique_id IS '同组下id唯一';
COMMENT ON COLUMN sj_retry_task_log_message.message IS '异常信息';
COMMENT ON COLUMN sj_retry_task_log_message.log_num IS '日志数量';
COMMENT ON COLUMN sj_retry_task_log_message.real_time IS '上报时间';
COMMENT ON COLUMN sj_retry_task_log_message.create_dt IS '创建时间';
COMMENT ON TABLE sj_retry_task_log_message IS '任务调度日志信息记录表';
-- sj_retry_scene_config: retry policy per scene (backoff, max attempts, timeouts, routing).
CREATE TABLE sj_retry_scene_config
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
scene_name varchar(64) NULL,
group_name varchar(64) NULL,
scene_status smallint DEFAULT 0 NOT NULL,
max_retry_count int DEFAULT 5 NOT NULL,
back_off smallint DEFAULT 1 NOT NULL,
trigger_interval varchar(16) DEFAULT '' NULL,
deadline_request bigint DEFAULT 60000 NOT NULL,
executor_timeout int DEFAULT 5 NOT NULL,
route_key smallint DEFAULT 4 NOT NULL,
description varchar(256) DEFAULT '' NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_retry_scene_config_01 ON sj_retry_scene_config (namespace_id, group_name, scene_name);
COMMENT ON COLUMN sj_retry_scene_config.id IS '主键';
COMMENT ON COLUMN sj_retry_scene_config.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_retry_scene_config.scene_name IS '场景名称';
COMMENT ON COLUMN sj_retry_scene_config.group_name IS '组名称';
COMMENT ON COLUMN sj_retry_scene_config.scene_status IS '组状态 0、未启用 1、启用';
COMMENT ON COLUMN sj_retry_scene_config.max_retry_count IS '最大重试次数';
COMMENT ON COLUMN sj_retry_scene_config.back_off IS '1、默认等级 2、固定间隔时间 3、CRON 表达式';
COMMENT ON COLUMN sj_retry_scene_config.trigger_interval IS '间隔时长';
COMMENT ON COLUMN sj_retry_scene_config.deadline_request IS 'Deadline Request 调用链超时 单位毫秒';
COMMENT ON COLUMN sj_retry_scene_config.executor_timeout IS '任务执行超时时间,单位秒';
COMMENT ON COLUMN sj_retry_scene_config.route_key IS '路由策略';
COMMENT ON COLUMN sj_retry_scene_config.description IS '描述';
COMMENT ON COLUMN sj_retry_scene_config.create_dt IS '创建时间';
COMMENT ON COLUMN sj_retry_scene_config.update_dt IS '修改时间';
COMMENT ON TABLE sj_retry_scene_config IS '场景配置';
-- sj_server_node: heartbeat registry of client/server nodes; rows expire via expire_at.
CREATE TABLE sj_server_node
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
host_id varchar(64) NULL,
host_ip varchar(64) NULL,
host_port int NOT NULL,
expire_at datetime NOT NULL,
node_type smallint NOT NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_server_node_01 ON sj_server_node (host_id, host_ip);
CREATE INDEX idx_sj_server_node_01 ON sj_server_node (namespace_id, group_name);
CREATE INDEX idx_sj_server_node_02 ON sj_server_node (expire_at, node_type);
COMMENT ON COLUMN sj_server_node.id IS '主键';
COMMENT ON COLUMN sj_server_node.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_server_node.group_name IS '组名称';
COMMENT ON COLUMN sj_server_node.host_id IS '主机id';
COMMENT ON COLUMN sj_server_node.host_ip IS '机器ip';
COMMENT ON COLUMN sj_server_node.host_port IS '机器端口';
COMMENT ON COLUMN sj_server_node.expire_at IS '过期时间';
COMMENT ON COLUMN sj_server_node.node_type IS '节点类型 1、客户端 2、是服务端';
COMMENT ON COLUMN sj_server_node.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_server_node.create_dt IS '创建时间';
COMMENT ON COLUMN sj_server_node.update_dt IS '修改时间';
COMMENT ON TABLE sj_server_node IS '服务器节点';
-- sj_distributed_lock: DB-backed distributed lock table (shedlock-style layout: name / lock_until / locked_at / locked_by).
-- lock_until/locked_at use timestamp(3) for millisecond precision.
-- NOTE(review): no unique index on name is declared here — verify whether the locking code relies on one.
CREATE TABLE sj_distributed_lock
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
name varchar(64) NULL,
lock_until timestamp(3) DEFAULT CURRENT_TIMESTAMP(3) NOT NULL,
locked_at timestamp(3) DEFAULT CURRENT_TIMESTAMP(3) NOT NULL,
locked_by varchar(255) NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
COMMENT ON COLUMN sj_distributed_lock.id IS '主键';
COMMENT ON COLUMN sj_distributed_lock.name IS '锁名称';
COMMENT ON COLUMN sj_distributed_lock.lock_until IS '锁定时长';
COMMENT ON COLUMN sj_distributed_lock.locked_at IS '锁定时间';
COMMENT ON COLUMN sj_distributed_lock.locked_by IS '锁定者';
COMMENT ON COLUMN sj_distributed_lock.create_dt IS '创建时间';
COMMENT ON COLUMN sj_distributed_lock.update_dt IS '修改时间';
COMMENT ON TABLE sj_distributed_lock IS '锁定表';
-- sj_system_user: console accounts; seeded with an 'admin' user (role 2 = administrator).
-- The seeded password value is a hex digest, not plaintext — presumably SHA-256; verify against the auth code.
CREATE TABLE sj_system_user
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
username varchar(64) NULL,
password varchar(128) NULL,
role smallint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
COMMENT ON COLUMN sj_system_user.id IS '主键';
COMMENT ON COLUMN sj_system_user.username IS '账号';
COMMENT ON COLUMN sj_system_user.password IS '密码';
COMMENT ON COLUMN sj_system_user.role IS '角色1-普通用户、2-管理员';
COMMENT ON COLUMN sj_system_user.create_dt IS '创建时间';
COMMENT ON COLUMN sj_system_user.update_dt IS '修改时间';
COMMENT ON TABLE sj_system_user IS '系统用户表';
INSERT INTO sj_system_user (username, password, role)
VALUES ('admin', '465c194afb65670f38322df087f0a9bb225cc257e43eb4ac5a0c98ef5b3173ac', 2);
-- sj_system_user_permission: grants a system user access to a (namespace, group) pair; the triple is unique.
CREATE TABLE sj_system_user_permission
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
group_name varchar(64) NULL,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
system_user_id bigint NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_system_user_permission_01 ON sj_system_user_permission (namespace_id, group_name, system_user_id);
COMMENT ON COLUMN sj_system_user_permission.id IS '主键';
COMMENT ON COLUMN sj_system_user_permission.group_name IS '组名称';
COMMENT ON COLUMN sj_system_user_permission.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_system_user_permission.system_user_id IS '系统用户id';
COMMENT ON COLUMN sj_system_user_permission.create_dt IS '创建时间';
COMMENT ON COLUMN sj_system_user_permission.update_dt IS '修改时间';
COMMENT ON TABLE sj_system_user_permission IS '系统用户权限表';
-- sj_sequence_alloc: segment-mode ID allocator state (max_id advanced by step per allocation batch).
CREATE TABLE sj_sequence_alloc
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) DEFAULT '' NULL,
max_id bigint DEFAULT 1 NOT NULL,
step int DEFAULT 100 NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_sequence_alloc_01 ON sj_sequence_alloc (namespace_id, group_name);
COMMENT ON COLUMN sj_sequence_alloc.id IS '主键';
COMMENT ON COLUMN sj_sequence_alloc.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_sequence_alloc.group_name IS '组名称';
COMMENT ON COLUMN sj_sequence_alloc.max_id IS '最大id';
COMMENT ON COLUMN sj_sequence_alloc.step IS '步长';
COMMENT ON COLUMN sj_sequence_alloc.update_dt IS '更新时间';
COMMENT ON TABLE sj_sequence_alloc IS '号段模式序号ID分配表';
-- sj_job: job definitions (schedule, executor, retry/blocking policy); scheduled via next_trigger_at + bucket_index.
CREATE TABLE sj_job
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
job_name varchar(64) NULL,
args_str text DEFAULT NULL NULL,
args_type smallint DEFAULT 1 NOT NULL,
next_trigger_at bigint NOT NULL,
job_status smallint DEFAULT 1 NOT NULL,
task_type smallint DEFAULT 1 NOT NULL,
route_key smallint DEFAULT 4 NOT NULL,
executor_type smallint DEFAULT 1 NOT NULL,
executor_info varchar(255) DEFAULT NULL NULL,
trigger_type smallint NOT NULL,
trigger_interval varchar(255) NULL,
block_strategy smallint DEFAULT 1 NOT NULL,
executor_timeout int DEFAULT 0 NOT NULL,
max_retry_times int DEFAULT 0 NOT NULL,
parallel_num int DEFAULT 1 NOT NULL,
retry_interval int DEFAULT 0 NOT NULL,
bucket_index int DEFAULT 0 NOT NULL,
resident smallint DEFAULT 0 NOT NULL,
description varchar(256) DEFAULT '' NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
deleted smallint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_job_01 ON sj_job (namespace_id, group_name);
CREATE INDEX idx_sj_job_02 ON sj_job (job_status, bucket_index);
CREATE INDEX idx_sj_job_03 ON sj_job (create_dt);
COMMENT ON COLUMN sj_job.id IS '主键';
COMMENT ON COLUMN sj_job.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_job.group_name IS '组名称';
COMMENT ON COLUMN sj_job.job_name IS '名称';
COMMENT ON COLUMN sj_job.args_str IS '执行方法参数';
COMMENT ON COLUMN sj_job.args_type IS '参数类型 ';
COMMENT ON COLUMN sj_job.next_trigger_at IS '下次触发时间';
COMMENT ON COLUMN sj_job.job_status IS '任务状态 0、关闭、1、开启';
COMMENT ON COLUMN sj_job.task_type IS '任务类型 1、集群 2、广播 3、切片';
COMMENT ON COLUMN sj_job.route_key IS '路由策略';
COMMENT ON COLUMN sj_job.executor_type IS '执行器类型';
COMMENT ON COLUMN sj_job.executor_info IS '执行器名称';
COMMENT ON COLUMN sj_job.trigger_type IS '触发类型 1.CRON 表达式 2. 固定时间';
COMMENT ON COLUMN sj_job.trigger_interval IS '间隔时长';
COMMENT ON COLUMN sj_job.block_strategy IS '阻塞策略 1、丢弃 2、覆盖 3、并行';
COMMENT ON COLUMN sj_job.executor_timeout IS '任务执行超时时间,单位秒';
COMMENT ON COLUMN sj_job.max_retry_times IS '最大重试次数';
COMMENT ON COLUMN sj_job.parallel_num IS '并行数';
COMMENT ON COLUMN sj_job.retry_interval IS '重试间隔 ( s ) ';
COMMENT ON COLUMN sj_job.bucket_index IS 'bucket';
COMMENT ON COLUMN sj_job.resident IS '是否是常驻任务';
COMMENT ON COLUMN sj_job.description IS '描述';
COMMENT ON COLUMN sj_job.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_job.deleted IS '逻辑删除 1、删除';
COMMENT ON COLUMN sj_job.create_dt IS '创建时间';
COMMENT ON COLUMN sj_job.update_dt IS '修改时间';
COMMENT ON TABLE sj_job IS '任务信息';
-- sj_job_log_message: execution log lines for job tasks, keyed by (task_batch_id, task_id).
CREATE TABLE sj_job_log_message
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
job_id bigint NOT NULL,
task_batch_id bigint NOT NULL,
task_id bigint NOT NULL,
message text NULL,
log_num int DEFAULT 1 NOT NULL,
real_time bigint DEFAULT 0 NOT NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_job_log_message_01 ON sj_job_log_message (task_batch_id, task_id);
CREATE INDEX idx_sj_job_log_message_02 ON sj_job_log_message (create_dt);
CREATE INDEX idx_sj_job_log_message_03 ON sj_job_log_message (namespace_id, group_name);
COMMENT ON COLUMN sj_job_log_message.id IS '主键';
COMMENT ON COLUMN sj_job_log_message.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_job_log_message.group_name IS '组名称';
COMMENT ON COLUMN sj_job_log_message.job_id IS '任务信息id';
COMMENT ON COLUMN sj_job_log_message.task_batch_id IS '任务批次id';
COMMENT ON COLUMN sj_job_log_message.task_id IS '调度任务id';
COMMENT ON COLUMN sj_job_log_message.message IS '调度信息';
COMMENT ON COLUMN sj_job_log_message.log_num IS '日志数量';
COMMENT ON COLUMN sj_job_log_message.real_time IS '上报时间';
COMMENT ON COLUMN sj_job_log_message.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_job_log_message.create_dt IS '创建时间';
COMMENT ON TABLE sj_job_log_message IS '调度日志';
-- sj_job_task: individual task instances within a batch; parent_id links sub-tasks (e.g. for sharded/broadcast runs).
CREATE TABLE sj_job_task
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
job_id bigint NOT NULL,
task_batch_id bigint NOT NULL,
parent_id bigint DEFAULT 0 NOT NULL,
task_status smallint DEFAULT 0 NOT NULL,
retry_count int DEFAULT 0 NOT NULL,
client_info varchar(128) DEFAULT NULL NULL,
result_message text NULL,
args_str text DEFAULT NULL NULL,
args_type smallint DEFAULT 1 NOT NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_job_task_01 ON sj_job_task (task_batch_id, task_status);
CREATE INDEX idx_sj_job_task_02 ON sj_job_task (create_dt);
CREATE INDEX idx_sj_job_task_03 ON sj_job_task (namespace_id, group_name);
COMMENT ON COLUMN sj_job_task.id IS '主键';
COMMENT ON COLUMN sj_job_task.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_job_task.group_name IS '组名称';
COMMENT ON COLUMN sj_job_task.job_id IS '任务信息id';
COMMENT ON COLUMN sj_job_task.task_batch_id IS '调度任务id';
COMMENT ON COLUMN sj_job_task.parent_id IS '父执行器id';
COMMENT ON COLUMN sj_job_task.task_status IS '执行的状态 0、失败 1、成功';
COMMENT ON COLUMN sj_job_task.retry_count IS '重试次数';
COMMENT ON COLUMN sj_job_task.client_info IS '客户端地址 clientId#ip:port';
COMMENT ON COLUMN sj_job_task.result_message IS '执行结果';
COMMENT ON COLUMN sj_job_task.args_str IS '执行方法参数';
COMMENT ON COLUMN sj_job_task.args_type IS '参数类型 ';
COMMENT ON COLUMN sj_job_task.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_job_task.create_dt IS '创建时间';
COMMENT ON COLUMN sj_job_task.update_dt IS '修改时间';
COMMENT ON TABLE sj_job_task IS '任务实例';
-- sj_job_task_batch: one scheduling run of a job; also records workflow linkage when launched from a workflow.
CREATE TABLE sj_job_task_batch
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
job_id bigint NOT NULL,
workflow_node_id bigint DEFAULT 0 NOT NULL,
parent_workflow_node_id bigint DEFAULT 0 NOT NULL,
workflow_task_batch_id bigint DEFAULT 0 NOT NULL,
task_batch_status smallint DEFAULT 0 NOT NULL,
operation_reason smallint DEFAULT 0 NOT NULL,
execution_at bigint DEFAULT 0 NOT NULL,
system_task_type smallint DEFAULT 3 NOT NULL,
parent_id varchar(64) DEFAULT '' NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
deleted smallint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_job_task_batch_01 ON sj_job_task_batch (job_id, task_batch_status);
CREATE INDEX idx_sj_job_task_batch_02 ON sj_job_task_batch (create_dt);
CREATE INDEX idx_sj_job_task_batch_03 ON sj_job_task_batch (namespace_id, group_name);
CREATE INDEX idx_sj_job_task_batch_04 ON sj_job_task_batch (workflow_task_batch_id, workflow_node_id);
COMMENT ON COLUMN sj_job_task_batch.id IS '主键';
COMMENT ON COLUMN sj_job_task_batch.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_job_task_batch.group_name IS '组名称';
COMMENT ON COLUMN sj_job_task_batch.job_id IS '任务id';
COMMENT ON COLUMN sj_job_task_batch.workflow_node_id IS '工作流节点id';
COMMENT ON COLUMN sj_job_task_batch.parent_workflow_node_id IS '工作流任务父批次id';
COMMENT ON COLUMN sj_job_task_batch.workflow_task_batch_id IS '工作流任务批次id';
COMMENT ON COLUMN sj_job_task_batch.task_batch_status IS '任务批次状态 0、失败 1、成功';
COMMENT ON COLUMN sj_job_task_batch.operation_reason IS '操作原因';
COMMENT ON COLUMN sj_job_task_batch.execution_at IS '任务执行时间';
COMMENT ON COLUMN sj_job_task_batch.system_task_type IS '任务类型 3、JOB任务 4、WORKFLOW任务';
COMMENT ON COLUMN sj_job_task_batch.parent_id IS '父节点';
COMMENT ON COLUMN sj_job_task_batch.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_job_task_batch.deleted IS '逻辑删除 1、删除';
COMMENT ON COLUMN sj_job_task_batch.create_dt IS '创建时间';
COMMENT ON COLUMN sj_job_task_batch.update_dt IS '修改时间';
COMMENT ON TABLE sj_job_task_batch IS '任务批次';
-- sj_job_summary: per-trigger-time aggregate counters for the Job dashboard.
-- FIX: the column comments for stop_num/stop_reason/cancel_num/cancel_reason were
-- copy-pasted from fail_num/fail_reason ('执行失败-日志数量'/'失败原因'), mislabeling
-- the stop/cancel counters as failure counters; corrected below.
CREATE TABLE sj_job_summary
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) DEFAULT '' NULL,
business_id bigint NOT NULL,
system_task_type smallint DEFAULT 3 NOT NULL,
trigger_at datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
success_num int DEFAULT 0 NOT NULL,
fail_num int DEFAULT 0 NOT NULL,
fail_reason varchar(512) DEFAULT '' NULL,
stop_num int DEFAULT 0 NOT NULL,
stop_reason varchar(512) DEFAULT '' NULL,
cancel_num int DEFAULT 0 NOT NULL,
cancel_reason varchar(512) DEFAULT '' NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_job_summary_01 ON sj_job_summary (trigger_at, system_task_type, business_id);
CREATE INDEX idx_sj_job_summary_01 ON sj_job_summary (namespace_id, group_name, business_id);
COMMENT ON COLUMN sj_job_summary.id IS '主键';
COMMENT ON COLUMN sj_job_summary.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_job_summary.group_name IS '组名称';
COMMENT ON COLUMN sj_job_summary.business_id IS '业务id ( job_id或workflow_id ) ';
COMMENT ON COLUMN sj_job_summary.system_task_type IS '任务类型 3、JOB任务 4、WORKFLOW任务';
COMMENT ON COLUMN sj_job_summary.trigger_at IS '统计时间';
COMMENT ON COLUMN sj_job_summary.success_num IS '执行成功-日志数量';
COMMENT ON COLUMN sj_job_summary.fail_num IS '执行失败-日志数量';
COMMENT ON COLUMN sj_job_summary.fail_reason IS '失败原因';
COMMENT ON COLUMN sj_job_summary.stop_num IS '执行暂停-日志数量';
COMMENT ON COLUMN sj_job_summary.stop_reason IS '暂停原因';
COMMENT ON COLUMN sj_job_summary.cancel_num IS '执行取消-日志数量';
COMMENT ON COLUMN sj_job_summary.cancel_reason IS '取消原因';
COMMENT ON COLUMN sj_job_summary.create_dt IS '创建时间';
COMMENT ON COLUMN sj_job_summary.update_dt IS '修改时间';
COMMENT ON TABLE sj_job_summary IS 'DashBoard_Job';
-- sj_retry_summary: per-trigger-time aggregate counters for the Retry dashboard.
CREATE TABLE sj_retry_summary
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) DEFAULT '' NULL,
scene_name varchar(50) DEFAULT '' NULL,
trigger_at datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
running_num int DEFAULT 0 NOT NULL,
finish_num int DEFAULT 0 NOT NULL,
max_count_num int DEFAULT 0 NOT NULL,
suspend_num int DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE UNIQUE INDEX uk_sj_retry_summary_01 ON sj_retry_summary (namespace_id, group_name, scene_name, trigger_at);
CREATE INDEX idx_sj_retry_summary_01 ON sj_retry_summary (trigger_at);
COMMENT ON COLUMN sj_retry_summary.id IS '主键';
COMMENT ON COLUMN sj_retry_summary.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_retry_summary.group_name IS '组名称';
COMMENT ON COLUMN sj_retry_summary.scene_name IS '场景名称';
COMMENT ON COLUMN sj_retry_summary.trigger_at IS '统计时间';
COMMENT ON COLUMN sj_retry_summary.running_num IS '重试中-日志数量';
COMMENT ON COLUMN sj_retry_summary.finish_num IS '重试完成-日志数量';
COMMENT ON COLUMN sj_retry_summary.max_count_num IS '重试到达最大次数-日志数量';
COMMENT ON COLUMN sj_retry_summary.suspend_num IS '暂停重试-日志数量';
COMMENT ON COLUMN sj_retry_summary.create_dt IS '创建时间';
COMMENT ON COLUMN sj_retry_summary.update_dt IS '修改时间';
COMMENT ON TABLE sj_retry_summary IS 'DashBoard_Retry';
-- sj_workflow: workflow definitions; the DAG itself is stored serialized in flow_info.
CREATE TABLE sj_workflow
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
workflow_name varchar(64) NULL,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
workflow_status smallint DEFAULT 1 NOT NULL,
trigger_type smallint NOT NULL,
trigger_interval varchar(255) NULL,
next_trigger_at bigint NOT NULL,
block_strategy smallint DEFAULT 1 NOT NULL,
executor_timeout int DEFAULT 0 NOT NULL,
description varchar(256) DEFAULT '' NULL,
flow_info text DEFAULT NULL NULL,
bucket_index int DEFAULT 0 NOT NULL,
version int NOT NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
deleted smallint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX idx_sj_workflow_01 ON sj_workflow (create_dt);
CREATE INDEX idx_sj_workflow_02 ON sj_workflow (namespace_id, group_name);
COMMENT ON COLUMN sj_workflow.id IS '主键';
COMMENT ON COLUMN sj_workflow.workflow_name IS '工作流名称';
COMMENT ON COLUMN sj_workflow.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_workflow.group_name IS '组名称';
COMMENT ON COLUMN sj_workflow.workflow_status IS '工作流状态 0、关闭、1、开启';
COMMENT ON COLUMN sj_workflow.trigger_type IS '触发类型 1.CRON 表达式 2. 固定时间';
COMMENT ON COLUMN sj_workflow.trigger_interval IS '间隔时长';
COMMENT ON COLUMN sj_workflow.next_trigger_at IS '下次触发时间';
COMMENT ON COLUMN sj_workflow.block_strategy IS '阻塞策略 1、丢弃 2、覆盖 3、并行';
COMMENT ON COLUMN sj_workflow.executor_timeout IS '任务执行超时时间,单位秒';
COMMENT ON COLUMN sj_workflow.description IS '描述';
COMMENT ON COLUMN sj_workflow.flow_info IS '流程信息';
COMMENT ON COLUMN sj_workflow.bucket_index IS 'bucket';
COMMENT ON COLUMN sj_workflow.version IS '版本号';
COMMENT ON COLUMN sj_workflow.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_workflow.deleted IS '逻辑删除 1、删除';
COMMENT ON COLUMN sj_workflow.create_dt IS '创建时间';
COMMENT ON COLUMN sj_workflow.update_dt IS '修改时间';
COMMENT ON TABLE sj_workflow IS '工作流';
-- sj_workflow_node
-- One row per node in a workflow DAG; links a job (job_id) into a workflow
-- (workflow_id) with node type, failure strategy and serialized node_info.
CREATE TABLE sj_workflow_node
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
node_name varchar(64) NULL,
group_name varchar(64) NULL,
job_id bigint NOT NULL,
workflow_id bigint NOT NULL,
node_type smallint DEFAULT 1 NOT NULL,
expression_type smallint DEFAULT 0 NOT NULL,
fail_strategy smallint DEFAULT 1 NOT NULL,
workflow_node_status smallint DEFAULT 1 NOT NULL,
priority_level int DEFAULT 1 NOT NULL,
node_info text DEFAULT NULL NULL,
version int NOT NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
deleted smallint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
-- Lookup indexes for time-range scans and tenant/group filtering.
CREATE INDEX idx_sj_workflow_node_01 ON sj_workflow_node (create_dt);
CREATE INDEX idx_sj_workflow_node_02 ON sj_workflow_node (namespace_id, group_name);
COMMENT ON COLUMN sj_workflow_node.id IS '主键';
COMMENT ON COLUMN sj_workflow_node.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_workflow_node.node_name IS '节点名称';
COMMENT ON COLUMN sj_workflow_node.group_name IS '组名称';
COMMENT ON COLUMN sj_workflow_node.job_id IS '任务信息id';
COMMENT ON COLUMN sj_workflow_node.workflow_id IS '工作流ID';
COMMENT ON COLUMN sj_workflow_node.node_type IS '1、任务节点 2、条件节点';
COMMENT ON COLUMN sj_workflow_node.expression_type IS '1、SpEl、2、Aviator 3、QL';
COMMENT ON COLUMN sj_workflow_node.fail_strategy IS '失败策略 1、跳过 2、阻塞';
COMMENT ON COLUMN sj_workflow_node.workflow_node_status IS '工作流节点状态 0、关闭、1、开启';
COMMENT ON COLUMN sj_workflow_node.priority_level IS '优先级';
COMMENT ON COLUMN sj_workflow_node.node_info IS '节点信息 ';
COMMENT ON COLUMN sj_workflow_node.version IS '版本号';
COMMENT ON COLUMN sj_workflow_node.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_workflow_node.deleted IS '逻辑删除 1、删除';
COMMENT ON COLUMN sj_workflow_node.create_dt IS '创建时间';
COMMENT ON COLUMN sj_workflow_node.update_dt IS '修改时间';
COMMENT ON TABLE sj_workflow_node IS '工作流节点';
-- sj_workflow_task_batch
-- One execution batch of a workflow: records the status/operation reason and
-- a snapshot of the DAG (flow_info) at execution time.
CREATE TABLE sj_workflow_task_batch
(
id bigint NOT NULL PRIMARY KEY IDENTITY,
namespace_id varchar(64) DEFAULT '764d604ec6fc45f68cd92514c40e9e1a' NULL,
group_name varchar(64) NULL,
workflow_id bigint NOT NULL,
task_batch_status smallint DEFAULT 0 NOT NULL,
operation_reason smallint DEFAULT 0 NOT NULL,
flow_info text DEFAULT NULL NULL,
execution_at bigint DEFAULT 0 NOT NULL,
ext_attrs varchar(256) DEFAULT '' NULL,
deleted smallint DEFAULT 0 NOT NULL,
create_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_dt datetime DEFAULT CURRENT_TIMESTAMP NOT NULL
);
-- Batch status scans per workflow, plus time-range and tenant/group lookups.
CREATE INDEX idx_sj_workflow_task_batch_01 ON sj_workflow_task_batch (workflow_id, task_batch_status);
CREATE INDEX idx_sj_workflow_task_batch_02 ON sj_workflow_task_batch (create_dt);
CREATE INDEX idx_sj_workflow_task_batch_03 ON sj_workflow_task_batch (namespace_id, group_name);
COMMENT ON COLUMN sj_workflow_task_batch.id IS '主键';
COMMENT ON COLUMN sj_workflow_task_batch.namespace_id IS '命名空间id';
COMMENT ON COLUMN sj_workflow_task_batch.group_name IS '组名称';
COMMENT ON COLUMN sj_workflow_task_batch.workflow_id IS '工作流任务id';
COMMENT ON COLUMN sj_workflow_task_batch.task_batch_status IS '任务批次状态 0、失败 1、成功';
COMMENT ON COLUMN sj_workflow_task_batch.operation_reason IS '操作原因';
COMMENT ON COLUMN sj_workflow_task_batch.flow_info IS '流程信息';
COMMENT ON COLUMN sj_workflow_task_batch.execution_at IS '任务执行时间';
COMMENT ON COLUMN sj_workflow_task_batch.ext_attrs IS '扩展字段';
COMMENT ON COLUMN sj_workflow_task_batch.deleted IS '逻辑删除 1、删除';
COMMENT ON COLUMN sj_workflow_task_batch.create_dt IS '创建时间';
COMMENT ON COLUMN sj_workflow_task_batch.update_dt IS '修改时间';
COMMENT ON TABLE sj_workflow_task_batch IS '工作流批次';

View File

@ -626,13 +626,156 @@ GO
return script
class DM8Convertor(Convertor):
    """Converts the MySQL schema script to a DM8 (Dameng) flavoured script.

    DM8 accepts most MySQL type names directly, but follows Oracle rules for
    empty strings ('' is NULL), reserved identifiers and the ordering of the
    DEFAULT and NULL/NOT NULL clauses.

    Fixes vs. previous revision: removed a duplicate ``gen_index`` definition
    (the second identical def silently shadowed the first) and a block of
    committed commented-out Oracle mapping code.
    """

    def __init__(self, src):
        super().__init__(src, "DM8")

    def translate_type(self, type: str, size: Optional[Union[int, Tuple[int]]]):
        """Map a MySQL column type to its DM8 counterpart.

        Unsigned integer variants collapse onto the signed DM8 types.
        Returns None for a type outside the known mapping; callers are
        expected to pass only types that occur in the source schema.
        """
        type = type.lower()
        if type == "varchar":
            return f"varchar({size})"
        if type in ("int", "int unsigned"):
            return "int"
        if type in ("bigint", "bigint unsigned"):
            return "bigint"
        if type == "datetime":
            return "datetime"
        if type == "timestamp":
            return f"timestamp({size})"
        if type == "bit":
            return "bit"
        if type in ("tinyint", "smallint"):
            return "smallint"
        if type in ("text", "longtext"):
            return "text"
        if type in ("blob", "mediumblob"):
            return "blob"
        if type == "decimal":
            # decimal keeps its (precision, scale) arguments when present.
            return (
                f"decimal({','.join(str(s) for s in size)})" if len(size) else "decimal"
            )

    def gen_create(self, ddl) -> str:
        """Generate the CREATE TABLE statement for one parsed table DDL."""

        def generate_column(col):
            # Render one column definition line.
            name = col["name"].lower()
            if name == "id":
                # Surrogate primary key: DM8 auto-increment IDENTITY column.
                return "id bigint NOT NULL PRIMARY KEY IDENTITY"
            type = col["type"].lower()
            full_type = self.translate_type(type, col["size"])
            nullable = "NULL" if col["nullable"] else "NOT NULL"
            # Like Oracle, INSERT '' does not pass a NOT NULL check, so
            # text-like columns are overridden to NULL.
            nullable = "NULL" if type in ("varchar", "text", "longtext") else nullable
            default = f"DEFAULT {col['default']}" if col["default"] is not None else ""
            # "size" is reserved and cannot be used unquoted as a column name.
            field_name = '"size"' if name == "size" else name
            # DEFAULT must precede the NULL / NOT NULL clause.
            return f"{field_name} {full_type} {default} {nullable}"

        table_name = ddl["table_name"].lower()
        columns = [f"{generate_column(col).strip()}" for col in ddl["columns"]]
        field_def_list = ",\n    ".join(columns)
        script = f"""-- {table_name}
CREATE TABLE {table_name} (
    {field_def_list}
);"""
        return script

    def gen_index(self, ddl: Dict) -> str:
        """Generate the secondary-index statements for a table."""
        return "\n".join(f"{script};" for script in self.index(ddl))

    def gen_comment(self, table_ddl: Dict) -> str:
        """Generate COMMENT ON statements for each column and the table."""
        script = ""
        for column in table_ddl["columns"]:
            column_comment = column["comment"]
            script += (
                f"COMMENT ON COLUMN {table_ddl['table_name']}.{column['name']} IS '{column_comment}';"
                + "\n"
            )
        table_comment = table_ddl["comment"]
        if table_comment:
            script += (
                f"COMMENT ON TABLE {table_ddl['table_name']} IS '{table_comment}';\n"
            )
        return script

    def gen_pk(self, table_name: str) -> str:
        """Primary keys are declared inline in gen_create, nothing extra here."""
        return ""

    def gen_uk(self, table_ddl: Dict) -> str:
        """Generate unique indexes named uk_<table>_01, uk_<table>_02, ..."""
        script = ""
        uk_list = list(Convertor.unique_index(table_ddl))
        for idx, (table_name, _, uk_columns) in enumerate(uk_list, 1):
            uk_name = f"uk_{table_name}_{idx:02d}"
            script += f"CREATE UNIQUE INDEX {uk_name} ON {table_name} ({', '.join(uk_columns)});\n"
        return script

    def gen_insert(self, table_name: str) -> str:
        """Copy the seed INSERT statements, adapting them for DM8.

        The identity id column (and its literal value 1) is stripped from the
        statements, and MySQL's now() is replaced with sysdate.
        """
        inserts = []
        for insert_script in Convertor.inserts(table_name, self.content):
            insert_script = (
                insert_script.replace("(id,", "(")
                .replace("VALUES (1,", "VALUES (")
                .replace("now(),", "sysdate,")
            )
            inserts.append(insert_script)
        # Assemble the insert section of the output script.
        script = ""
        if inserts:
            inserts_lines = "\n".join(inserts)
            script += f"""\n\n
{inserts_lines}"""
        return script
def main():
parser = argparse.ArgumentParser(description="Snail Job Database Transfer Tool")
parser.add_argument(
"type",
type=str,
help="Target database type",
choices=["postgre", "oracle", "sqlserver"],
choices=["postgre", "oracle", "sqlserver", "dm8"],
)
args = parser.parse_args()
@ -644,6 +787,8 @@ def main():
convertor = OracleConvertor(sql_file)
elif args.type == "sqlserver":
convertor = SQLServerConvertor(sql_file)
elif args.type == "dm8":
convertor = DM8Convertor(sql_file)
else:
raise NotImplementedError(f"Database type not supported: {args.type}")

View File

@ -101,6 +101,11 @@
<artifactId>snail-job-sqlserver-datasource</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>com.aizuda</groupId>
<artifactId>snail-job-dm8-datasource</artifactId>
<version>${revision}</version>
</dependency>
<dependency>
<groupId>cn.hutool</groupId>
<artifactId>hutool-http</artifactId>

View File

@ -26,6 +26,7 @@
<module>snail-job-postgres-datasource</module>
<module>snail-job-oracle-datasource</module>
<module>snail-job-sqlserver-datasource</module>
<module>snail-job-dm8-datasource</module>
<module>snail-job-datasource-template</module>
</modules>

View File

@ -48,7 +48,8 @@ public abstract class AbstractConfigAccess<T> implements ConfigAccess<T> {
DbTypeEnum.MARIADB.getDb(),
DbTypeEnum.POSTGRES.getDb(),
DbTypeEnum.ORACLE.getDb(),
DbTypeEnum.SQLSERVER.getDb());
DbTypeEnum.SQLSERVER.getDb(),
DbTypeEnum.DM.getDb());
protected DbTypeEnum getDbType() {
return DbUtils.getDbType();

View File

@ -22,7 +22,8 @@ public abstract class AbstractTaskAccess<T> implements TaskAccess<T> {
DbTypeEnum.MARIADB.getDb(),
DbTypeEnum.POSTGRES.getDb(),
DbTypeEnum.ORACLE.getDb(),
DbTypeEnum.SQLSERVER.getDb());
DbTypeEnum.SQLSERVER.getDb(),
DbTypeEnum.DM.getDb());
protected DbTypeEnum getDbType() {
return DbUtils.getDbType();

View File

@ -19,7 +19,8 @@ public enum DbTypeEnum {
MARIADB("mariadb", "MariaDB数据库", DbType.MARIADB),
POSTGRES("postgresql", "Postgres数据库", DbType.POSTGRE_SQL),
ORACLE("oracle", "Oracle数据库", DbType.ORACLE_12C),
SQLSERVER("sqlserver", "SQLServer数据库", DbType.SQL_SERVER);
SQLSERVER("sqlserver", "SQLServer数据库", DbType.SQL_SERVER),
DM("dm", "达梦数据库", DbType.DM);
private final String db;
private final String desc;

View File

@ -0,0 +1,35 @@
HELP.md
target/
!.mvn/wrapper/maven-wrapper.jar
!**/src/main/**/target/
!**/src/test/**/target/
### STS ###
.apt_generated
.classpath
.factorypath
.project
.settings
.springBeans
.sts4-cache
### IntelliJ IDEA ###
.idea
*.iws
*.iml
*.ipr
### NetBeans ###
/nbproject/private/
/nbbuild/
/dist/
/nbdist/
/.nb-gradle/
build/
!**/src/main/**/build/
!**/src/test/**/build/
### VS Code ###
.vscode/
.flattened-pom.xml

View File

@ -0,0 +1,59 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Maven module for the DM8 (Dameng) datasource support; bundles the DmJdbcDriver18 JDBC driver. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>com.aizuda</groupId>
        <artifactId>snail-job-datasource</artifactId>
        <version>${revision}</version>
        <relativePath>../pom.xml</relativePath>
    </parent>
    <artifactId>snail-job-dm8-datasource</artifactId>
    <name>snail-job-dm8-datasource</name>
    <description>snail-job-dm8-datasource</description>
    <packaging>jar</packaging>
    <properties>
        <java.version>17</java.version>
        <dameng.version>8.1.3.62</dameng.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>com.aizuda</groupId>
            <artifactId>snail-job-datasource-template</artifactId>
        </dependency>
        <dependency>
            <groupId>com.dameng</groupId>
            <artifactId>DmJdbcDriver18</artifactId>
            <version>${dameng.version}</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
                <configuration>
                    <mainClass>none</mainClass> <!-- Skip main-class lookup in this library module, avoiding the "Unable to find main class" error -->
                    <classifier>execute</classifier> <!-- Keep the plain jar alongside the repackaged one so dependent modules can resolve classes from this module -->
                </configuration>
                <executions>
                    <execution>
                        <goals>
                            <goal>repackage</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for JobLogMessageMapper. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.JobLogMessageMapper">
    <!-- Batch insert via a single multi-row VALUES statement. -->
    <insert id="insertBatch" parameterType="java.util.List">
        INSERT INTO sj_job_log_message (namespace_id, group_name, job_id, task_batch_id, task_id,
        log_num, message, create_dt, real_time)
        VALUES
        <foreach collection="list" item="item" separator=",">
            (
            #{item.namespaceId},
            #{item.groupName},
            #{item.jobId},
            #{item.taskBatchId},
            #{item.taskId},
            #{item.logNum},
            #{item.message},
            #{item.createDt},
            #{item.realTime}
            )
        </foreach>
    </insert>
</mapper>

View File

@ -0,0 +1,13 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for JobMapper. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.JobMapper">
    <!-- Batch update wrapped in BEGIN ... END so it runs as one anonymous block. -->
    <update id="updateBatchNextTriggerAtById" parameterType="java.util.List">
        <foreach collection="list" item="item" open="BEGIN" separator=";" close=";END;">
            UPDATE sj_job
            SET next_trigger_at = #{item.nextTriggerAt}
            WHERE id = #{item.id}
        </foreach>
    </update>
</mapper>

View File

@ -0,0 +1,94 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for JobSummaryMapper: batch writes plus dashboard aggregations. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.JobSummaryMapper">
    <!-- Batch insert; useGeneratedKeys disabled for the multi-row VALUES syntax. -->
    <insert id="insertBatch" parameterType="java.util.List" useGeneratedKeys="false" keyProperty="id">
        INSERT INTO sj_job_summary (namespace_id, group_name, business_id, trigger_at, system_task_type,
        success_num,fail_num,fail_reason,stop_num,stop_reason, cancel_num,cancel_reason)
        VALUES
        <foreach collection="list" item="item" separator=",">
            (
            #{item.namespaceId},
            #{item.groupName},
            #{item.businessId},
            #{item.triggerAt},
            #{item.systemTaskType},
            #{item.successNum},
            #{item.failNum},
            #{item.failReason},
            #{item.stopNum},
            #{item.stopReason},
            #{item.cancelNum},
            #{item.cancelReason}
            )
        </foreach>
    </insert>
    <!-- Batch update wrapped in BEGIN ... END so it runs as one anonymous block. -->
    <update id="updateBatch" parameterType="java.util.List">
        <foreach collection="list" item="item" index="index" open="BEGIN" separator=";" close=";END;">
            UPDATE sj_job_summary
            SET success_num = #{item.successNum},
            fail_num = #{item.failNum},
            fail_reason = #{item.failReason},
            stop_num = #{item.stopNum},
            stop_reason = #{item.stopReason},
            cancel_num = #{item.cancelNum},
            cancel_reason = #{item.cancelReason}
            WHERE system_task_type = #{item.systemTaskType}
            AND business_id = #{item.businessId}
            AND trigger_at = #{item.triggerAt}
        </foreach>
    </update>
    <!-- Dashboard line chart: totals bucketed by TO_CHAR(trigger_at, #{dateFormat}). -->
    <select id="selectJobLineList"
            resultType="com.aizuda.snailjob.template.datasource.persistence.dataobject.DashboardLineResponseDO">
        SELECT createDt AS createDt,
        NVL(SUM(success_num), 0) AS success,
        NVL(SUM(fail_num), 0) AS failNum,
        NVL(SUM(stop_num), 0) AS stop,
        NVL(SUM(cancel_num), 0) AS cancel,
        NVL(SUM(fail_num + stop_num + cancel_num), 0) AS fail,
        NVL(SUM(success_num + fail_num + stop_num + cancel_num), 0) AS total
        FROM (
        SELECT
        TO_CHAR(trigger_at, #{dateFormat}) AS createDt,
        success_num,
        fail_num,
        stop_num,
        cancel_num
        FROM sj_job_summary
        ${ew.customSqlSegment}
        )
        GROUP BY createDt
    </select>
    <!-- Dashboard card: overall job counters (NVL guards the zero-row case). -->
    <select id="selectJobTask"
            resultType="com.aizuda.snailjob.template.datasource.persistence.dataobject.DashboardCardResponseDO$JobTask">
        SELECT NVL(sum(success_num), 0) AS successNum,
        NVL(sum(stop_num), 0) AS stopNum,
        NVL(sum(cancel_num), 0) AS cancelNum,
        NVL(sum(fail_num), 0) AS failNum,
        NVL(sum(success_num + fail_num + stop_num + cancel_num), 0) AS totalNum
        FROM sj_job_summary
        ${ew.customSqlSegment}
    </select>
    <!-- Top-10 failing names; ROWNUM is applied outside the ORDER BY subquery so the limit follows the sort. -->
    <select id="selectDashboardRankList"
            resultType="com.aizuda.snailjob.template.datasource.persistence.dataobject.DashboardRetryLineResponseDO$Rank">
        SELECT * FROM (
        SELECT
        <if test="systemTaskType == 3">
            group_name || '/' || (SELECT job_name FROM sj_job WHERE id = business_id) AS name,
        </if>
        <if test="systemTaskType == 4">
            group_name || '/' || (SELECT workflow_name FROM sj_workflow WHERE id = business_id) AS name,
        </if>
        SUM(fail_num) AS total
        FROM sj_job_summary
        ${ew.customSqlSegment}
        HAVING SUM(fail_num) > 0
        ORDER BY total DESC)
        WHERE ROWNUM <![CDATA[ <= ]]> 10
    </select>
</mapper>

View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for RetryDeadLetterMapper. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.RetryDeadLetterMapper">
    <!-- Batch insert; useGeneratedKeys disabled for the multi-row VALUES syntax. -->
    <insert id="insertBatch" parameterType="java.util.List" useGeneratedKeys="false" keyProperty="id">
        INSERT INTO sj_retry_dead_letter (namespace_id, unique_id, group_name, scene_name,
        idempotent_id, biz_no, executor_name, args_str,
        ext_attrs, create_dt)
        VALUES
        <foreach collection="retryDeadLetters" item="retryDeadLetter" separator=",">
            (
            #{retryDeadLetter.namespaceId,jdbcType=VARCHAR}, #{retryDeadLetter.uniqueId,jdbcType=VARCHAR},
            #{retryDeadLetter.groupName,jdbcType=VARCHAR}, #{retryDeadLetter.sceneName,jdbcType=VARCHAR},
            #{retryDeadLetter.idempotentId,jdbcType=VARCHAR}, #{retryDeadLetter.bizNo,jdbcType=VARCHAR},
            #{retryDeadLetter.executorName,jdbcType=VARCHAR}, #{retryDeadLetter.argsStr,jdbcType=VARCHAR},
            #{retryDeadLetter.extAttrs,jdbcType=VARCHAR}, #{retryDeadLetter.createDt,jdbcType=TIMESTAMP}
            )
        </foreach>
    </insert>
</mapper>

View File

@ -0,0 +1,99 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for RetrySummaryMapper: batch writes plus dashboard aggregations. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.RetrySummaryMapper">
    <!-- Batch insert; useGeneratedKeys disabled for the multi-row VALUES syntax. -->
    <insert id="insertBatch" parameterType="java.util.List" useGeneratedKeys="false" keyProperty="id">
        INSERT INTO sj_retry_summary (namespace_id, group_name, scene_name, trigger_at,
        running_num, finish_num, max_count_num, suspend_num)
        VALUES
        <foreach collection="list" item="item" separator=",">
            (
            #{item.namespaceId},
            #{item.groupName},
            #{item.sceneName},
            #{item.triggerAt},
            #{item.runningNum},
            #{item.finishNum},
            #{item.maxCountNum},
            #{item.suspendNum}
            )
        </foreach>
    </insert>
    <!-- Batch update wrapped in BEGIN ... END so it runs as one anonymous block. -->
    <update id="updateBatch" parameterType="java.util.List">
        <foreach collection="list" item="item" index="index" open="BEGIN" separator=";" close=";END;">
            UPDATE sj_retry_summary
            SET running_num = #{item.runningNum},
            finish_num = #{item.finishNum},
            max_count_num = #{item.maxCountNum},
            suspend_num = #{item.suspendNum}
            WHERE trigger_at = #{item.triggerAt}
            AND group_name = #{item.groupName}
            AND namespace_id = #{item.namespaceId}
            AND scene_name = #{item.sceneName}
        </foreach>
    </update>
    <!-- Dashboard card: overall retry counters (NVL guards the zero-row case). -->
    <select id="selectRetryTask"
            resultType="com.aizuda.snailjob.template.datasource.persistence.dataobject.DashboardCardResponseDO$RetryTask">
        SELECT NVL(SUM(running_num), 0) AS runningNum,
        NVL(SUM(finish_num), 0) AS finishNum,
        NVL(SUM(max_count_num), 0) AS maxCountNum,
        NVL(SUM(suspend_num), 0) AS suspendNum,
        NVL(SUM(running_num + finish_num + max_count_num + suspend_num), 0) AS totalNum
        FROM sj_retry_summary
        ${ew.customSqlSegment}
    </select>
    <!-- Bar chart: first 7 rows of the caller-filtered summary (ROWNUM limit). -->
    <select id="selectRetryTaskBarList"
            resultType="com.aizuda.snailjob.template.datasource.persistence.dataobject.DashboardCardResponseDO$RetryTask">
        SELECT *
        FROM
        (
        SELECT trigger_at,
        running_num,
        finish_num,
        max_count_num,
        suspend_num
        FROM sj_retry_summary
        ${ew.customSqlSegment}
        )
        WHERE ROWNUM <![CDATA[ <= ]]> 7
    </select>
    <!-- Line chart: totals bucketed by TO_CHAR(create_dt, #{dateFormat}). -->
    <select id="selectRetryLineList"
            resultType="com.aizuda.snailjob.template.datasource.persistence.dataobject.DashboardLineResponseDO">
        SELECT createDt AS createDt,
        NVL(SUM(finish_num), 0) AS successNum,
        NVL(SUM(running_num), 0) AS runningNum,
        NVL(SUM(max_count_num), 0) AS maxCountNum,
        NVL(SUM(suspend_num), 0) AS suspendNum,
        NVL(SUM(finish_num + running_num + max_count_num + suspend_num), 0) AS total
        FROM (
        SELECT
        TO_CHAR(create_dt, #{dateFormat}) AS createDt,
        finish_num,
        running_num,
        max_count_num,
        suspend_num
        FROM sj_retry_summary
        ${ew.customSqlSegment}
        )
        GROUP BY createDt
    </select>
    <!-- Top-10 group/scene by total activity; ROWNUM applied outside the ORDER BY subquery. -->
    <select id="selectDashboardRankList"
            resultType="com.aizuda.snailjob.template.datasource.persistence.dataobject.DashboardRetryLineResponseDO$Rank">
        SELECT
        *
        FROM (
        SELECT group_name || '/' || scene_name AS name,
        SUM(running_num + finish_num + max_count_num + suspend_num) AS total
        FROM sj_retry_summary
        ${ew.customSqlSegment}
        HAVING SUM(running_num + finish_num + max_count_num + suspend_num) > 0
        ORDER BY SUM(running_num + finish_num + max_count_num + suspend_num) DESC)
        WHERE ROWNUM <![CDATA[ <= ]]> 10
    </select>
</mapper>

View File

@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for RetryTaskLogMapper. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.RetryTaskLogMapper">
    <!-- Batch insert mapping; useGeneratedKeys disabled for the multi-row VALUES syntax. -->
    <insert id="insertBatch" parameterType="java.util.List" useGeneratedKeys="false" keyProperty="id">
        INSERT INTO sj_retry_task_log (unique_id, group_name, scene_name, idempotent_id, biz_no, executor_name,
        args_str, ext_attrs, task_type, create_dt, namespace_id)
        VALUES
        <foreach collection="list" item="item" separator=",">
            (
            #{item.uniqueId}, #{item.groupName}, #{item.sceneName}, #{item.idempotentId},
            #{item.bizNo}, #{item.executorName}, #{item.argsStr}, #{item.extAttrs},
            #{item.taskType}, #{item.createDt}, #{item.namespaceId}
            )
        </foreach>
    </insert>
</mapper>

View File

@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for RetryTaskLogMessageMapper. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.RetryTaskLogMessageMapper">
    <!-- Batch insert via a single multi-row VALUES statement. -->
    <insert id="insertBatch" parameterType="java.util.List">
        INSERT INTO sj_retry_task_log_message (namespace_id, group_name, unique_id, log_num, message,
        create_dt, real_time)
        VALUES
        <foreach collection="list" item="item" separator=",">
            (
            #{item.namespaceId},
            #{item.groupName},
            #{item.uniqueId},
            #{item.logNum},
            #{item.message},
            #{item.createDt},
            #{item.realTime}
            )
        </foreach>
    </insert>
    <!-- Batch update wrapped in BEGIN ... END so it runs as one anonymous block. -->
    <update id="updateBatch" parameterType="java.util.List">
        <foreach collection="list" item="item" index="index" open="BEGIN" separator=";" close=";END;">
            UPDATE sj_retry_task_log_message
            SET message = #{item.message}, log_num = #{item.logNum}
            WHERE id = #{item.id}
        </foreach>
    </update>
</mapper>

View File

@ -0,0 +1,26 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for RetryTaskMapper (partitioned sj_retry_task_<n> tables). -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.RetryTaskMapper">
    <!-- Batch insert via a single multi-row VALUES statement. -->
    <insert id="insertBatch" parameterType="java.util.List">
        INSERT INTO sj_retry_task (namespace_id, unique_id, group_name, scene_name, idempotent_id, biz_no,
        executor_name, args_str, ext_attrs, next_trigger_at, task_type, retry_status, create_dt)
        VALUES
        <foreach collection="list" item="item" separator=",">
            (
            #{item.namespaceId}, #{item.uniqueId}, #{item.groupName}, #{item.sceneName}, #{item.idempotentId},
            #{item.bizNo}, #{item.executorName}, #{item.argsStr}, #{item.extAttrs}, #{item.nextTriggerAt},
            #{item.taskType}, #{item.retryStatus}, #{item.createDt}
            )
        </foreach>
    </insert>
    <!-- Batch update against the caller-selected table partition (${partition});
         wrapped in BEGIN ... END so it runs as one anonymous block. -->
    <update id="updateBatchNextTriggerAtById" parameterType="java.util.List">
        <foreach collection="list" item="item" index="index" open="BEGIN" separator=";" close=";END;">
            UPDATE sj_retry_task_${partition}
            SET next_trigger_at = #{item.nextTriggerAt}
            WHERE id = #{item.id}
        </foreach>
    </update>
</mapper>

View File

@ -0,0 +1,34 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for ServerNodeMapper. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.ServerNodeMapper">
    <insert id="insertBatch" parameterType="java.util.List" useGeneratedKeys="false" keyProperty="id">
        <!-- useGeneratedKeys="false": otherwise the driver fails with ORA-00933 "SQL command not properly ended" -->
        INSERT INTO sj_server_node (namespace_id, group_name, host_id, host_ip, host_port,
        expire_at, node_type, ext_attrs, create_dt)
        VALUES
        <foreach collection="records" item="item" index="index" separator=",">
            (
            #{item.namespaceId,jdbcType=VARCHAR},
            #{item.groupName,jdbcType=VARCHAR},
            #{item.hostId,jdbcType=VARCHAR},
            #{item.hostIp,jdbcType=VARCHAR},
            #{item.hostPort,jdbcType=INTEGER},
            #{item.expireAt,jdbcType=TIMESTAMP},
            #{item.nodeType,jdbcType=TINYINT},
            #{item.extAttrs,jdbcType=VARCHAR},
            #{item.createDt,jdbcType=TIMESTAMP}
            )
        </foreach>
    </insert>
    <!-- Heartbeat refresh: batch update wrapped in BEGIN ... END as one anonymous block. -->
    <update id="updateBatchExpireAt" parameterType="java.util.List">
        <foreach collection="list" item="item" open="BEGIN" separator=";" close=";END;">
            UPDATE sj_server_node
            SET expire_at = #{item.expireAt}
            WHERE host_id = #{item.hostId}
            AND host_ip = #{item.hostIp}
        </foreach>
    </update>
</mapper>

View File

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- DM8 mapper overrides for WorkflowMapper. -->
<mapper namespace="com.aizuda.snailjob.template.datasource.persistence.mapper.WorkflowMapper">
    <!-- Batch update wrapped in BEGIN ... END so it runs as one anonymous block. -->
    <update id="updateBatchNextTriggerAtById" parameterType="java.util.List">
        <foreach collection="list" item="item" open="BEGIN" separator=";" close=";END;">
            UPDATE sj_workflow
            SET next_trigger_at = #{item.nextTriggerAt}
            WHERE id = #{item.id}
        </foreach>
    </update>
</mapper>

View File

@ -63,7 +63,7 @@ public enum DashboardLineEnum {
default:
return "yyyy-MM-dd";
}
} else { // Oracle, Postgres
} else { // Oracle, Postgres, DM
switch (mode) {
case YEAR:
return "yyyy-MM";

View File

@ -45,7 +45,8 @@ public class JdbcLockProvider implements LockStorage, Lifecycle {
DbTypeEnum.MARIADB.getDb(),
DbTypeEnum.POSTGRES.getDb(),
DbTypeEnum.ORACLE.getDb(),
DbTypeEnum.SQLSERVER.getDb());
DbTypeEnum.SQLSERVER.getDb(),
DbTypeEnum.DM.getDb());
private final DistributedLockMapper distributedLockMapper;
private final PlatformTransactionManager platformTransactionManager;

View File

@ -15,26 +15,31 @@ spring:
url: jdbc:mysql://localhost:3306/snail_job?useSSL=false&characterEncoding=utf8&useUnicode=true
username: root
password: root
## postgres
# driver-class-name: org.postgresql.Driver
# url: jdbc:postgresql://localhost:5432/snail_job?useUnicode=true&characterEncoding=utf8&useSSL=true&autoReconnect=true&reWriteBatchedInserts=true
# username: root
# password: root
## Oracle
# driver-class-name: oracle.jdbc.OracleDriver
# url: jdbc:oracle:thin:@//localhost:1521/XEPDB1
# username: snail_job
# password: SnailJob
## SQL Server 注意:由于system_user为SQLServer系统函数因此SQLServer需要启用前缀配置请配置mybatis-plus.global-config.db-config.table-prefix: er_
# driverClassName: com.microsoft.sqlserver.jdbc.SQLServerDriver
# url: jdbc:sqlserver://localhost:1433;DatabaseName=snail_job;SelectMethod=cursor;encrypt=false;rewriteBatchedStatements=true
# username: SA
# password: SnailJob@24
## postgres
# driver-class-name: org.postgresql.Driver
# url: jdbc:postgresql://localhost:5432/snail_job?useUnicode=true&characterEncoding=utf8&useSSL=true&autoReconnect=true&reWriteBatchedInserts=true
# username: root
# password: root
## Oracle
# driver-class-name: oracle.jdbc.OracleDriver
# url: jdbc:oracle:thin:@//localhost:1521/XEPDB1
# username: snail_job
# password: SnailJob
## SQL Server 注意:由于system_user为SQLServer系统函数因此SQLServer需要启用前缀配置请配置mybatis-plus.global-config.db-config.table-prefix: er_
# driverClassName: com.microsoft.sqlserver.jdbc.SQLServerDriver
# url: jdbc:sqlserver://localhost:1433;DatabaseName=snail_job;SelectMethod=cursor;encrypt=false;rewriteBatchedStatements=true
# username: SA
# password: SnailJob@24
## mariadb
# driver-class-name: org.mariadb.jdbc.Driver
# url: jdbc:mariadb://localhost:3308/snail_job?useSSL=false&characterEncoding=utf8&useUnicode=true
# username: root
# password: root
## dm8
# driver-class-name: dm.jdbc.driver.DmDriver
# url: jdbc:dm://127.0.0.1:5236
# username: SYSDBA
# password: SYSDBA001
type: com.zaxxer.hikari.HikariDataSource
hikari:
connection-timeout: 30000

View File

@ -63,6 +63,10 @@
<groupId>com.aizuda</groupId>
<artifactId>snail-job-sqlserver-datasource</artifactId>
</dependency>
<dependency>
<groupId>com.aizuda</groupId>
<artifactId>snail-job-dm8-datasource</artifactId>
</dependency>
<dependency>
<groupId>org.mapstruct</groupId>
<artifactId>mapstruct</artifactId>

View File

@ -359,42 +359,36 @@ public class GroupConfigServiceImpl implements GroupConfigService {
@Override
public List<Integer> getTablePartitionList() {
DataSource dataSource = jdbcTemplate.getDataSource();
Connection connection = null;
try {
connection = dataSource.getConnection();
try (Connection connection = dataSource.getConnection()) {
String catalog = connection.getCatalog();
String schema = connection.getSchema();
String tableNamePattern = "sj_retry_task_%";
DbTypeEnum dbType = DbUtils.getDbType();
if (DbTypeEnum.ORACLE.getDb().equals(dbType.getDb())) {
// Oracle, DM 查询表名大写
if (DbTypeEnum.ORACLE.getDb().equals(dbType.getDb()) || DbTypeEnum.DM.getDb().equals(dbType.getDb())) {
tableNamePattern = tableNamePattern.toUpperCase();
}
DatabaseMetaData metaData = connection.getMetaData();
ResultSet tables = metaData.getTables(catalog, schema, tableNamePattern, new String[]{"TABLE"});
ResultSet tableRs = metaData.getTables(catalog, schema, tableNamePattern, new String[]{"TABLE"});
// 输出表名
// 获取表名
List<String> tableList = new ArrayList<>();
while (tables.next()) {
String tableName = tables.getString("TABLE_NAME");
while (tableRs.next()) {
String tableName = tableRs.getString("TABLE_NAME");
tableList.add(tableName);
}
return tableList.stream().map(ReUtil::getFirstNumber).filter(i ->
!Objects.isNull(i)).distinct()
return tableList.stream()
.map(ReUtil::getFirstNumber)
.filter(Objects::nonNull)
.distinct()
.collect(Collectors.toList());
} catch (SQLException ignored) {
} finally {
if (Objects.nonNull(connection)) {
try {
connection.close();
} catch (SQLException ignored) {
}
}
}
return Lists.newArrayList();
return Collections.emptyList();
}
@Override
@ -411,7 +405,7 @@ public class GroupConfigServiceImpl implements GroupConfigService {
.in(GroupConfig::getGroupName, groupSet));
Assert.isTrue(CollUtil.isEmpty(configs),
() -> new SnailJobServerException("导入失败. 原因: 组{}已存在", StreamUtils.toSet(configs, GroupConfig::getGroupName)));
() -> new SnailJobServerException("导入失败. 原因: 组{}已存在", StreamUtils.toSet(configs, GroupConfig::getGroupName)));
for (final GroupConfigRequestVO groupConfigRequestVO : requestList) {
@ -432,13 +426,13 @@ public class GroupConfigServiceImpl implements GroupConfigService {
List<GroupConfigRequestVO> allRequestList = Lists.newArrayList();
PartitionTaskUtils.process((startId -> {
List<GroupConfig> groupConfigs = accessTemplate.getGroupConfigAccess().listPage(new PageDTO<>(0, 100),
new LambdaQueryWrapper<GroupConfig>()
.ge(GroupConfig::getId, startId)
.eq(GroupConfig::getNamespaceId, namespaceId)
.eq(Objects.nonNull(exportGroupVO.getGroupStatus()), GroupConfig::getGroupStatus, exportGroupVO.getGroupStatus())
.in(CollUtil.isNotEmpty(exportGroupVO.getGroupIds()), GroupConfig::getId, exportGroupVO.getGroupIds())
.likeRight(StrUtil.isNotBlank(exportGroupVO.getGroupName()), GroupConfig::getGroupName, StrUtil.trim(exportGroupVO.getGroupName()))
.orderByAsc(GroupConfig::getId)
new LambdaQueryWrapper<GroupConfig>()
.ge(GroupConfig::getId, startId)
.eq(GroupConfig::getNamespaceId, namespaceId)
.eq(Objects.nonNull(exportGroupVO.getGroupStatus()), GroupConfig::getGroupStatus, exportGroupVO.getGroupStatus())
.in(CollUtil.isNotEmpty(exportGroupVO.getGroupIds()), GroupConfig::getId, exportGroupVO.getGroupIds())
.likeRight(StrUtil.isNotBlank(exportGroupVO.getGroupName()), GroupConfig::getGroupName, StrUtil.trim(exportGroupVO.getGroupName()))
.orderByAsc(GroupConfig::getId)
).getRecords();
return groupConfigs.stream().map(GroupConfigPartitionTask::new).toList();
}), partitionTasks -> {
@ -455,6 +449,7 @@ public class GroupConfigServiceImpl implements GroupConfigService {
private static class GroupConfigPartitionTask extends PartitionTask {
// 这里就直接放GroupConfig为了后面若加字段不需要再这里在调整了
private final GroupConfig config;
public GroupConfigPartitionTask(@NotNull GroupConfig config) {
this.config = config;
setId(config.getId());