I. Use Dangdang's Elastic-Job to register scheduled jobs at Spring Boot startup, with the job configuration stored in the database so it can be loaded (and reloaded) dynamically.

Straight to the code.

Add the dependencies

<properties>
    <elastic-job.version>2.1.5</elastic-job.version>
</properties>

<!-- elastic-job-lite core modules -->
<dependency>
    <groupId>com.dangdang</groupId>
    <artifactId>elastic-job-common-core</artifactId>
    <version>${elastic-job.version}</version>
</dependency>
<dependency>
    <groupId>com.dangdang</groupId>
    <artifactId>elastic-job-lite-core</artifactId>
    <version>${elastic-job.version}</version>
</dependency>
<dependency>
    <groupId>com.dangdang</groupId>
    <artifactId>elastic-job-lite-spring</artifactId>
    <version>${elastic-job.version}</version>
</dependency>

Define the job class

package cn.quantgroup.cashloanflow.config.jobevent.job;

import cn.quantgroup.cashloanflow.service.job.ITimeJobService;
import cn.quantgroup.cashloanflow.util.DateUtil;
import com.dangdang.ddframe.job.api.ShardingContext;
import com.dangdang.ddframe.job.api.simple.SimpleJob;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.Date;
/**
 * Scans the CallbackFailRecord table every hour.
 */
@Slf4j
@Component
public class CallbackFailSimpleJob implements SimpleJob {

    @Autowired
    private ITimeJobService timeJobService;


    @Override
    public void execute(ShardingContext shardingContext) {
        log.info("[CallbackFailRecord回调失败监控][JobName:{}] [ShardingItem:{}] [ShardingTotalCount:{}] [TaskId:{}] [JobParameter:{}] [Time:{}] start",
                shardingContext.getJobName(), shardingContext.getShardingItem(), shardingContext.getShardingTotalCount(), shardingContext.getTaskId(), shardingContext.getJobParameter(), DateUtil.format(new Date(), "yyyy-MM-dd HH:mm:ss"));
         timeJobService.callbackFailSendMsg();
    }
}

Register and load the scheduled jobs

package cn.quantgroup.cashloanflow.config.jobevent;

import cn.quantgroup.cashloanflow.entity.cashloanflow.ElasticJobConfig;
import cn.quantgroup.cashloanflow.repository.cashloanflow.IElasticJobConfigRepository;
import cn.quantgroup.cashloanflow.util.ApplicationContextHolder;
import com.dangdang.ddframe.job.api.ElasticJob;
import com.dangdang.ddframe.job.api.simple.SimpleJob;
import com.dangdang.ddframe.job.config.JobCoreConfiguration;
import com.dangdang.ddframe.job.config.JobTypeConfiguration;
import com.dangdang.ddframe.job.config.simple.SimpleJobConfiguration;
import com.dangdang.ddframe.job.event.JobEventConfiguration;
import com.dangdang.ddframe.job.lite.config.LiteJobConfiguration;
import com.dangdang.ddframe.job.lite.spring.api.SpringJobScheduler;
import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperRegistryCenter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.List;

/**
 * Jobs can be reconfigured at runtime (JobConfigUpdateController exposes the endpoint).
 * Created with suntao on 2019/3/19
 */
@Slf4j
@Component
public class ElasticJobLoader2 implements CommandLineRunner {

    @Autowired
    private IElasticJobConfigRepository elasticJobConfigRepository;

    @Resource
    private ZookeeperRegistryCenter zookeeperRegistryCenter;

    @Resource
    private JobEventConfiguration jobEventConfiguration;



    @Override
    public void run(String... args) {

        List<ElasticJobConfig> elasticJobConfigList = elasticJobConfigRepository.findAllActive();
        if (elasticJobConfigList == null || elasticJobConfigList.isEmpty()) {
            return;
        }
        elasticJobConfigList.forEach(elasticJobConfig -> registryJob(elasticJobConfig));
        log.info("All scheduled jobs have been loaded");
    }


    /**
     * Reload the jobs with the given ids from the database and re-register them.
     * @param ids primary keys in elastic_job_config
     */
    public void reloadJobByIds(List<Long> ids) {
        List<ElasticJobConfig> elasticJobConfigList = elasticJobConfigRepository.findAll(ids);
        elasticJobConfigList.forEach(elasticJobConfig -> registryJob(elasticJobConfig));
    }

    private void registryJob(ElasticJobConfig elasticJobConfigBean) {
        try {
            Class<? extends ElasticJob> jobClass = (Class<? extends ElasticJob>) Class
                    .forName(elasticJobConfigBean.getJobClass());
            ElasticJob elasticJob = getInstance(jobClass);
            SpringJobScheduler jobScheduler = jobScheduler(elasticJob, elasticJobConfigBean);
            jobScheduler.init();
            log.info("初始化定时任务 {} ", elasticJobConfigBean.toString());
        } catch (Exception e) {
            log.error("注册Job出错:{} ", elasticJobConfigBean.toString(), e);
        }

    }


    /**
     * Look up the job bean from the Spring context so that its @Autowired dependencies are injected.
     * @param jobClass the ElasticJob implementation class
     * @return the Spring-managed job instance
     */
    private ElasticJob getInstance(Class<? extends ElasticJob> jobClass) {
        return ApplicationContextHolder.getBean(jobClass);
    }

    /**
     * Build a SpringJobScheduler for the given job.
     *
     * @param elasticJob the job instance
     * @param elasticJobConfigBean the configuration row loaded from the database
     * @return the scheduler, ready to be initialized
     */
    private SpringJobScheduler jobScheduler(ElasticJob elasticJob, ElasticJobConfig elasticJobConfigBean) {
        // overwrite(true) lets the configuration loaded from the database override what is already stored in ZooKeeper
        LiteJobConfiguration liteJobConfiguration = LiteJobConfiguration
                .newBuilder(jobConfiguration(elasticJob, elasticJobConfigBean))
                .overwrite(true)
                .build();
        return new SpringJobScheduler(elasticJob, zookeeperRegistryCenter, liteJobConfiguration,
                jobEventConfiguration);
    }

    /**
     * Map a configuration row to an elastic-job JobTypeConfiguration.
     *
     * @param elasticJob the job instance
     * @param elasticJobConfigBean the configuration row loaded from the database
     * @return the job type configuration
     */
    private JobTypeConfiguration jobConfiguration(final ElasticJob elasticJob,
                                                  ElasticJobConfig elasticJobConfigBean) {
        JobCoreConfiguration jobCoreConfiguration = JobCoreConfiguration
                .newBuilder(elasticJobConfigBean.getJobName(), elasticJobConfigBean.getCron(),
                        elasticJobConfigBean.getShardingTotalCount())
                .shardingItemParameters(elasticJobConfigBean.getShardingItemParameters())
                //.misfire(Boolean.valueOf(elasticJobConfigBean.getMisfire()))
                .description(elasticJobConfigBean.getDescription())
                //.failover(Boolean.valueOf(elasticJobConfigBean.getFailover()))
                .jobParameter(elasticJobConfigBean.getJobParameter())
                .build();
        if (elasticJob instanceof SimpleJob) {
            return new SimpleJobConfiguration(jobCoreConfiguration, elasticJob.getClass().getCanonicalName());
        }
//        if (elasticJob instanceof DataflowJob) {
//            return new DataflowJobConfiguration(jobCoreConfiguration, elasticJob.getClass().getCanonicalName(),
//                    Boolean.valueOf(elasticJobConfigBean.getStreamingProcess()));
//        }
        throw new RuntimeException("未知类型定时任务:" + elasticJob.getClass().getName());
    }


}
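
The loader resolves job instances through ApplicationContextHolder, which is not shown in the original code. A minimal sketch of such a helper, assuming it does nothing more than expose the Spring ApplicationContext statically (the real class may differ):

package cn.quantgroup.cashloanflow.util;

import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.stereotype.Component;

/**
 * Holds the ApplicationContext so that code holding only a Class reference
 * can fetch the Spring-managed bean (and thus get @Autowired fields injected).
 */
@Component
public class ApplicationContextHolder implements ApplicationContextAware {

    private static ApplicationContext applicationContext;

    @Override
    public void setApplicationContext(ApplicationContext context) throws BeansException {
        applicationContext = context;
    }

    public static <T> T getBean(Class<T> clazz) {
        return applicationContext.getBean(clazz);
    }
}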

Job event configuration: bring in a DataSource. The loader above reads the job configuration table from this database at startup, and the JobEventRdbConfiguration below additionally lets elastic-job record job execution events to the same database.

package cn.quantgroup.cashloanflow.config.jobevent;

import com.dangdang.ddframe.job.event.JobEventConfiguration;
import com.dangdang.ddframe.job.event.rdb.JobEventRdbConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.annotation.Resource;
import javax.sql.DataSource;

/**
 * Configures the data source used by elastic-job for event tracking.
 * Created with suntao on 2019/3/19
 */
@Configuration
public class JobEventConfig {

    @Resource
    private DataSource dataSource;

    @Bean
    public JobEventConfiguration jobEventConfiguration() {
        return new JobEventRdbConfiguration(dataSource);
    }
}
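
The loader also injects a ZookeeperRegistryCenter, whose bean definition is not shown in the post. A minimal sketch, assuming the ZooKeeper address and namespace come from application properties (zk.serverLists and zk.namespace are placeholder property names):

package cn.quantgroup.cashloanflow.config.jobevent;

import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperConfiguration;
import com.dangdang.ddframe.job.reg.zookeeper.ZookeeperRegistryCenter;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * Registers the ZooKeeper registry center shared by all elastic-job schedulers.
 * initMethod = "init" opens the ZooKeeper connection once the bean is created.
 */
@Configuration
public class JobRegistryConfig {

    @Bean(initMethod = "init")
    public ZookeeperRegistryCenter zookeeperRegistryCenter(
            @Value("${zk.serverLists}") String serverLists,
            @Value("${zk.namespace}") String namespace) {
        return new ZookeeperRegistryCenter(new ZookeeperConfiguration(serverLists, namespace));
    }
}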

SQL:

CREATE TABLE `elastic_job_config` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `job_name` varchar(255) NOT NULL COMMENT 'job name',
  `cron` varchar(50) NOT NULL COMMENT 'cron expression controlling when the job fires',
  `sharding_total_count` int(3) NOT NULL COMMENT 'total number of shards',
  `sharding_item_parameters` varchar(512) DEFAULT NULL COMMENT 'sharding item parameters; item numbers start at 0 and must be less than the total shard count, e.g. 0=a,1=b,2=c',
  `job_parameter` varchar(512) DEFAULT NULL COMMENT 'custom job parameter',
  `failover` tinyint(4) DEFAULT NULL COMMENT 'whether failover is enabled',
  `misfire` tinyint(4) DEFAULT NULL COMMENT 'whether misfired executions are re-run',
  `description` varchar(512) DEFAULT NULL COMMENT 'job description',
  `job_class` varchar(512) NOT NULL COMMENT 'fully qualified job implementation class',
  `streaming_process` tinyint(4) DEFAULT NULL COMMENT 'whether to process data as a stream (Dataflow jobs only)',
  `job_config` text,
  `is_active` tinyint(4) NOT NULL DEFAULT '1' COMMENT 'active flag: 0 = no, 1 = yes, default 1',
  `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'creation time',
  PRIMARY KEY (`id`),
  UNIQUE KEY `jobName` (`job_name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

INSERT INTO `elastic_job_config` (`job_name`, `cron`, `sharding_total_count`, `sharding_item_parameters`, `job_parameter`, `failover`, `misfire`, `description`, `job_class`, `streaming_process`, `job_config`, `is_active`, `created_at`)
VALUES
	('callbackFailSimpleJob', '0 0 0/1 * * ? ', 1, '', NULL, NULL, NULL, '回调失败监控', 'cn.quantgroup.cashloanflow.config.jobevent.job.CallbackFailSimpleJob', NULL, NULL, 1, '2019-04-02 18:05:08'),
	('orderMappingIncrementJob', '0 */10 8-23 * * ?', 1, '', '{\"monitorDays\":7, \"fluctuate\":{\"low\":0.6, \"up\":2, \"absoluteValue\":10}, \"scanMins\":10, \"channelIdList\":[333,158748,158888,159507,159509,159513,159537,159538,159541,159561,159562,159563,159583,159584,159672,159726]}', NULL, NULL, '进件流量监控任务', 'cn.quantgroup.cashloanflow.config.jobevent.job.OrderMappingIncrementSimpleJob', NULL, NULL, 1, '2019-04-03 14:06:35');
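
The loader reads this table through the ElasticJobConfig entity and IElasticJobConfigRepository, neither of which is shown in the post. A rough Spring Data JPA sketch, assuming the columns above map straight onto camelCase fields and that findAllActive simply filters on is_active = 1 (both are assumptions, not the original code):

package cn.quantgroup.cashloanflow.entity.cashloanflow;

import lombok.Data;

import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Table;

/** One row of elastic_job_config; only the fields the loader uses are listed. */
@Data
@Entity
@Table(name = "elastic_job_config")
public class ElasticJobConfig {

    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private Long id;
    private String jobName;
    private String cron;
    private Integer shardingTotalCount;
    private String shardingItemParameters;
    private String jobParameter;
    private String description;
    private String jobClass;
    private Integer isActive;
}

package cn.quantgroup.cashloanflow.repository.cashloanflow;

import cn.quantgroup.cashloanflow.entity.cashloanflow.ElasticJobConfig;
import org.springframework.data.jpa.repository.JpaRepository;
import org.springframework.data.jpa.repository.Query;

import java.util.List;

public interface IElasticJobConfigRepository extends JpaRepository<ElasticJobConfig, Long> {

    /** Only rows flagged is_active = 1 are registered at startup. */
    @Query("select c from ElasticJobConfig c where c.isActive = 1")
    List<ElasticJobConfig> findAllActive();
}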

Additional per-job configuration can be stored in the job_parameter column and read inside the job:

String jobParameter = shardingContext.getJobParameter();
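
For example, the orderMappingIncrementJob row above stores a JSON document in job_parameter. A sketch of parsing it inside execute() with Jackson (the field names follow the sample JSON; how the values are used afterwards is up to the job):

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.io.IOException;

// inside execute(ShardingContext shardingContext)
String jobParameter = shardingContext.getJobParameter();
if (jobParameter != null && !jobParameter.isEmpty()) {
    try {
        JsonNode config = new ObjectMapper().readTree(jobParameter);
        int monitorDays = config.path("monitorDays").asInt(7);  // falls back to 7 when the field is missing
        int scanMins = config.path("scanMins").asInt(10);
        // ... drive the job's behaviour with the parsed values
    } catch (IOException e) {
        log.error("Invalid job_parameter JSON: {}", jobParameter, e);
    }
}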

Because the loader can re-register jobs at runtime, you can change a job's configuration without restarting the service: update the row in the database and call reloadJobByIds through a custom endpoint (a sketch of such a controller follows below).

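The JobConfigUpdateController mentioned earlier is not included in the post. A minimal sketch of such an endpoint (the URL path and request shape are illustrative, not the original controller):

package cn.quantgroup.cashloanflow.config.jobevent;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RestController;

import java.util.List;

/** Re-registers the given jobs from the database without restarting the service. */
@RestController
public class JobConfigUpdateController {

    @Autowired
    private ElasticJobLoader2 elasticJobLoader;

    @PostMapping("/jobs/reload")
    public String reloadJobs(@RequestBody List<Long> ids) {
        elasticJobLoader.reloadJobByIds(ids);
        return "ok";
    }
}

Because the LiteJobConfiguration is built with overwrite(true), re-initializing a scheduler pushes the configuration loaded from the database into ZooKeeper and overrides the previous settings.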

II. Use a starter built as a second-stage wrapper around Dangdang's Elastic-Job, i.e. a ready-made third-party jar. The advantage of this approach is that each scheduled job is configured with an annotation, which is easy to read; the drawback is that job configuration cannot be loaded dynamically, and managing a large number of jobs becomes inconvenient.

1. Add the dependency

<dependency>
    <groupId>com.github.kuhn-he</groupId>
    <artifactId>elastic-job-lite-spring-boot-starter</artifactId>
    <version>2.1.5</version>
</dependency>

This jar is a wrapper built on top of Dangdang's Elastic-Job.

2. Create the job class

@Component
@Slf4j
@ElasticSimpleJob(cron="* * * * * ?",jobName="test123",shardingTotalCount=2,jobParameter="test parameter",shardingItemParameters="0=A,1=B")
public class MySimpleJob implements SimpleJob {

    @Override
    public void execute(ShardingContext shardingContext) {
        log.info("itemId={}", shardingContext.getShardingItem());
    }
}

III. Start Spring Boot and the scheduled jobs start along with it

1. shardingTotalCount=2 splits the job into two shards. If only one server is running, both shards are executed by that server.

Log output:
2018-07-13 17:17:14.204  INFO 88070 --- [g.MySimpleJob-1] c.f.t.e.config.MySimpleJob               : itemId=0
2018-07-13 17:17:14.205  INFO 88070 --- [g.MySimpleJob-2] c.f.t.e.config.MySimpleJob               : itemId=1
2018-07-13 17:17:15.175  INFO 88070 --- [g.MySimpleJob-4] c.f.t.e.config.MySimpleJob               : itemId=0
2018-07-13 17:17:15.175  INFO 88070 --- [g.MySimpleJob-3] c.f.t.e.config.MySimpleJob               : itemId=1
2018-07-13 17:17:16.168  INFO 88070 --- [g.MySimpleJob-6] c.f.t.e.config.MySimpleJob               : itemId=0
2018-07-13 17:17:16.168  INFO 88070 --- [g.MySimpleJob-5] c.f.t.e.config.MySimpleJob               : itemId=1

At this point only one server node is registered in ZooKeeper, so that server executes every shard.

If more servers join, say server1 and server2, then server1 is assigned shard itemId=0 and server2 gets itemId=1.

If the two servers share 6 shards, with

shardingItemParameters="0=A,1=B,2=C,3=D,4=E,5=F"

the distribution is

server1: 0, 1, 2

server2: 3, 4, 5

With 7 shards, shardingItemParameters="0=A,1=B,2=C,3=D,4=E,5=F,6=G":

server1: 0, 1, 2

server2: 3, 4, 5, 6

If server1 or server2 goes down, a new election takes place and the shards are rebalanced onto the servers that are still available.

By default shardingTotalCount=1: the job is executed by one primary server while the others stand by; if the primary goes down, a standby takes over.

2. Elastic-Job is positioned as a distributed scheduler. When the amount of work behind a job grows very large, you can add machines as needed, which removes the throughput bottleneck of running everything serially; within each execution you can group the data by itemId so that no record is processed by more than one shard, avoiding duplicate execution (see the sketch below).
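
For instance, a common pattern, sketched here under the assumption that each record has a numeric primary key (fetchPendingIds and processRecord are placeholder methods, not from the original code), is to let each shard handle only the rows whose id modulo the shard count equals its own item:

@Override
public void execute(ShardingContext shardingContext) {
    int shardingItem = shardingContext.getShardingItem();             // 0 .. shardingTotalCount - 1
    int shardingTotalCount = shardingContext.getShardingTotalCount();

    // Placeholder data access: each server only processes its own slice of the table,
    // e.g. SELECT id FROM some_table WHERE status = 'PENDING' AND MOD(id, :total) = :item
    List<Long> pendingIds = fetchPendingIds(shardingTotalCount, shardingItem);
    for (Long id : pendingIds) {
        processRecord(id);
    }
}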