1 Configuring the Druid data source

Spring Boot uses HikariCP as its default data source. Druid is a database connection pool developed by Alibaba, billed as "built for monitoring": beyond solid functionality, performance, and extensibility, it ships with log-based monitoring that gives good visibility into pool usage and SQL execution.

1.1 Add the druid dependency:

<!-- https://mvnrepository.com/artifact/com.alibaba/druid -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <version>1.1.21</version>
</dependency>

1.2 Configure application.yml

spring:
  datasource:
    username: root
    password: 
    url: jdbc:mysql://localhost:3306/jdbc?serverTimezone=UTC&useUnicode=true&characterEncoding=utf-8
    driver-class-name: com.mysql.cj.jdbc.Driver
    type: com.alibaba.druid.pool.DruidDataSource # use the Druid data source

    # Druid-specific pool settings
    initialSize: 5
    minIdle: 5
    maxActive: 20
    maxWait: 60000
    timeBetweenEvictionRunsMillis: 60000
    minEvictableIdleTimeMillis: 300000
    timeBetweenLogStatsMillis: 30000 # write the stat log every 30s
    validationQuery: SELECT 1 FROM DUAL
    testWhileIdle: true
    testOnBorrow: false
    testOnReturn: false
    poolPreparedStatements: false # no PreparedStatement cache

    # Monitoring filters: stat = statistics, slf4j = logging, wall = SQL-injection defense
    filters: stat,wall,slf4j
    maxPoolPreparedStatementPerConnectionSize: 20
    useGlobalDataSourceStat: false
    connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500


Note: to have Druid write its monitoring data to the log, timeBetweenLogStatsMillis must be set and useGlobalDataSourceStat must be false (otherwise Druid reports the combination as unsupported). With timeBetweenLogStatsMillis > 0, DruidDataSource periodically writes its monitoring data to the log, but each write also resets the pool's counters.
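As a quick sanity check that these YAML values really land on the pool, the sketch below injects the DataSource and prints two of the bound settings; the class name and the use of CommandLineRunner are my own illustration, not part of the original setup:

import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

// Hypothetical smoke test: prints pool settings bound from application.yml at startup
@Component
public class DruidBindingCheck implements CommandLineRunner {

    private final DataSource dataSource;

    public DruidBindingCheck(DataSource dataSource) {
        this.dataSource = dataSource;
    }

    @Override
    public void run(String... args) {
        if (dataSource instanceof DruidDataSource) {
            DruidDataSource druid = (DruidDataSource) dataSource;
            System.out.println("maxActive = " + druid.getMaxActive());
            System.out.println("timeBetweenLogStatsMillis = " + druid.getTimeBetweenLogStatsMillis());
        }
    }
}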


1.3 Configure Druid

import java.util.HashMap;

import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.support.http.StatViewServlet;
import com.alibaba.druid.support.http.WebStatFilter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.web.servlet.FilterRegistrationBean;
import org.springframework.boot.web.servlet.ServletRegistrationBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class DruidConfig {

    // Bind spring.datasource.* (including the Druid-specific keys) onto this bean
    @ConfigurationProperties(prefix = "spring.datasource")
    @Bean
    public DataSource druidDataSource() {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setStatLogger(new MyStatLogger()); // custom stat logger, see section 1.4
        return dataSource;
    }

    // Druid's built-in monitoring console.
    // Spring Boot embeds the servlet container, so there is no web.xml;
    // registering a ServletRegistrationBean is the replacement.
    @Bean
    public ServletRegistrationBean<StatViewServlet> statViewServlet() {
        ServletRegistrationBean<StatViewServlet> bean =
                new ServletRegistrationBean<>(new StatViewServlet(), "/druid/*");

        // The console requires a login
        HashMap<String, String> initParameters = new HashMap<>();
        initParameters.put("loginUsername", "admin");
        initParameters.put("loginPassword", "123456");

        // Who may access the console; empty means everyone is allowed
        initParameters.put("allow", "");

        bean.setInitParameters(initParameters); // apply the init parameters
        return bean;
    }

    // Web statistics filter: decides which requests Druid tracks
    @Bean
    public FilterRegistrationBean<WebStatFilter> webStatFilter() {
        FilterRegistrationBean<WebStatFilter> bean = new FilterRegistrationBean<>();
        bean.setFilter(new WebStatFilter());

        // Exclude static resources and the console itself from the statistics
        HashMap<String, String> initParameters = new HashMap<>();
        initParameters.put("exclusions", "*.js,*.css,/druid/*");

        bean.setInitParameters(initParameters);
        return bean;
    }
}
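To confirm that the stat, wall, and slf4j filters from the YAML filters property were actually installed, a small helper like the sketch below (the class and method names are my own, not from the original post) can print the pool's filter chain; Druid's StatFilter, WallFilter, and Slf4jLogFilter should show up:

import java.sql.SQLException;

import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSource;

// Hypothetical helper: lists the filter chain Druid built from "filters: stat,wall,slf4j"
public class FilterCheck {

    public static void printFilters(DataSource dataSource) throws SQLException {
        if (dataSource instanceof DruidDataSource) {
            DruidDataSource druid = (DruidDataSource) dataSource;
            druid.init(); // make sure the filters string has been resolved
            druid.getProxyFilters()
                 .forEach(f -> System.out.println(f.getClass().getName()));
        }
    }
}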

1.4 A custom StatLogger

DruidDataSource writes its monitoring data to the log through com.alibaba.druid.pool.DruidDataSourceStatLoggerImpl. We can implement the DruidDataSourceStatLogger interface ourselves to customize the output, for example:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

public class MyStatLogger implements DruidDataSourceStatLogger {

    // Logger name is com.lane.controller.MyStatLogger, which the
    // <logger name="com.lane.controller"> in section 2.2 picks up
    private static final Log LOG = LogFactory.getLog(MyStatLogger.class);

    private Log logger = LOG;

    public MyStatLogger() {
        this.configFromProperties(System.getProperties());
    }
    @Override
    public void configFromProperties(Properties properties) {
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }

    public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return logger.isInfoEnabled();
    }

    public void log(String value) {
        logger.info(value);
    }

    @Override
    public void log(DruidDataSourceStatValue statValue) {
        if (!isLogEnable()) {
            return;
        }
        Map<String, Object> map = new LinkedHashMap<String, Object>();

        map.put("dbType", statValue.getDbType());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());

        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());

        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }

        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }

        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }

        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }

        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }

        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }

        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }

        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }

        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }

        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }

        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }

        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }

        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }

        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }

        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }

        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }

        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }

        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }
        MyArrayList<Map<String, Object>> sqlList = new MyArrayList<>();

        // If any SQL was executed, log only the statements...
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("executed SQL: ", sqlStat.getSql());
                sqlList.add(sqlStatMap);
            }
            // log once after the loop (logging inside it would repeat the growing list)
            log(sqlList.toString());
        }
        // ...otherwise log the pool statistics collected above
        else {
            log(map.toString());
        }
    }

    // Trailing-zero trim for histogram arrays; Druid's own
    // DruidDataSourceStatLoggerImpl uses an equivalent private helper
    private static long[] rtrim(long[] values) {
        int len = values.length;
        while (len > 0 && values[len - 1] == 0) {
            len--;
        }
        return len == values.length ? values : Arrays.copyOf(values, len);
    }
}


// Override toString to drop the enclosing brackets of ArrayList's default output
class MyArrayList<E> extends ArrayList<E> {

    @Override
    public String toString() {
        Iterator<E> it = iterator();
        if (!it.hasNext()) {
            return "";
        }

        StringBuilder sb = new StringBuilder();
        for (;;) {
            E e = it.next();
            sb.append(e == this ? "(this Collection)" : e);
            if (!it.hasNext()) {
                return sb.toString();
            }
            sb.append(',').append(' ');
        }
    }
}
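A quick illustration of the difference (hypothetical snippet, not part of the article's code):

import java.util.ArrayList;
import java.util.List;

public class ToStringDemo {
    public static void main(String[] args) {
        List<String> plain = new ArrayList<>();
        MyArrayList<String> trimmed = new MyArrayList<>();
        plain.add("SELECT 1");
        plain.add("SELECT 2");
        trimmed.add("SELECT 1");
        trimmed.add("SELECT 2");

        System.out.println(plain);   // [SELECT 1, SELECT 2]
        System.out.println(trimmed); // SELECT 1, SELECT 2
    }
}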

The output is then just the plain executed/slow SQL, with some of the original implementation's noise removed.

The above is based on the official GitHub wiki: https://github.com/alibaba/druid/wiki

You can now open Druid's web console at http://localhost:8080/druid/login.html

After logging in (admin / 123456, as configured above), the SQL monitoring panel shows the statements executed against the database, alongside the other monitoring features.
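If the SQL monitoring panel is empty, push a statement through the pool first. A minimal sketch, assuming spring-boot-starter-jdbc and spring-boot-starter-web are on the classpath (the endpoint is my own illustration):

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

// Hypothetical endpoint that runs one query through the Druid pool so the
// SQL monitoring page has something to show
@RestController
public class PingController {

    @Autowired
    private JdbcTemplate jdbcTemplate;

    @GetMapping("/ping-db")
    public String pingDb() {
        return "result = " + jdbcTemplate.queryForObject("SELECT 1", Integer.class);
    }
}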


2 Configuring logback

Logback needs no extra dependency; Spring Boot uses it by default (it comes in through spring-boot-starter-logging), which you can confirm in your project's dependency tree.


2.1 Configure application.yml

If you want logback to record Druid's monitoring output, the logging entry in the filters property must be slf4j:

filters: stat,wall,slf4j

2.2 Configure logback-spring.xml

Below is the logback configuration file; basic familiarity with logback is assumed.



<?xml version="1.0" encoding="UTF-8"?>
<!--The debug attribute controls whether logback prints its own internal status messages-->
<configuration debug="false">
    <!--Custom log output directory-->
    <property name="LOG_HOME" value="src/log/" />

    <!--Console appender-->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%highlight(%d{yyyy-MM-dd HH:mm}) %boldYellow([%thread]) %-5level  %boldCyan([%logger{50}[%L]]) %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!--INFO log file appender-->
    <appender name="FILE_INFO"  class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!--Active log file location; the directory is created automatically if it does not exist-->
        <file>${LOG_HOME}/INFO.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!--Roll daily; name pattern for the rolled files-->
            <fileNamePattern>${LOG_HOME}/service.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!--Keep at most 120 days of rolled files-->
            <MaxHistory>120</MaxHistory>
            <!--Also roll by size: when a day's file exceeds 100MB a new one is started; combined with daily rolling this keeps single files from growing too large to search-->
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <!--Output pattern and character encoding-->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm} [%thread] %-5level %logger{50}[%L] - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!--ERROR log file appender-->
    <appender name="FILE_ERROR"  class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/ERROR.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/error.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <MaxHistory>120</MaxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <!-- Threshold filter: only ERROR-level events reach this file-->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>ERROR</level>
        </filter>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{50}[%L] - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!--Druid monitoring log appender-->
    <appender name="FILE_DRUID" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${LOG_HOME}/DRUID.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/druid.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <!-- Append to the existing file -->
        <append>true</append>
        <!-- Log line format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss} %-5level %logger Line:%-3L - %msg%n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- No level filter here: the Druid stat logger writes its output at INFO level -->
    </appender>



    <!--A logger binds a package or class to a log level and to specific appenders-->
    <!--additivity controls whether events also propagate to the parent logger (here root); default is true-->
    <!--the package that contains my custom StatLogger-->
    <logger name="com.lane.controller" level="DEBUG" additivity="false">
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="FILE_DRUID"/>
    </logger>


    <!--Root logger-->
    <root level="INFO">
        <appender-ref ref="STDOUT" />
        <appender-ref ref="FILE_INFO" />
        <appender-ref ref="FILE_ERROR" />
    </root>


</configuration>

Three log files are produced:

  • INFO.log records INFO-level output
  • ERROR.log records ERROR-level output
  • DRUID.log records Druid's SQL monitoring data and JDBC activity

All of this output also goes to the console for convenience.
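To verify the wiring, any class under com.lane.controller should now log to both the console and DRUID.log; a hypothetical smoke test:

package com.lane.controller;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// This logger's name starts with com.lane.controller, so the <logger> above
// routes it to STDOUT and FILE_DRUID
public class LogbackSmokeTest {

    private static final Logger log = LoggerFactory.getLogger(LogbackSmokeTest.class);

    public static void main(String[] args) {
        log.info("logback wiring OK");
    }
}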

Note: my MyStatLogger class lives in the com.lane.controller package, so binding that package is all that's needed. If you don't have a custom logger class and use Druid's built-in stat-logger implementation instead, bind DruidDataSourceStatLoggerImpl:

<!--Druid's own stat-logger implementation class-->
    <logger name="com.alibaba.druid.pool.DruidDataSourceStatLoggerImpl" level="DEBUG" additivity="false">
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="FILE_DRUID"/>
    </logger>

[Figure: sample of the resulting DRUID.log output]