Recently I needed to add self-monitoring to a project, and my design requires collecting and analyzing the logs of a Spring Boot 2.0 application. This post is a short write-up of how I did it.
Step 1: add the required Maven dependency to pom.xml (make absolutely sure the Kafka version matches the broker you are running).
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.10.1.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
    </exclusions>
</dependency>
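The kafka_2.11 artifact pulls in the full broker and its dependencies (which is why the slf4j-log4j12 exclusion is needed). If you only need the producer API used below, a lighter alternative is to depend on the client library directly; this is just a sketch on my part, assuming your broker speaks the 0.10.x protocol:
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.10.1.1</version>
</dependency>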
Step 2: application.yml configuration
log:
  config:
    kafka:
      # Whether to send logs to Kafka (true/false); required
      isSend: true
      # Kafka broker address; required
      bootstrapServers: 127.0.0.1:9092
      # Topic the logs are sent to; required
      topic: springbootLoggerInfo
      # Batch size before a send is triggered (Kafka's batch.size, measured in bytes)
      batchSize: 5
      # Linger time in milliseconds: send after this interval even if the batch is not full
      lingerMs: 1000
      # Compression type
      compressionType: gzip
      # Number of retries
      retries: 3
      # Maximum message size, here 5 MB
      maxRequestSize: 5242880
Step 3: implement a custom Appender ourselves.
package com.unicom.admin.util;
import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.ConsoleAppender;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class KafkaAppender extends ConsoleAppender<ILoggingEvent> {
public static final Logger LOGGER = LoggerFactory.getLogger(KafkaAppender.class);
private String bootstrapServers;
private String topic;
private String batchSize;
private String lingerMs;
private String compressionType;
private String retries;
private String maxRequestSize;
private String isSend;
private Producer<String, String> producer;
@Override
public void start() {
super.start();
if ("true".equals(this.isSend)) {
if (producer == null) {
// username/password are only needed when the broker requires SASL; pass null otherwise
producer = KafkaUtil.createProducer(this.bootstrapServers, this.batchSize,
        this.lingerMs, this.compressionType, this.retries, this.maxRequestSize,
        null, null);
}
}
}
@Override
public void stop() {
super.stop();
if ("true".equals(this.isSend)) {
this.producer.close();
}
LOGGER.info("Stopping kafkaAppender...");
}
@Override
protected void append(ILoggingEvent eventObject) {
byte[] byteArray;
String log;
// encode the event with the configured encoder (the pattern defined in logback-spring.xml)
byteArray = this.encoder.encode(eventObject);
log = new String(byteArray);
ProducerRecord<String, String> record = new ProducerRecord<>(this.topic, log);
if (eventObject.getMarker() == null && "true".equals(this.isSend)) {
// if the logs need further analysis, the record structure can be reworked here (see the sketch after this class)
producer.send(record, new Callback() {
@Override
public void onCompletion(RecordMetadata metadata, Exception exception) {
if (exception != null) {
LOGGER.error("Send log to kafka failed: [{}]", log);
}
}
});
}
}
public String getBootstrapServers() {
return bootstrapServers;
}
public void setBootstrapServers(String bootstrapServers) {
this.bootstrapServers = bootstrapServers;
}
public String getTopic() {
return topic;
}
public void setTopic(String topic) {
this.topic = topic;
}
public String getBatchSize() {
return batchSize;
}
public void setBatchSize(String batchSize) {
this.batchSize = batchSize;
}
public String getLingerMs() {
return lingerMs;
}
public void setLingerMs(String lingerMs) {
this.lingerMs = lingerMs;
}
public String getCompressionType() {
return compressionType;
}
public void setCompressionType(String compressionType) {
this.compressionType = compressionType;
}
public String getRetries() {
return retries;
}
public void setRetries(String retries) {
this.retries = retries;
}
public String getMaxRequestSize() {
return maxRequestSize;
}
public void setMaxRequestSize(String maxRequestSize) {
this.maxRequestSize = maxRequestSize;
}
public Producer<String, String> getProducer() {
return producer;
}
public void setProducer(Producer<String, String> producer) {
this.producer = producer;
}
public String getIsSend() {
return isSend;
}
public void setIsSend(String isSend) {
this.isSend = isSend;
}
}
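The comment in append() mentions restructuring the record for analysis. One simple option, purely a sketch of mine and not part of the original appender, is to key each record by its log level so a downstream consumer can partition or filter by severity. The buildRecord helper below is hypothetical and would live inside KafkaAppender:
// Hypothetical helper (not in the original code): build a record keyed by log level,
// so consumers can filter or partition by severity. It would be called from append()
// in place of new ProducerRecord<>(this.topic, log).
private ProducerRecord<String, String> buildRecord(ILoggingEvent eventObject, String log) {
    String key = eventObject.getLevel().toString(); // e.g. "INFO", "ERROR"
    return new ProducerRecord<>(this.topic, key, log); // (topic, key, value)
}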
Step 4: KafkaUtil
package com.unicom.admin.util;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import java.util.Properties;
public class KafkaUtil {
public static Producer<String, String> createProducer(
String bootstrapServers, String batchSize, String lingerMs,
String compressionType, String retries, String maxRequestSize, String username, String password) {
// Fall back to defaults when a property is missing; logback substitutes the literal
// "<name>_IS_UNDEFINED" for springProperty values that are not set in application.yml
if (bootstrapServers == null || bootstrapServers.contains("IS_UNDEFINED")) {
    bootstrapServers = "localhost:9092"; // fallback broker address; replace with your own
}
if (batchSize.contains("IS_UNDEFINED")) {
batchSize = "50000";
}
if (lingerMs.contains("IS_UNDEFINED")) {
lingerMs = "60000";
}
if (retries.contains("IS_UNDEFINED")) {
retries = "3";
}
if (maxRequestSize.contains("IS_UNDEFINED")) {
maxRequestSize = "5242880";
}
Properties properties = new Properties();
// Kafka broker address; for a cluster, separate the addresses with commas
properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
// acks values:
//   0:   the broker sends no acknowledgement; delivery is not guaranteed and retries never trigger
//   1:   the partition leader acknowledges the write to its own log, but replication is not guaranteed
//   all: the leader waits until the record has been replicated before acknowledging
properties.put(ProducerConfig.ACKS_CONFIG, "1");
properties.put(ProducerConfig.RETRIES_CONFIG, Integer.valueOf(retries));
// batching: a send is triggered once batch.size bytes have accumulated for a partition
properties.put(ProducerConfig.BATCH_SIZE_CONFIG, Integer.valueOf(batchSize));
// linger.ms: even if batch.size has not been reached, data is pushed after this interval
properties.put(ProducerConfig.LINGER_MS_CONFIG, Integer.valueOf(lingerMs));
// total producer buffer memory (32 MB)
properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
if (!compressionType.contains("IS_UNDEFINED")) {
// compression algorithm
properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
}
// maximum size of a single request
properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, Integer.valueOf(maxRequestSize));
properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.StringSerializer");
properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.StringSerializer");
if (!"".equals(username)) {
properties.put("security.protocol", "SASL_PLAINTEXT");
properties.put("sasl.mechanism", "PLAIN");
properties.put("sasl.jaas.config",
"org.apache.kafka.common.security.scram.ScramLoginModule required username=\"" + "用戶名" + "\" password=\"" + "密碼" + "\";");
}
return new KafkaProducer<String, String>(properties);
}
}
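To check that log lines actually arrive on the topic, a throwaway consumer helps. The class below is my own sketch rather than part of the project: it assumes the 0.10.1.1 client from Step 1 (hence poll(long)) and an arbitrary group id; the broker address and topic come from the application.yml above.
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Collections;
import java.util.Properties;

public class LogTopicSmokeTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "127.0.0.1:9092");   // same address as application.yml
        props.put("group.id", "log-smoke-test");             // arbitrary group id for this check
        props.put("auto.offset.reset", "earliest");          // read the topic from the beginning
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("springbootLoggerInfo"));
            // poll(long) is the 0.10.x API; newer clients use poll(Duration)
            ConsumerRecords<String, String> records = consumer.poll(5000L);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}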
Step 5: logback.xml configuration:
<?xml version="1.0" encoding="utf-8" ?>
<!-- Logging configuration. Goal: route each log level to its own file, with each file containing only its own level. -->
<!--
    Log levels (see ch.qos.logback.classic.Level): the higher the value, the more severe.
    ERROR(40, "ERROR"),
    WARN(30, "WARN"),
    INFO(20, "INFO"),
    DEBUG(10, "DEBUG"),
    TRACE(0, "TRACE");
-->
<configuration>
<springProfile name="dev | loc">
<!-- configuration to be enabled only when the "dev" or "loc" profiles are active -->
</springProfile>
<property name="appname" value="system-management"/>
<!-- Log output format -->
<!-- Simple appender that writes logs to the console -->
<appender name="consoleLog" class="ch.qos.logback.core.ConsoleAppender"> <!-- ConsoleAppender writes log output to the console, more precisely to System.out or System.err -->
<encoder>
<!-- <pattern>%red(%d{yyyy-MM-dd HH:mm:ss.SSS}) %highlight(%-5level) %green([%thread]) %boldMagenta(%logger{10}) - %cyan(%msg%n)</pattern> -->
<pattern>
{
"timestamp":"%date{yyyy-MM-dd HH:mm:ss.SSS}",
"app": "${APP_NAME}","thread": "%thread", "logLevel": "%level", "message": "%message","logger":"%logger"
}\n
</pattern>
<!-- The console must also use UTF-8, not GBK, otherwise Chinese characters will be garbled -->
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- DEBUG-level logs go to their own file -->
<appender name="fileDebugLog" class="ch.qos.logback.core.rolling.RollingFileAppender"> <!-- RollingFileAppender extends FileAppender and adds automatic log rollover -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter"> <!-- LevelFilter keeps only events of the configured level -->
        <level>DEBUG</level> <!-- match DEBUG-level events -->
        <onMatch>ACCEPT</onMatch> <!-- accept events that match DEBUG -->
        <onMismatch>DENY</onMismatch> <!-- deny everything else -->
</filter>
<encoder>
<pattern>
%d - %c- %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>./log/${appname}/debug.%d.log</fileNamePattern>
</rollingPolicy>
</appender>
<!-- INFO-level logs go to their own file -->
<appender name="fileInfoLog" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <filter class="ch.qos.logback.classic.filter.LevelFilter"> <!-- ERROR is higher than INFO, so a simple threshold is not enough; LevelFilter keeps INFO and drops ERROR -->
        <level>INFO</level> <!-- match INFO-level events -->
        <onMatch>ACCEPT</onMatch> <!-- accept events that match INFO -->
        <onMismatch>DENY</onMismatch> <!-- deny everything else -->
</filter>
<encoder>
<pattern>
%d - %c - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- time-based rolling policy -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
    <!-- output path pattern -->
<fileNamePattern>./log/${appname}/info.%d.log</fileNamePattern>
</rollingPolicy>
</appender>
<!-- ERROR-level logs get their own file, separate from the INFO file, so each file contains only its own level -->
<!-- separating levels requires a filter -->
<appender name="fileErrorLog" class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>ERROR</level> <!-- ThresholdFilter: only events at ERROR level or above are written to error.%d.log -->
</filter>
<encoder>
<pattern>
%d - %c - %msg%n
</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- time-based rolling policy -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
    <!-- output path pattern -->
<fileNamePattern>./log/${appname}/error.%d.log</fileNamePattern>
</rollingPolicy>
</appender>
<!-- Read the Kafka settings from application.yml -->
<springProperty scope="context" name="isSend"
source="log.config.kafka.isSend" defalutValue="false"/>
<springProperty scope="context" name="bootstrapServers"
source="log.config.kafka.bootstrapServers" defalutValue="localhost:9092"/>
<springProperty scope="context" name="topic"
source="log.config.kafka.topic" defalutValue="springbootLoggerInfo"/>
<springProperty scope="context" name="batchSize"
source="log.config.kafka.batchSize" defalutValue="1"/>
<springProperty scope="context" name="lingerMs"
source="log.config.kafka.lingerMs" defalutValue="1000"/>
<springProperty scope="context" name="compressionType"
source="log.config.kafka.compressionType" defalutValue="gzip"/>
<springProperty scope="context" name="retries"
source="log.config.kafka.retries" defalutValue="3"/>
<springProperty scope="context" name="maxRequestSize"
source="log.config.kafka.maxRequestSize" defalutValue="5242880"/>
<appender name="KAFKA" class="com.unicom.admin.util.KafkaAppender" >
<!-- the encoder is required; it defines the log format -->
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<!--<pattern>-->
<!--%red(%d{yyyy-MM-dd HH:mm:ss.SSS}) %highlight(%-5level) %green([%thread]) %boldMagenta(%logger{10}) - %cyan(%msg%n)-->
<!--</pattern>-->
<!-- output the log as JSON to make downstream analysis easier -->
<pattern>
{
"timestamp":"%date{yyyy-MM-dd HH:mm:ss.SSS}",
"app": "${APP_NAME}","thread": "%thread", "logLevel": "%level", "message": "%message","logger":"%logger"
}\n
</pattern>
<!-- Use UTF-8 here as well, not GBK, otherwise Chinese characters will be garbled -->
<charset>UTF-8</charset>
<!--<pattern>-->
<!--{-->
<!--"timestamp":"%date{yyyy-MM-dd HH:mm:ss.SSS}",-->
<!--"app": "${APP_NAME}",-->
<!--"logLevel": "%level",-->
<!--"message": "%message"-->
<!--}\n-->
<!--</pattern>-->
</encoder>
<bootstrapServers>${bootstrapServers}</bootstrapServers>
<topic>${topic}</topic>
<batchSize>${batchSize}</batchSize>
<lingerMs>${lingerMs}</lingerMs>
<compressionType>${compressionType}</compressionType>
<retries>${retries}</retries>
<maxRequestSize>${maxRequestSize}</maxRequestSize>
<isSend>${isSend}</isSend>
</appender>
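<!-- Optional, my own sketch rather than part of the original setup: producer.send() buffers
     records asynchronously, but it can still block the logging thread when the buffer is full
     or broker metadata is unavailable. Wrapping the Kafka appender in logback's AsyncAppender
     keeps application threads decoupled from Kafka; reference ASYNC_KAFKA instead of KAFKA
     in the root logger if you enable it. -->
<!--
<appender name="ASYNC_KAFKA" class="ch.qos.logback.classic.AsyncAppender">
    <queueSize>512</queueSize>
    <discardingThreshold>0</discardingThreshold>
    <appender-ref ref="KAFKA"/>
</appender>
-->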
<!-- To route only a specific package's DEBUG logs to Kafka, use a dedicated logger such as the one below instead of root: -->
<!--<logger name="com.demo.log2kafka" level="DEBUG">-->
<!--<appender-ref ref="KAFKA"/>-->
<!--</logger>-->
<!-- Root logger: every log event at INFO level or above in the application goes to the appenders below -->
<root level="info">
    <appender-ref ref="consoleLog"/>
    <appender-ref ref="fileDebugLog"/>
    <appender-ref ref="fileInfoLog"/>
    <appender-ref ref="fileErrorLog"/>
    <appender-ref ref="KAFKA"/>
</root>
</configuration>
One last note on logback: the part of the configuration that matters most here is the Kafka appender. I use logback-spring.xml, which makes it easier to manage the configuration across profiles and versions; see the Spring Boot documentation on logback-spring.xml for details. Also, it is best to output logs as JSON, which makes them much easier to analyze.
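Since each log line is a JSON object, pulling fields out of it downstream is straightforward. As a final illustration, here is a minimal sketch of my own (assuming Jackson, which Spring Boot already ships, and the field names used in the pattern above) for extracting the level from a consumed log line:
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class LogLineParser {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Parse one JSON log line produced by the pattern above and return its level,
    // e.g. "ERROR"; the "logLevel" field name comes from the logback pattern.
    public static String levelOf(String logLine) throws Exception {
        JsonNode node = MAPPER.readTree(logLine);
        return node.path("logLevel").asText();
    }
}
Note that %message is not JSON-escaped by PatternLayout, so messages containing quotes or newlines can produce invalid JSON; a dedicated JSON encoder such as logstash-logback-encoder is a more robust option if that becomes a problem.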