我这里是使用docker-compose 安装的,所以首先要安装docker docker-compose
1、到 GitHub 下载 ELK 的 docker-compose 文件,仓库地址:https://github.com/deviantony/docker-elk (deviantony/docker-elk: The Elastic stack (ELK) powered by Docker and Compose)
2、修改对应的文件,修改之后的yml
version: '3.2'

services:
  elasticsearch:
    build:
      context: elasticsearch/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./elasticsearch/config/elasticsearch.yml
        target: /usr/share/elasticsearch/config/elasticsearch.yml
        read_only: true
      - type: volume
        source: elasticsearch
        target: /usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: luxing
      # Use single node discovery in order to disable production mode and avoid bootstrap checks.
      # see: https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
      discovery.type: single-node
    networks:
      - elk

  logstash:
    build:
      context: logstash/
      args:
        ELK_VERSION: $ELK_VERSION
    # command: logstash -f ./logstash.conf  # config file passed at startup; unused here,
    #                                       # the pipeline directory bind mount below is what takes effect
    volumes:
      - type: bind
        source: ./logstash/config/logstash.yml
        target: /usr/share/logstash/config/logstash.yml
        read_only: true
      - type: bind
        source: ./logstash/pipeline
        target: /usr/share/logstash/pipeline  # Logstash reads logstash.conf from this directory
        read_only: true
    ports:
      - "5044:5044"
      - "5000:5000/tcp"
      - "5000:5000/udp"
      - "9600:9600"
      - "4560:4560"  # extra port added on top of the upstream file
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    build:
      context: kibana/
      args:
        ELK_VERSION: $ELK_VERSION
    volumes:
      - type: bind
        source: ./kibana/config/kibana.yml
        target: /usr/share/kibana/config/kibana.yml
        read_only: true
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge

volumes:
  elasticsearch:
3、修改 elasticsearch、logstash、kibana下面的配置文件
vim elasticsearch/config/elasticsearch.yml
# Single-node cluster name shown in the cluster APIs.
cluster.name: "docker-cluster"
# Bind on all interfaces so the container port mapping is reachable from the host.
network.host: 0.0.0.0
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: trial
# NOTE(review): security is disabled here even though the compose file sets
# ELASTIC_PASSWORD — the password is simply unused while this stays false.
xpack.security.enabled: false
xpack.monitoring.collection.enabled: true
vim logstash/config/logstash.yml
http.host: "0.0.0.0"  # bind the Logstash HTTP API on all interfaces so any host can reach it
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.159.128:9200" ]
## X-Pack security credentials
#
xpack.monitoring.enabled: true
# Credentials are unneeded while Elasticsearch security is disabled; keep them commented out.
#xpack.monitoring.elasticsearch.username:
#xpack.monitoring.elasticsearch.password:
vim kibana/config/kibana.yml
server.name: kibana
server.host: "0.0.0.0"  # listen on all interfaces (quoted for consistency with logstash.yml)
elasticsearch.hosts: [ "http://192.168.159.128:9200" ]  # Elasticsearch address
monitoring.ui.container.elasticsearch.enabled: true
## X-Pack security credentials
#
xpack.security.enabled: false  # set to false so Kibana requires no login
# Credentials are unused while security is disabled; keep them commented out
# instead of leaving bare keys — a bare "key:" parses as null, not an empty string.
#elasticsearch.username:
#elasticsearch.password:
i18n.locale: "zh-CN"  # switch the Kibana UI to Simplified Chinese
4、修改 docker-elk/logstash/pipeline 下面的logstash.conf 文件
input {
  beats {
    port => 5044
  }
  tcp {
    # Port targeted by the LogstashTcpSocketAppender in the Spring Boot logback config.
    port => 5000
  }
}

## Add your filters / logstash plugins configuration here

output {
  # Echo every event to the container log for debugging.
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => "192.168.159.128:9200"
    action => "index"
    # Daily index; this name is what you use when creating the index pattern in Kibana.
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
5、校准时间
yum install -y ntp
6、启动 ELK
docker-compose up(前台启动并打印日志);docker-compose up -d(后台启动,不打印日志)
7、springboot配置
1、pom文件中增加依赖
如果本来就集成了logback 就不需要引入依赖了
<!-- Encodes logback events as JSON and ships them to Logstash over TCP.
     Skip this if the project already bundles logstash-logback-encoder. -->
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.1</version>
</dependency>
2、配置logback-spring.xml
<!-- Sends log events as JSON to the Logstash TCP input on port 5000. -->
<appender name="stash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
    <destination>192.168.159.128:5000</destination>
    <!-- encoder is required -->
    <encoder class="net.logstash.logback.encoder.LogstashEncoder"/>
</appender>

<!-- Ship logs to Logstash in every profile except dev. -->
<springProfile name="!dev">
    <root level="info">
        <appender-ref ref="stash"/>
    </root>
</springProfile>
8、访问 es:192.168.159.128:9200
9、访问 kibana:192.168.159.128:5601,在 Discover 中创建索引
调用接口访问
参考: