1、Architecture planning
As the diagram above shows, when you need to reach the ELK log analysis platform you first hit a highly available load balancer at the VIP provided by keepalived, so access is not interrupted if one load balancer goes down. nginx is used here for load balancing and forwarding: it forwards visitors' requests to kibana, and kibana in turn fetches data from the elasticsearch cluster. elasticsearch runs as a three-node cluster, and the data may be stored on any of the three elasticsearch servers. The redis server acts as a temporary buffer for the data, which prevents log loss when a large log volume would otherwise make collection and storage fall out of step; redis could itself be a cluster, but a single instance is used here. In front of redis there is a logstash server that pulls the collected log data off redis and writes it into the elasticsearch cluster. The logs on the web servers are collected by filebeat and sent to another logstash server, which writes what it receives into redis.
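For reference, the addresses that appear in the configurations later in this article map to the following roles (a summary inferred from the later sections; the tomcat host web2's own address is never shown):
10.0.0.100, 10.0.0.101, 10.0.0.102 - elasticsearch cluster nodes, which also run kibana (nginx + keepalived are added on two of them)
10.0.0.104 - redis, used as the temporary log buffer
10.0.0.105 - logstash that receives logs from filebeat and writes them to redis
10.0.0.106 - nginx web server (web1); web2 runs tomcat
10.0.0.188 - keepalived VIP in front of the nginx reverse proxies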
2、Deploy the web servers and collect their logs with filebeat
2.1、Deploy nginx and collect its logs with filebeat
2.1.1、Install nginx
Put the pre-compiled nginx package on the server and install the filebeat service.
root@web1:~# mkdir -p /data/service/
root@web1:~# cd /data/service/
root@web1:/data/service# ls
nginx-1.22.tar.gz
root@web1:/data/service# tar xf nginx-1.22.tar.gz
root@web1:/data/service# ls
nginx nginx-1.22.tar.gz
root@web1:/data/service# cd nginx/
root@web1:/data/service/nginx# ls
client_body_temp conf conf.d fastcgi_temp html logs nginx.service proxy_temp sbin scgi_temp uwsgi_temp
root@web1:/data/service/nginx# ln -s /data/service/nginx/sbin/nginx /usr/bin/nginx
root@web1:/data/service/nginx# mv nginx.service /lib/systemd/system/nginx.service
root@web1:/data/service/nginx# vim /lib/systemd/system/nginx.service
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=forking
ExecStartPre=/data/service/nginx/sbin/nginx -t
ExecStart=/data/service/nginx/sbin/nginx
ExecReload=/data/service/nginx/sbin/nginx -s reload
ExecStop=/data/service/nginx/sbin/nginx -s stop
LimitNOFILE=infinity
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
root@web1:/data/service/nginx# vi conf/nginx.conf
#add the following configuration inside the http block
log_format access_json '{"@timestamp":"$time_iso8601",'
'"host":"$server_addr",'
'"clientip":"$remote_addr",'
'"size":$body_bytes_sent,'
'"responsetime":"$request_time",'
'"upstreamtime":"$upstream_response_time",'
'"upstreamaddr":"$upstream_addr",'
'"request_method":"$request_method",'
'"scheme":"$scheme",'
'"domain":"$server_name",'
'"referer":"$http_referer",'
'"request":"$request_uri",'
'"requesturl":"$request",'
'"status": "$status"}';
root@web1:/data/service/nginx# vi conf.d/myweb.conf
server {
listen 80;
server_name 10.0.0.106;
access_log /data/service/nginx/logs/web_access.log access_json;
location /web {
root /data/service/nginx/html;
index index.html index.htm;
}
}
root@web1:/data/service/nginx# mkdir html/web
root@web1:/data/service/nginx# cd html/web/
root@web1:/data/service/nginx/html/web# ls
css favicon.ico index.html
root@web1:/data/service/nginx# systemctl enable nginx.service
root@web1:/data/service/nginx# nginx -t #check the nginx configuration file for syntax errors before starting the service
nginx: the configuration file /data/service/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /data/service/nginx/conf/nginx.conf test is successful
root@web1:/data/service/nginx# systemctl start nginx.service
#verify that the web page can be accessed normally and check the web access log
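A quick command-line check of both the page and the JSON access log might look like this (a sketch; it assumes the address 10.0.0.106 and the paths configured above):
root@web1:~# curl -I http://10.0.0.106/web/index.html
root@web1:~# tail -1 /data/service/nginx/logs/web_access.log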
2.1.2、Install filebeat
root@web1:~# wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.9-amd64.deb
root@web1:~# dpkg -i filebeat-7.17.9-amd64.deb
root@web1:~# vi /etc/filebeat/filebeat.yml
root@web1:~# grep -Ev "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /data/service/nginx/logs/web_access.log
fields:
project: nginx-web-accesslog
- type: log
enabled: true
paths:
- /data/service/nginx/logs/error.log
fields:
project: nginx-errorlog
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
setup.template.settings:
index.number_of_shards: 1
setup.kibana:
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
output.logstash:
hosts: ["10.0.0.105:5066","10.0.0.105:5088"]
enabled: true
worker: 2
compression_level: 3
loadbalance: true
root@web1:~# systemctl restart filebeat.service
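Because output.logstash points at ports 5066 and 5088 on 10.0.0.105 with loadbalance enabled, once the logstash inputs from section 4.1 are listening you can optionally confirm that filebeat has opened connections to both ports (a sketch, not part of the original steps):
root@web1:~# ss -tnp | grep -E '5066|5088'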
2.2、Deploy tomcat and collect its logs with filebeat
2.2.1、Install tomcat
root@web2:~# cd /opt/
root@web2:/opt# ls
filebeat-7.17.9-amd64.deb install_tomcat_service.tar.gz
root@web2:/opt# dpkg -i filebeat-7.17.9-amd64.deb
root@web2:/opt# tar xf install_tomcat_service.tar.gz
root@web2:/opt# ls
apache-tomcat-9.0.62.tar.gz filebeat-7.17.9-amd64.deb install_tomcat.sh install_tomcat_service.tar.gz jdk-8u311-linux-x64.tar.gz
root@web2:/opt# cat install_tomcat.sh
#!/bin/bash
DIR=`pwd`
JDK_FILE="jdk-8u311-linux-x64.tar.gz"
TOMCAT_FILE="apache-tomcat-9.0.62.tar.gz"
JDK_DIR="/data/service"
TOMCAT_DIR="/data/service"
color () {
RES_COL=60
MOVE_TO_COL="echo -en \\033[${RES_COL}G"
SETCOLOR_SUCCESS="echo -en \\033[1;32m"
SETCOLOR_FAILURE="echo -en \\033[1;31m"
SETCOLOR_WARNING="echo -en \\033[1;33m"
SETCOLOR_NORMAL="echo -en \E[0m"
echo -n "$2" && $MOVE_TO_COL
echo -n "["
if [ $1 = "success" -o $1 = "0" ] ;then
${SETCOLOR_SUCCESS}
echo -n $" OK "
elif [ $1 = "failure" -o $1 = "1" ] ;then
${SETCOLOR_FAILURE}
echo -n $"FAILED"
else
${SETCOLOR_WARNING}
echo -n $"WARNING"
fi
${SETCOLOR_NORMAL}
echo -n "]"
echo
}
install_jdk () {
if ! [ -f "$DIR/$JDK_FILE" ];then
color 1 "$JDK_FILE does not exist"
exit;
elif [ -d $JDK_DIR/jdk ];then
color 1 "JDK is already installed"
exit
else
[ -d "$JDK_DIR" ] || mkdir -pv $JDK_DIR
fi
tar xvf $DIR/$JDK_FILE -C $JDK_DIR
cd $JDK_DIR && ln -s jdk1.8.* jdk
cat > /etc/profile.d/jdk.sh <<EOF
export JAVA_HOME=$JDK_DIR/jdk
export JRE_HOME=\$JAVA_HOME/jre
export CLASSPATH=\$JAVA_HOME/lib/:\$JRE_HOME/lib/
export PATH=\$PATH:\$JAVA_HOME/bin
EOF
. /etc/profile.d/jdk.sh
java -version && color 0 "JDK installed successfully" || { color 1 "JDK installation failed" ; exit; }
}
install_tomcat () {
if ! [ -f "$DIR/$TOMCAT_FILE" ];then
color 1 "$TOMCAT_FILE does not exist"
exit;
elif [ -d $TOMCAT_DIR/tomcat ];then
color 1 "TOMCAT is already installed"
exit
else
[ -d "$TOMCAT_DIR" ] || mkdir -pv $TOMCAT_DIR
fi
tar xf $DIR/$TOMCAT_FILE -C $TOMCAT_DIR
cd $TOMCAT_DIR && ln -s apache-tomcat-*/ tomcat
echo "PATH=$TOMCAT_DIR/tomcat/bin:"'$PATH' > /etc/profile.d/tomcat.sh
id tomcat &> /dev/null || useradd -r -s /sbin/nologin tomcat
cat > $TOMCAT_DIR/tomcat/conf/tomcat.conf <<EOF
JAVA_HOME=$JDK_DIR/jdk
EOF
chown -R tomcat.tomcat $TOMCAT_DIR/tomcat/
cat > /lib/systemd/system/tomcat.service <<EOF
[Unit]
Description=Tomcat
#After=syslog.target network.target remote-fs.target nss-lookup.target
After=syslog.target network.target
[Service]
Type=forking
EnvironmentFile=$TOMCAT_DIR/tomcat/conf/tomcat.conf
ExecStart=$TOMCAT_DIR/tomcat/bin/startup.sh
ExecStop=$TOMCAT_DIR/tomcat/bin/shutdown.sh
RestartSec=3
PrivateTmp=true
User=tomcat
Group=tomcat
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now tomcat.service &> /dev/null
systemctl is-active tomcat.service &> /dev/null && color 0 "TOMCAT installed successfully" || { color 1 "TOMCAT installation failed" ; exit; }
}
install_jdk
install_tomcat
root@web2:/opt# bash install_tomcat.sh
root@web2:~# mkdir /data/service/tomcat/webapps/myapp
root@web2:~# echo "Tomcat test Page" > /data/service/tomcat/webapps/myapp/index.html
root@web2:~# chown -R tomcat.tomcat /data/service/tomcat/webapps/
root@web2:~# vi /data/service/tomcat/conf/server.xml
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="tomcat_access_log" suffix=".log"
pattern="{&quot;clientip&quot;:&quot;%h&quot;,&quot;ClientUser&quot;:&quot;%l&quot;,&quot;authenticated&quot;:&quot;%u&quot;,&quot;AccessTime&quot;:&quot;%t&quot;,&quot;method&quot;:&quot;%r&quot;,&quot;status&quot;:&quot;%s&quot;,&quot;SendBytes&quot;:&quot;%b&quot;,&quot;Query?string&quot;:&quot;%q&quot;,&quot;partner&quot;:&quot;%{Referer}i&quot;,&quot;AgentVersion&quot;:&quot;%{User-Agent}i&quot;}"/>
root@web2:~# systemctl stop tomcat.service
root@web2:~# rm -f /data/service/tomcat/logs/* #clear the old logs so the test output below is easier to read
root@web2:~# systemctl restart tomcat.service
Access the web page in a browser, then check the access log:
root@web2:~# tail -f /data/service/tomcat/logs/tomcat_access_log.2023-03-23.log
2.2.2、Install filebeat
root@web2:~# cd /opt/
root@web2:/opt# ls
apache-tomcat-9.0.62.tar.gz filebeat-7.17.9-amd64.deb install_tomcat.sh install_tomcat_service.tar.gz jdk-8u311-linux-x64.tar.gz
root@web2:/opt# dpkg -i filebeat-7.17.9-amd64.deb
root@web2:~# vi /etc/filebeat/filebeat.yml
root@web2:~# grep -Ev "#|^$" /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /data/service/tomcat/logs/tomcat_access_log.*.log
fields:
project: tomcat-accesslog
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
setup.template.settings:
index.number_of_shards: 1
setup.kibana:
processors:
- add_host_metadata:
when.not.contains.tags: forwarded
- add_cloud_metadata: ~
- add_docker_metadata: ~
- add_kubernetes_metadata: ~
output.logstash:
hosts: ["10.0.0.105:5066","10.0.0.105:5088"]
enabled: true
worker: 2
compression_level: 3
loadbalance: true
root@web2:~# systemctl restart filebeat.service
3、Deploy the redis service
Put the previously compiled redis package on a server, make a few small changes to the configuration, and then start the redis service.
root@redis:~# mkdir -p /data/service/ && cd $_
root@redis:/data/service# ls
redis6.2.6.tar.gz
root@redis:/data/service# tar xf redis6.2.6.tar.gz
root@redis:/data/service# ls
redis redis6.2.6.tar.gz
root@redis:/data/service# ls redis
bin data etc logs redis.service
root@redis:/data/service# cd redis/
root@redis:/data/service/redis# vi etc/redis.conf
root@redis:/data/service/redis# grep -Ev "^#|^$" etc/redis.conf
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile "/data/service/redis/logs/redis.log"
databases 32
always-show-logo yes
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
rdb-del-sync-files no
dir /data/service/redis/data
replica-serve-stale-data yes
replica-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-diskless-load disabled
repl-disable-tcp-nodelay no
replica-priority 100
acllog-max-len 128
requirepass pxt521314
maxmemory 3G
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
lazyfree-lazy-user-del no
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
jemalloc-bg-thread yes
root@redis:/data/service/redis# ln -s /data/service/redis/bin/* /usr/bin/
root@redis:/data/service/redis# mv redis.service /lib/systemd/system/redis.service
root@redis:~# systemctl daemon-reload
root@redis:~# systemctl enable --now redis.service
root@redis:~# lsof -i:6379
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
redis-ser 7694 root 6u IPv4 59248 0t0 TCP *:6379 (LISTEN)
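Since requirepass is set in redis.conf, a quick connectivity and authentication check can be done with redis-cli (a sketch; the password is the one configured above, and the expected reply is PONG):
root@redis:~# redis-cli -a pxt521314 ping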
4、Deploy the logstash services
4.1、Deploy a logstash instance that stores the logs forwarded by filebeat into redis
The logstash service requires a JDK environment; installing the JDK is not shown here.
root@logstash2:~# wget https://artifacts.elastic.co/downloads/logstash/logstash-7.17.9-amd64.deb
root@logstash2:~# dpkg -i logstash-7.17.9-amd64.deb
root@logstash2:~# vi /etc/systemd/system/logstash.service #change the user and group to root so that logstash has permission to collect logs later
User=root
Group=root
root@logstash2:~# systemctl daemon-reload
root@logstash2:~# systemctl enable --now logstash.service
root@logstash2:~# vi /etc/logstash/conf.d/filebeat-to-redis.conf
root@logstash2:~# cat /etc/logstash/conf.d/filebeat-to-redis.conf
input {
beats {
port => 5066
codec => "json"
}
beats {
port => 5088
codec => "json"
}
}
output {
if [fields][project] == "nginx-web-accesslog" {
redis{
data_type => "list"
key => "zhang-nginx-web-accesslog-filebeat"
host => "10.0.0.104"
port => "6379"
db => "0"
password => "pxt521314"
}}
if [fields][project] == "nginx-errorlog" {
redis{
data_type => "list"
key => "zhang-nginx-errorlog-filebeat"
host => "10.0.0.104"
port => "6379"
db => "0"
password => "pxt521314"
}}
if [fields][project] == "tomcat-accesslog" {
redis{
data_type => "list"
key => "zhang-tomcat-accesslog-filebeat"
host => "10.0.0.104"
port => "6379"
db => "1"
password => "pxt521314"
}}
}
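Before restarting, the pipeline syntax can optionally be verified with logstash's config-test flag (a sketch; the deb package installs the binary under /usr/share/logstash):
root@logstash2:~# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/filebeat-to-redis.conf -t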
root@logstash2:~# systemctl restart logstash.service
After restarting the logstash service, you can verify the data on redis.
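For example, the list lengths can be checked with redis-cli, using the keys, databases, and password configured above (a sketch; the lists only hold entries until the second logstash from section 4.2 starts consuming them):
root@redis:~# redis-cli -a pxt521314 -n 0 LLEN zhang-nginx-web-accesslog-filebeat
root@redis:~# redis-cli -a pxt521314 -n 0 LLEN zhang-nginx-errorlog-filebeat
root@redis:~# redis-cli -a pxt521314 -n 1 LLEN zhang-tomcat-accesslog-filebeat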
4.2、Deploy another logstash instance that reads the data from redis and writes it into the elasticsearch cluster
The elasticsearch cluster must be installed before this step; otherwise logstash will fail to start after the configuration file below is written and the service is restarted.
root@logstash1:~# wget https://artifacts.elastic.co/downloads/logstash/logstash-7.17.9-amd64.deb
root@logstash1:~# dpkg -i logstash-7.17.9-amd64.deb
root@logstash1:~# vi /etc/systemd/system/logstash.service
User=root
Group=root
root@logstash1:~# systemctl daemon-reload
root@logstash1:~# systemctl enable --now logstash.service
root@logstash1:~# vi /etc/logstash/conf.d/redis-to-es.conf
root@logstash1:~# cat /etc/logstash/conf.d/redis-to-es.conf
input {
redis {
data_type => "list"
key => "zhang-nginx-web-accesslog-filebeat"
host => "10.0.0.104"
port => "6379"
db => "0"
password => "pxt521314"
}
redis {
data_type => "list"
key => "zhang-nginx-errorlog-filebeat"
host => "10.0.0.104"
port => "6379"
db => "0"
password => "pxt521314"
}
redis {
data_type => "list"
key => "zhang-tomcat-accesslog-filebeat"
host => "10.0.0.104"
port => "6379"
db => "1"
password => "pxt521314"
}
}
output {
if [fields][project] == "nginx-web-accesslog" {
elasticsearch {
hosts => ["10.0.0.100:9200","10.0.0.101:9200","10.0.0.102:9200"]
index => "filebeat-logstash-zhang-nginx-web-accesslog-%{+YYYY.MM.dd}"
}}
if [fields][project] == "nginx-errorlog" {
elasticsearch{
hosts => ["10.0.0.100:9200","10.0.0.101:9200","10.0.0.102:9200"]
index => "filebeat-logstash-zhang-nginx-errorlog-%{+YYYY.MM.dd}"
}}
if [fields][project] == "tomcat-accesslog" {
elasticsearch{
hosts => ["10.0.0.100:9200","10.0.0.101:9200","10.0.0.102:9200"]
index => "filebeat-logstash-zhang-tomcat-accesslog-%{+YYYY.MM.dd}"
}}
}
root@logstash1:~# systemctl restart logstash.service
5、Deploy the elasticsearch + kibana cluster
Perform the following operations on the three prepared elasticsearch hosts. elasticsearch also requires a JDK environment, which is omitted here.
5.1、Install elasticsearch
root@es-node1:~# cd /opt/
root@es-node1:/opt# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.17.9-amd64.deb
root@es-node1:/opt# scp elasticsearch-7.17.9-amd64.deb 10.0.0.101:/opt/
root@es-node1:/opt# scp elasticsearch-7.17.9-amd64.deb 10.0.0.102:/opt/
root@es-node1:/opt# dpkg -i elasticsearch-7.17.9-amd64.deb
root@es-node1:/opt# vi /etc/elasticsearch/elasticsearch.yml
root@es-node1:/opt# grep -Ev "^$|^#" /etc/elasticsearch/elasticsearch.yml
cluster.name: myelk-cluster #name of the ELK cluster; nodes with the same cluster name belong to the same cluster
node.name: elk-node1 #name of this node within the cluster; it must be unique, so change it on the other two nodes
path.data: /data/service/elasticsearch/data #directory where data is stored
path.logs: /data/service/elasticsearch/logs #directory where logs are stored
bootstrap.memory_lock: true #lock enough memory at startup to keep data out of swap
network.host: 0.0.0.0 #listen address
http.port: 9200 #listen port
discovery.seed_hosts: ["10.0.0.100", "10.0.0.101","10.0.0.102"] #host list used for node discovery within the cluster
cluster.initial_master_nodes: ["10.0.0.100", "10.0.0.101","10.0.0.102"] #nodes that can be elected master when the cluster is first initialized
root@es-node1:~# vim /etc/elasticsearch/jvm.options #the configured heap size should not exceed half of the host's memory, and should not exceed 30G
-Xms1g
-Xmx1g
root@es-node1:~# vi /lib/systemd/system/elasticsearch.service #modify the memory limit
LimitMEMLOCK=infinity #allow unlimited locked memory
root@es-node1:~# mkdir -p /data/service/elasticsearch/{data,logs}
root@es-node1:~# chown -R elasticsearch.elasticsearch /data/service/elasticsearch
root@es-node1:~# systemctl daemon-reload
root@es-node1:~# systemctl enable elasticsearch.service
root@es-node1:~# systemctl start elasticsearch.service
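Once the service has been started on all three nodes, the cluster state can be checked over the HTTP API (a sketch; any of the three node addresses works):
root@es-node1:~# curl "http://10.0.0.100:9200/_cat/nodes?v"
root@es-node1:~# curl "http://10.0.0.100:9200/_cluster/health?pretty"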
5.2、Install kibana
Install the kibana service on all three elasticsearch hosts.
root@es-node1:~# wget https://artifacts.elastic.co/downloads/kibana/kibana-7.17.9-amd64.deb
root@es-node1:~# dpkg -i kibana-7.17.9-amd64.deb
root@es-node1:~# vi /etc/kibana/kibana.yml
server.port: 5601
server.host: "10.0.0.100"
elasticsearch.hosts: ["http://10.0.0.100:9200","http://10.0.0.101:9200","http://10.0.0.102:9200"]
i18n.locale: "zh-CN"
root@es-node1:~# systemctl enable kibana.service
root@es-node1:~# systemctl start kibana.service
root@es-node1:~# ss -tnl | grep 5601
LISTEN 0 511 10.0.0.100:5601 0.0.0.0:*
5.3、Install nginx + keepalived for high availability and kibana access authentication
Here the nginx and keepalived services are installed on es-node2 and es-node3.
root@es-node2:~# apt -y install keepalived
root@es-node2:~# cp /usr/share/doc/keepalived/samples/keepalived.conf.sample /etc/keepalived/keepalived.conf
root@es-node2:~# vim /etc/keepalived/keepalived.conf
root@es-node2:~# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
18473514861@163.com
}
notification_email_from 1916829748@qq.com
smtp_server smtp.qq.com
smtp_connect_timeout 30
router_id es-node2 #change this on the other host
vrrp_skip_check_adv_addr
vrrp_garp_interval 0
vrrp_gna_interval 0
vrrp_mcast_group4 234.0.0.100
}
vrrp_instance VI_1 {
state MASTER #change to BACKUP on the other host
interface ens32
virtual_router_id 66
priority 100 #use a lower value on the other host
advert_int 1
authentication {
auth_type PASS
auth_pass pxt521314
}
virtual_ipaddress {
10.0.0.188 dev ens32 label ens32:1
}
}
root@es-node2:~# systemctl restart keepalived.service
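After restarting keepalived, you can confirm that the VIP is bound on the MASTER node (a sketch using the interface and VIP configured above):
root@es-node2:~# ip addr show ens32 | grep 10.0.0.188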
root@es-node2:~# cd /data/service/
root@es-node2:/data/service# ls
elasticsearch nginx-1.22.tar.gz
root@es-node2:/data/service# tar xf nginx-1.22.tar.gz
root@es-node2:/data/service# rm -f nginx-1.22.tar.gz
root@es-node2:/data/service# cd nginx/
root@es-node2:/data/service/nginx# ls
client_body_temp conf conf.d fastcgi_temp html logs nginx.service proxy_temp sbin scgi_temp uwsgi_temp
root@es-node2:/data/service/nginx# ln -s /data/service/nginx/sbin/nginx /usr/bin/nginx
root@es-node2:/data/service/nginx# apt -y install apache2-utils
root@es-node2:/data/service/nginx# htpasswd -cb /data/service/nginx/conf.d/.htpasswd zhang pxt521314
Adding password for user zhang
root@es-node2:/data/service/nginx# htpasswd -b /data/service/nginx/conf.d/.htpasswd test zxcvbnm08
Adding password for user test
root@es-node2:/data/service/nginx# tail /data/service/nginx/conf.d/.htpasswd
zhang:$apr1$N5hhjVGj$gqRnNwODTdNiktGMdTKEh/
test:$apr1$QnT30nKc$8eWpsHWtssq57lSHhDNlY1
root@es-node2:/data/service/nginx# vi conf.d/kibana.conf
upstream kibana_server {
server 10.0.0.100:5601 max_fails=3 fail_timeout=30s;
server 10.0.0.101:5601 max_fails=3 fail_timeout=30s;
server 10.0.0.102:5601 max_fails=3 fail_timeout=30s;
}
server {
listen 80;
server_name elk.stars.com;
auth_basic "login password";
auth_basic_user_file /data/service/nginx/conf.d/.htpasswd;
location / {
proxy_pass http://kibana_server;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
}
}
root@es-node2:/data/service/nginx# nginx -t
root@es-node2:/data/service/nginx# mv nginx.service /lib/systemd/system/nginx.service
root@es-node2:/data/service/nginx# systemctl daemon-reload
root@es-node2:/data/service/nginx# systemctl enable nginx.service
root@es-node2:/data/service/nginx# systemctl start nginx.service
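The reverse proxy and the basic-auth protection can be tested directly against the VIP before any name resolution is set up (a sketch using the zhang account created with htpasswd above):
root@es-node2:/data/service/nginx# curl -u zhang:pxt521314 -H "Host: elk.stars.com" -I http://10.0.0.188/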
On the Windows host, add a local resolution entry to the hosts file that points elk.stars.com at the VIP added above; the Windows hosts file is located in C:\Windows\System32\drivers\etc.
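The hosts entry would look like this (using the VIP configured above):
10.0.0.188 elk.stars.com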
5.4、Display the data written into elasticsearch by logstash in kibana
You can install the es-client browser plugin to check whether the indices have been created.
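Alternatively, the indices can be listed directly from the elasticsearch HTTP API (a sketch; any cluster node address works):
root@es-node1:~# curl "http://10.0.0.100:9200/_cat/indices?v"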