Elasticsearch 重点,存储空间 分布式集群:负责日志检索和储存
Logstash 根据压力负载情况,多节点:负责日志的收集和分析、处理
Kibana 单节点:负责日志的可视化集群部署分配
se1 192.168.4.61 数据库分布式集群
se2 192.168.4.62 数据库分布式集群
se3 192.168.4.63 数据库分布式集群
se4 192.168.4.64 数据库分布式集群
se5 192.168.4.65 数据库分布式集群
kibana 192.168.4.66 日志的可视化(图表)
logstash 192.168.4.67 收集分析,处理日志 在真机上将所需要的包传过来
[root@room9pc01 ~]# for i in {61..67}
> do
> scp '/root/桌面/elk.tar' '/root/桌面/elasticsearch-2.3.4.rpm' 192.168.4.$i:/root/
> done
部署
[root@se1 ~]# cd /etc/yum.repos.d/
[root@se1 yum.repos.d]# ls
[root@se1 yum.repos.d]# vim dvd.repo
[root@se1 yum.repos.d]# cat dvd.repo
[dvd]
name=dvd
baseurl=ftp://192.168.4.254/centos
enabled=1
gpgcheck=0
[root@se1 ~]# vim /etc/hosts
192.168.4.61 se1
192.168.4.62 se2
192.168.4.63 se3
192.168.4.64 se4
192.168.4.65 se5
[root@se1 ~]# yum -y install java-1.8.0-openjdk.x86_64
[root@se1 ~]# java -version
[root@se1 ~]# sestatus #查看selinux状态
[root@se1 ~]# yum -y install elasticsearch-2.3.4.rpm
[root@se1 ~]# vim /etc/elasticsearch/elasticsearch.yml
17 cluster.name: myelk //配置集群名字
23 node.name: se1 //当前主机名称
54 network.host: 0.0.0.0 // 0.0.0.0(监听所有地址)
68 discovery.zen.ping.unicast.hosts: ["se1", "se2","se3"]
[root@se1 ~]# systemctl restart elasticsearch
[root@se1 ~]# systemctl enable elasticsearch
[root@se1 ~]# ss -antup | grep 9200测试,看是否安装成功
[root@se1 ~]# firefox http://192.168.4.61:9200/
[root@se1 ~]# firefox http://192.168.4.61:9200/_cluster/health?pretty在其他四台虚拟机上安装elasticsearch
[root@se2 ~]# yum -y install java-1.8.0-openjdk elasticsearch-2.3.4.rpm
[root@se3 ~]# yum -y install java-1.8.0-openjdk elasticsearch-2.3.4.rpm
[root@se4 ~]# yum -y install java-1.8.0-openjdk elasticsearch-2.3.4.rpm
[root@se5 ~]# yum -y install java-1.8.0-openjdk elasticsearch-2.3.4.rpm
在其他四台虚拟机上更改/etc/elasticsearch/elasticsearch.yml文件
[root@se1 ~]# for i in {62..65}
> do
> scp /etc/elasticsearch/elasticsearch.yml \
> 192.168.4.$i:/etc/elasticsearch/elasticsearch.yml
> done[root@se2 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se2
[root@se3 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se3
[root@se4 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se4
[root@se5 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se5
在se1上将/etc/hosts文件传给其他四台虚拟机
[root@se1 ~]# for i in {62..65}
> do
> scp /etc/hosts 192.168.4.$i:/etc/
> done将yum源传给所有的虚拟机
[root@se1 ~]# for i in {62..67}
> do
> scp /etc/yum.repos.d/dvd.repo 192.168.4.$i:/etc/yum.repos.d/
> done修改其他四台虚拟机的/etc/elasticsearch/elasticsearch.yml
[root@se2 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se2
[root@se3 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se3
[root@se4 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se4
[root@se5 ~]# vim /etc/elasticsearch/elasticsearch.yml
node.name: se5启动其他四台的elasticsearch服务,并设置为开机自启
[root@se2 ~]# systemctl restart elasticsearch
[root@se2 ~]# systemctl enable elasticsearch
[root@se3 ~]# systemctl restart elasticsearch
[root@se3 ~]# systemctl enable elasticsearch
[root@se4 ~]# systemctl restart elasticsearch
[root@se4 ~]# systemctl enable elasticsearch
[root@se5 ~]# systemctl restart elasticsearch
[root@se5 ~]# systemctl enable elasticsearch再次测试
[root@se1 ~]# firefox http://192.168.4.62:9200/_cluster/health?pretty
{
"cluster_name" : "myelk", #集群名称
"status" : "green", #集群状态
"timed_out" : false,
"number_of_nodes" : 5, #集群的节点数
"number_of_data_nodes" : 5,
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
curl命令的使用
curl 常用参数介绍:
-A 修改请求 agent
-X 设置请求方法
-i 显示返回头信息
http的请求方法:
常用方法 GET,POST,HEAD
其他方法 OPTIONS,PUT,DELETE,TRACE和CONNECT
ES常用:
PUT --增
DELETE --删
POST --改
GET --查索引的分片信息
[root@se1 ~]# curl -X GET http://192.168.4.61:9200/_cathealth的详细信息
[root@se1 ~]# curl -X GET http://192.168.4.61:9200/_cat/health?vnodes的帮助
[root@se1 ~]# curl -X GET http://192.168.4.61:9200/_cat/nodes?help
在se5上部署插件
安装head插件
在真机上将elk.tar解压到/var/ftp/elk/下面
[root@se5 ~]# cd /usr/share/elasticsearch/bin/
[root@se5 bin]# ./plugin install \ #安装head插件
> ftp://192.168.4.254/elk/elasticsearch-head-master.zip[root@se5 bin]# ./plugin install \ #安装bigdesk插件
> ftp://192.168.4.254/elk/bigdesk-master.zip[root@se5 bin]# ./plugin install \ #安装kopf插件
> ftp://192.168.4.254/elk/elasticsearch-kopf-master.zip[root@se5 bin]# ./plugin list #查看插件
Installed plugins in /usr/share/elasticsearch/plugins:
- head
- bigdesk
- kopf访问插件
[root@se5 bin]# firefox http://192.168.4.65:9200/_plugin/head
[root@se5 bin]# firefox http://192.168.4.65:9200/_plugin/kopf
[root@se5 bin]# firefox http://192.168.4.65:9200/_plugin/bigdeskES数据库的基本操作
创建索引
[root@se5 bin]# curl -XPUT 'http://192.168.4.65:9200/hehe/' -d '{
> "settings":{
> "index":{
> "number_of_shards": 5,
> "number_of_replicas": 1
> }
> }
> }'增加
[root@se5 bin]# curl -XPUT 'http://192.168.4.65:9200/tarena/teacher/1' -d '{
> "title":"测试",
> "name":{"first":"小明","last":"赵明"},
> "age":25
> }'[root@se5 bin]# curl -XPUT 'http://192.168.4.65:9200/tarena/teacher/1' -d '{
> "title":"测试",
> "name":{"first":"小明","last":"赵明"},
> "age":25
> }'[root@se5 bin]# curl -XPUT 'http://192.168.4.65:9200/tarena/teacher/2' -d '{
> "title":"测试1",
> "name":{"first":"小张","last":"赵张"},
> "age":20
> }'[root@se5 bin]# curl -XPUT 'http://192.168.4.65:9200/tarena/teacher/3' -d '{
> "title":"测试3",
> "name":{"first":"小侯","last":"赵侯"},
> "age":18
> }'[root@se5 bin]# curl -XPUT 'http://192.168.4.65:9200/tarena/teacher/4' -d '{
> "title":"测试4",
> "name":{"first":"小雄","last":"赵雄"},
> "age":23
> }'改
[root@se5 bin]# curl -XPOST 'http://192.168.4.65:9200/tarena/teacher/3/_update' -d '{
"doc":{
"age":18
}
} '查
[root@se5 bin]# curl -XGET 'http://192.168.4.65:9200/tarena/teacher/1'
{"_index":"tarena","_type":"teacher","_id":"1","_version":1,"found":true,"_source":{
"title":"测试",
"name":{"first":"小明","last":"赵明"},
"age":25删
[root@se5 bin]# curl -XDELETE 'http://192.168.4.65:9200/tarena/teacher/1'
kibana 安装配置
在真机上将elk.tar传到66和67上
[root@room9pc01 ~]# scp '/root/桌面/elk/elk 1/elk.tar' 192.168.4.66:/root/
[root@room9pc01 ~]# scp '/root/桌面/elk/elk 1/elk.tar' 192.168.4.67:/root/[root@kibana ~]# tar -xf elk.tar
[root@kibana ~]# yum -y install kibana-4.5.2-1.x86_64.rpm
[root@kibana ~]# rpm -qc kibana
/opt/kibana/config/kibana.yml[root@kibana ~]# vim /opt/kibana/config/kibana.yml
server.port: 5601 #不能用80,会出现错误
server.host: "0.0.0.0" #服务器监听地址
elasticsearch.url: "http://192.168.4.61:9200" #声明ES地址,se1~se5中任意一个即可
kibana.index: ".kibana" #kibana自己创建的索引
kibana.defaultAppId: "discover" #打开kibana页面时,默认打开的页面discover
elasticsearch.pingTimeout: 1500 #ping检测超时时间
elasticsearch.requestTimeout: 30000 #请求超时
elasticsearch.startupTimeout: 5000 #启动超时[root@kibana ~]# systemctl restart kibana
[root@kibana ~]# systemctl enable kibana
[root@kibana ~]# ss -antulp | grep 5601
tcp LISTEN 0 128 *:5601 *:* users:(("node",pid=5344,fd=11))测试
[root@kibana ~]# firefox 192.168.4.66:5601
页面>点击status>全部为绿色,才证明kibana安装成功用head插件访问会有.kibana的索引信息
[root@se5 ~]# firefox http://192.168.4.65:9200/_plugin/head/ 数据的导入
[root@kibana ~]# gzip -d logs.jsonl.gz
[root@kibana ~]# gzip -d accounts.json.gz
[root@kibana ~]# gzip -d shakespeare.json.gz
[root@kibana ~]# curl -X POST "http://192.168.4.61:9200/_bulk" \
--data-binary @shakespeare.json #url 编码使用data-binary导入含有index配置的json文件[root@kibana ~]# curl -X POST "http://192.168.4.61:9200/xixi/haha/_bulk" \
--data-binary @accounts.json [root@kibana ~]# curl -X POST "http://192.168.4.61:9200/_bulk" \
> --data-binary @logs.jsonl GET查询
[root@kibana ~]# curl -X GET "http://192.168.4.61:9200/_mget?pretty" -d '{
> "docs":[
> {
> "_index":"shakespeare",
> "_type":"act",
> "_id":0
> },
> {
> "_index":"shakespeare",
> "_type":"line",
> "_id":0
> },
> {
> "_index":"xixi",
> "_type":"haha",
> "_id":25
> }
> ]
> }'查询所得到的结果
{
"docs" : [ {
"_index" : "shakespeare",
"_type" : "act",
"_id" : "0",
"_version" : 1,
"found" : true,
"_source" : {
"line_id" : 1,
"play_name" : "Henry IV",
"speech_number" : "",
"line_number" : "",
"speaker" : "",
"text_entry" : "ACT I"
}
}, {
"_index" : "shakespeare",
"_type" : "act",
"_id" : "0",
"_version" : 1,
"found" : true,
"_source" : {
"line_id" : 1,
"play_name" : "Henry IV",
"speech_number" : "",
"line_number" : "",
"speaker" : "",
"text_entry" : "ACT I"
}
}, {
"_index" : "xixi",
"_type" : "haha",
"_id" : "25",
"_version" : 1,
"found" : true,
"_source" : {
"account_number" : 25,
"balance" : 40540,
"firstname" : "Virginia",
"lastname" : "Ayala",
"age" : 39,
"gender" : "F",
"address" : "171 Putnam Avenue",
"employer" : "Filodyne",
"email" : "virginiaayala@filodyne.com",
"city" : "Nicholson",
"state" : "PA"
}
} ]
} 查看logs是否导入成功(里面有三天的日志,logstash-2015.05.20/19/18)
[root@se5 ~]# firefox http://192.168.4.65:9200/_plugin/head/
kibana导入数据
[root@kibana ~]# firefox http://192.168.4.66:5601
成功创建会有logstash-*(第一个空格),>在下一个空格要选择@timestamp>点击绿色的create来创建
>之后点击Discover>点击右上角的Last 15 minutes>点击左列的Absolute>选择时间2015-5-15到2015-5-22>点击Go>会显示日志信息
做其他图形
点击上方的Visualize>点击Pie chart制作饼状图>点击From a new search>点击Split Slices>
选择Aggregation下的Terms,选择Field下的memory>点击Options右边的播放键>点击右上角的保存按钮>点击Save保存查看制作的图形
点击上方的Dashboard>点击+>点击当时制作图形时的名字(New Visualization)>点击保存按钮,点击Save保存
综合练习
练习插件
安装一台Apache服务并配置
使用filebeat收集Apache服务器的日志
使用grok处理filebeat发送过来的日志
存入elasticsearch[root@logstash ~]# vim /etc/hosts
192.168.4.61 se1
192.168.4.62 se2
192.168.4.63 se3
192.168.4.64 se4
192.168.4.65 se5
192.168.4.66 kibana
192.168.4.67 logstash安装logstash
[root@logstash ~]# tar -xf elk.tar
[root@logstash ~]# yum -y install logstash-2.3.4-1.noarch.rpm
[root@logstash ~]# yum -y install java-1.8.0-openjdk
[root@logstash ~]# java -version
[root@logstash ~]# touch /etc/logstash/logstash.conf
[root@logstash ~]# /opt/logstash/bin/logstash --version
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
logstash 2.3.4[root@logstash ~]# /opt/logstash/bin/logstash-plugin list #查看插件
logstash-input-stdin #标准输入插件
logstash-output-stdout #标准输出插件[root@logstash ~]# vim /etc/logstash/logstash.conf
input{
stdin{
}
}
filter{
}
output{
stdout{
}
}启动测试,logstash配置从标准输入读取输入源,然后从标准输出到屏幕
[root@logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
Settings: Default pipeline workers: 1
Pipeline main started
aa
2018-11-09T12:36:06.608Z logstash aa
hello
2018-11-09T12:36:20.541Z logstash hello 插件
插件文档
https://github.com/logstash-plugins codec类插件
[root@logstash ~]# vim /etc/logstash/logstash.conf
input{
stdin{
codec => "json" #输入设置为编码json
}
}
filter{
}
output{
stdout{
codec => "rubydebug" #输出设置为rubydebug
}
} 测试
[root@logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
Settings: Default pipeline workers: 1
Pipeline main started
{
"message" => "\e[A\e[B",
"tags" => [
[0] "_jsonparsefailure"
],
"@version" => "1",
"@timestamp" => "2018-11-09T12:42:07.766Z",
"host" => "logstash"
} file模块插件
[root@logstash ~]# vim /etc/logstash/logstash.conf
input{
file {
path => [ "/tmp/a.log", "/var/tmp/b.log" ]
sincedb_path => "/var/lib/logstash/sincedb" //记录读取文件的位置
start_position => "beginning" //配置第一次读取文件从什么地方开始
type => "testlog" //类型名称
}
}
filter{
}
output{
stdout{
codec => "rubydebug"
}
} [root@logstash ~]# touch /tmp/a.log
[root@logstash ~]# touch /var/tmp/b.log
[root@logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
Settings: Default pipeline workers: 1
Pipeline main started
{
"message" => "a1",
"@version" => "1",
"@timestamp" => "2018-11-09T12:51:45.647Z",
"path" => "/tmp/a.log",
"host" => "logstash",
"type" => "testlog"
}
{
"message" => "b1",
"@version" => "1",
"@timestamp" => "2018-11-09T12:51:57.700Z",
"path" => "/var/tmp/b.log",
"host" => "logstash",
"type" => "testlog"
}另开一个终端:写入数据(屏幕输出会出现上面的两段内容)
[root@logstash ~]# echo a1 > /tmp/a.log
[root@logstash ~]# echo b1 > /var/tmp/b.log tcp、udp模块插件
[root@logstash ~]# vim /etc/logstash/logstash.conf
input{
file {
path => [ "/tmp/a.log", "/var/tmp/b.log" ]
sincedb_path => "/var/lib/logstash/sincedb"
start_position => "beginning"
type => "testlog"
}
tcp {
host => "0.0.0.0"
port => "8888"
type => "tcplog"
}
udp {
host => "0.0.0.0"
port => "9999"
type => "udplog"
}
}
filter{
}
output{
stdout{
codec => "rubydebug"
}
}
[root@logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
Settings: Default pipeline workers: 1
Pipeline main started另开一个终端查看,可以看到端口
[root@logstash ~]# netstat -antup | grep 8888
tcp6 0 0 :::8888 :::* LISTEN 7608/java
[root@logstash ~]# netstat -antup | grep 9999
udp6 0 0 :::9999 :::* 7608/java 在另一台主机上写一个脚本,发送数据,使启动的logstash可以接收到数据
[root@se5 ~]# vim tcp.sh
function sendmsg(){
if [[ "$1" == "tcp" ]];then
exec 9<>/dev/tcp/192.168.4.67/8888
else
exec 9<>/dev/udp/192.168.4.67/9999
fi
echo "$2" >&9
exec 9<&-
} [root@se5 ~]# . tcp.sh //重新载入一下
[root@se5 ~]# sendmsg udp "is tcp test"
[root@se5 ~]# sendmsg udp "is tcp ss"
输出结果如下
{
"message" => "is tcp test\n",
"@version" => "1",
"@timestamp" => "2018-11-09T13:00:54.172Z",
"type" => "udplog",
"host" => "192.168.4.65"
}
{
"message" => "is tcp ss\n",
"@version" => "1",
"@timestamp" => "2018-11-09T13:01:13.008Z",
"type" => "udplog",
"host" => "192.168.4.65"
} syslog插件练习
[root@logstash ~]# systemctl list-unit-files | grep syslog
rsyslog.service enabled
syslog.socket static
[root@logstash ~]# vim /etc/logstash/logstash.confinput{
file {
path => [ "/tmp/a.log", "/var/tmp/b.log" ]
sincedb_path => "/var/lib/logstash/sincedb"
start_position => "beginning"
type => "testlog"
}
tcp {
host => "0.0.0.0"
port => "8888"
type => "tcplog"
}
udp {
host => "0.0.0.0"
port => "9999"
type => "udplog"
}
syslog {
port => "514"
type => "syslog"
}
}
filter{
}
output{
stdout{
codec => "rubydebug"
}
}另一个终端查看是否检测到514
[root@logstash ~]# netstat -antup | grep 514
tcp6 0 0 :::514 :::* LISTEN 7800/java
udp6 0 0 :::514 :::* 7800/java 另一台主机上面操作,本地写的日志本地可以查看
[root@se5 ~]# vim /etc/rsyslog.conf
local0.info /var/log/mylog #自己添加这一行
[root@se5 ~]# systemctl restart rsyslog #重启rsyslog
[root@se5 ~]# ll /var/log/mylog
ls: 无法访问/var/log/mylog: 没有那个文件或目录
[root@se5 ~]# logger -p local0.info -t nsd "elk" #写日志
[root@se5 ~]# ll /var/log/mylog #再次查看,有文件
-rw-------. 1 root root 29 11月 9 21:11 /var/log/mylog
[root@se5 ~]# tail /var/log/mylog #可以查看到写的日志
Nov 9 21:11:50 se5 nsd: elk
[root@se5 ~]# tail /var/log/messages #可以查看到写的日志,因为配置文件里有写以.info结尾的可以收到
Nov 9 21:01:02 localhost systemd: Started Session 62 of user root.
Nov 9 21:01:02 localhost systemd: Starting Session 62 of user root.
Nov 9 21:10:01 localhost systemd: Started Session 63 of user root.
Nov 9 21:10:01 localhost systemd: Starting Session 63 of user root.
Nov 9 21:11:25 se5 rsyslogd: [origin software="rsyslogd" swVersion="8.24.0" x-pid="669" x-info="http://www.rsyslog.com"] exiting on signal 15.
Nov 9 21:11:25 se5 systemd: Stopping System Logging Service...
Nov 9 21:11:25 se5 systemd: Starting System Logging Service...
Nov 9 21:11:25 se5 rsyslogd: [origin software="rsyslogd" swVersion="8.24.0" x-pid="26754" x-info="http://www.rsyslog.com"] start
Nov 9 21:11:25 se5 systemd: Started System Logging Service.
Nov 9 21:11:50 se5 nsd: elk把本地的日志发送给远程4.67
[root@se5 ~]# vim /etc/rsyslog.conf
local0.info @192.168.4.67:514
//写一个@或两个@@都可以,一个@代表udp,两个@@代表tcp
[root@se5 ~]# systemctl restart rsyslog
[root@se5 ~]# logger -p local0.info -t nds "001 elk"[root@logstash bin]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
{
"message" => "001 elk",
"@version" => "1",
"@timestamp" => "2018-11-09T13:15:48.000Z",
"type" => "syslog",
"host" => "192.168.4.65",
"priority" => 134,
"timestamp" => "Nov 9 21:15:48",
"logsource" => "se5",
"program" => "nds",
"severity" => 6,
"facility" => 16,
"facility_label" => "local0",
"severity_label" => "Informational"
}rsyslog.conf配置向远程发送数据,远程登陆4.65的时侯,把登陆日志的信息(/var/log/secure)转发给logstash即4.67这台机器
[root@se5 ~]# vim /etc/rsyslog.conf
57 #authpriv.* /var/log/secure
58 authpriv.* @@192.168.4.67:514
[root@se5 ~]# systemctl restart rsyslog[root@logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
Settings: Default pipeline workers: 1
Pipeline main started
{
"message" => "Unregistered Authentication Agent for unix-process:27287:2804379 (system bus name :1.173, object path /org/freedesktop/PolicyKit1/AuthenticationAgent, locale zh_CN.UTF-8) (disconnected from bus)\n",
"@version" => "1",
"@timestamp" => "2018-11-09T13:20:22.000Z",
"type" => "syslog",
"host" => "192.168.4.65",
"priority" => 85,
"timestamp" => "Nov 9 21:20:22",
"logsource" => "se5",
"program" => "polkitd",
"pid" => "696",
"severity" => 5,
"facility" => 10,
"facility_label" => "security/authorization",
"severity_label" => "Notice"
} filter grok插件
[root@logstash ~]# vim /etc/logstash/logstash.conf
input{
stdin{ codec => "json" }
file {
path => [ "/tmp/a.log", "/var/tmp/b.log" ]
sincedb_path => "/var/lib/logstash/sincedb"
start_position => "beginning"
type => "testlog"
}
tcp {
host => "0.0.0.0"
port => "8888"
type => "tcplog"
}
udp {
host => "0.0.0.0"
port => "9999"
type => "udplog"
}
syslog {
port => "514"
type => "syslog"
}
}
filter{
grok{
match => ["message", "(?<key>reg)"]
}
}
output{
stdout{
codec => "rubydebug"
} [root@se5 ~]# yum -y install httpd
[root@se5 ~]# systemctl restart httpd
[root@se5 ~]# vim /var/log/httpd/access_log
192.168.4.254 - - [15/Sep/2018:18:25:46 +0800] "GET / HTTP/1.1" 403 4897 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"复制/var/log/httpd/access_log的日志到logstash下的/tmp/a.log
[root@logstash ~]# vim /tmp/a.log
192.168.4.254 - - [15/Sep/2018:18:25:46 +0800] "GET / HTTP/1.1" 403 4897 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"}
出现message的日志,但是没有解析是什么意思
[root@logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
Settings: Default pipeline workers: 1
Pipeline main started
{
"message" => "192.168.4.254 - - [15/Sep/2018:18:25:46 +0800] \"GET / HTTP/1.1\" 403 4897 \"-\" \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0\"",
"@version" => "1",
"@timestamp" => "2018-11-09T13:26:25.746Z",
"path" => "/tmp/a.log",
"host" => "logstash",
"type" => "testlog",
"tags" => [
[0] "_grokparsefailure"
]
} 若要解决没有解析的问题,同样的方法把日志复制到/tmp/a.log,logstash.conf配置文件里面修改grok
[root@se5 ~]# vim /var/log/httpd/access_log
192.168.4.254 - - [15/Sep/2018:18:25:46 +0800] "GET / HTTP/1.1" 403 4897 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"[root@logstash ~]# vim /tmp/a.log #新加一条日志
192.168.4.254 - - [15/Sep/2018:18:25:46 +0800] "GET / HTTP/1.1" 403 4897 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"192.168.4.254 - - [15/Sep/2018:18:25:46 +0800] "GET / HTTP/1.1" 403 4897 "-" "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0"
[root@se5 ~]# systemctl restart rsyslog
[root@logstash ~]# vim /etc/logstash/logstash.conf
...
filter{
grok{
match => ["message", "%{COMBINEDAPACHELOG}"]
}
}
... [root@logstash ~]# /opt/logstash/bin/logstash -f /etc/logstash/logstash.conf
OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N
Settings: Default pipeline workers: 1
Pipeline main started
{
"message" => "192.168.4.254 - - [15/Sep/2018:18:25:46 +0800] \"GET / HTTP/1.1\" 403 4897 \"-\" \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0\"",
"@version" => "1",
"@timestamp" => "2018-11-10T01:24:21.233Z",
"path" => "/tmp/a.log",
"host" => "logstash",
"type" => "testlog",
"clientip" => "192.168.4.254",
"ident" => "-",
"auth" => "-",
"timestamp" => "15/Sep/2018:18:25:46 +0800",
"verb" => "GET",
"request" => "/",
"httpversion" => "1.1",
"response" => "403",
"bytes" => "4897",
"referrer" => "\"-\"",
"agent" => "\"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0\""
}安装Apache服务,用filebeat收集Apache服务器的日志,存入elasticsearch
[root@se5 ~]# systemctl restart httpd
[root@se5 ~]# ss -antulp | grep 80
[root@se5 ~]# tar -xf elk.tar
[root@se5 ~]# ls
[root@se5 ~]# yum -y install filebeat-1.2.3-x86_64.rpm
[root@se5 ~]# vim /etc/filebeat/filebeat.yml
paths:
- /var/log/httpd/access_log //日志的路径,短横线加空格代表yml格式
document_type: apachelog //文档类型
elasticsearch: //加上注释
hosts: ["localhost:9200"] //加上注释
logstash: //去掉注释
hosts: ["192.168.4.67:5044"] //去掉注释,logstash那台主机的ip
[root@se5 ~]# systemctl start filebeat[root@logstash ~]# vim /etc/logstash/logstash.conf
input{
stdin{ codec => "json" }
beats{
port => 5044
}
file {
path => [ "/tmp/a.log", "/var/tmp/b.log" ]
sincedb_path => "/dev/null"
start_position => "beginning"
type => "testlog"
}
tcp {
host => "0.0.0.0"
port => "8888"
type => "tcplog"
}
udp {
host => "0.0.0.0"
port => "9999"
type => "udplog"
}
syslog {
port => "514"
type => "syslog"
}
}
filter{
if [type] == "apachelog"{
grok{
match => ["message", "%{COMBINEDAPACHELOG}"]
}}
}
output{
stdout{ codec => "rubydebug" }
if [type] == "filelog"{
elasticsearch {
hosts => ["192.168.4.61:9200", "192.168.4.62:9200"]
index => "filelog"
flush_size => 2000
idle_flush_time => 10
}}
}[root@logstash logstash]# /opt/logstash/bin/logstash \
-f /etc/logstash/logstash.conf打开另一终端查看5044是否成功启动
[root@logstash ~]# netstat -antup | grep 5044
tcp6 0 0 :::5044 :::* LISTEN 2148/java[root@se5 ~]# firefox 192.168.4.65
回到原来的终端,有数据[root@logstash ~]# vim /etc/logstash/logstash.conf
.........
output{
stdout{ codec => "rubydebug" }
if [type] == "apachelog"{
elasticsearch {
hosts => ["192.168.4.61:9200", "192.168.4.62:9200"]
index => "apachelog"
flush_size => 2000
idle_flush_time => 10
}}
}浏览器访问Elasticsearch,有apachelog
[root@se5 ~]# firefox http://192.168.4.65:9200/_plugin/head
ELK集群调优
转载文章标签 ELK集群调优 elasticsearch vim firefox 文章分类 运维
上一篇:java 邮箱授权码有效期 过期
下一篇:python 主函数传参执行
-
Linux系统性能调优技巧
本文简要介绍了Linux系统性能调优的一些技巧。
文件系统 缓存 页面缓存