31. ELK log collection



31.1 Log collection approaches

 

1. Node-level collection: deploy the log-collection process as a DaemonSet and collect json-file type logs (standard output /dev/stdout and error output /dev/stderr).

 

2. Sidecar collection: run a sidecar container (one pod, multiple containers) that collects the logs of one or more business containers in the same pod, usually sharing the log directory between the business container and the sidecar via an emptyDir volume.

 

3. Build the log-collection process directly into the business container image.

31.2 DaemonSet log collection

logstash (in-container collector) --> kafka/zookeeper --> logstash (filter and write) --> ES cluster

  • Mount the logs onto the host and collect them from there

 

Running the log-collection service as a DaemonSet mainly covers the following types of logs:

 

1. Node-level collection: a DaemonSet-deployed collector gathers json-file type logs (standard output /dev/stdout and error output /dev/stderr), i.e. the stdout and stderr produced by the applications.

 

Because containers write their logs to stdout/stderr, the container runtime's log driver and log format must be switched to json-file in advance.

 

Implementation:

 

Once the container logs are written as json-file on the node, the host log directories are mounted into the logstash container, which filters them and ships them onward; that is the whole collection path (see the shell sketch after the comparison table below).

  • Host system logs and other logs saved as plain files on the node

Comparison of container log handling: containerd vs docker

containerd
  Log storage path:
    Real path: /var/log/pods/CONTAINER_NAMES
    Symlinks: kubelet also creates symlinks under /var/log/containers pointing to /var/log/pods/CONTAINER_NAMES
  Log configuration:
    Config file: /etc/systemd/system/kubelet.service
    Parameters:
      --container-log-max-files=5
      --container-log-max-size="100Mi"
      --logging-format="json"

docker
  Log storage path:
    Real path: /var/lib/docker/containers/CONTAINERID
    Symlinks: kubelet creates symlinks under /var/log/pods and /var/log/containers pointing to /var/lib/docker/containers/CONTAINERID
  Log configuration:
    Config file: /etc/docker/daemon.json
    Parameters:
      "log-driver": "json-file",
      "log-opts": {
        "max-file": "5",
        "max-size": "100m"
      }
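The paths above can be checked directly on a node; a minimal sketch (run as root on a worker node, output depends on the runtime in use):

    # docker nodes: confirm the log driver really is json-file
    docker info --format '{{.LoggingDriver}}'

    # docker json-file logs: one JSON record (log/stream/time) per line
    tail -n 2 /var/lib/docker/containers/*/*-json.log

    # containerd nodes: container stdout/stderr is written under /var/log/pods,
    # and kubelet keeps symlinks under /var/log/containers pointing at those files
    ls -l /var/log/pods/
    ls -l /var/log/containers/ | head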

    • Dockerfile
     
    root@k8s-master1:~1.logstash-image-Dockerfile# cat Dockerfile 
     
    FROM logstash:7.12.1
     
     
     
    USER root
     
    WORKDIR /usr/share/logstash 
     
    #RUN rm -rf config/logstash-sample.conf
     
    ADD logstash.yml /usr/share/logstash/config/logstash.yml
     
    ADD logstash.conf /usr/share/logstash/pipeline/logstash.conf 
    • logstash.conf
     
    #the collection paths below are paths on the host (node)
     
    root@k8s-master1:~1.logstash-image-Dockerfile# cat logstash.conf 
     
    input {
      file {
        #docker path
        #path => "/var/lib/docker/containers/*/*-json.log" #docker
        #containerd path
        path => "/var/log/pods/*/*/*.log"
        #if logs already exist, collect them from the beginning; the default is to start from the end
        start_position => "beginning"
        #containerd application logs are tagged with this type
        type => "jsonfile-daemonset-applog"
      }

      file {
        #also collect the host system logs; the host path is mounted in via the k8s YAML
        path => "/var/log/*.log"
        start_position => "beginning"
        #host system logs are tagged with this type
        type => "jsonfile-daemonset-syslog"
      }
    }

    output {
    if [type] == "jsonfile-daemonset-applog" {
        kafka {
          #KAFKA_SERVER variable defined in the k8s YAML
          bootstrap_servers => "${KAFKA_SERVER}"
          #TOPIC_ID defined in the k8s YAML
          topic_id => "${TOPIC_ID}"
          #amount of data logstash sends per batch, in bytes
          batch_size => 16384
          #json codec
          codec => "${CODEC}"
       } }

    if [type] == "jsonfile-daemonset-syslog" {
        kafka {
          bootstrap_servers => "${KAFKA_SERVER}"
          topic_id => "${TOPIC_ID}"
          batch_size => 16384
          codec => "${CODEC}" #system logs are not in json format
      }}
    }
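Before baking this file into the image, its syntax can be checked with logstash's config test flag; a minimal sketch, assuming logstash 7.12.1 is available (for example inside the base image used above):

    # exits 0 if logstash.conf parses cleanly; does not start the pipeline
    /usr/share/logstash/bin/logstash -f /usr/share/logstash/pipeline/logstash.conf --config.test_and_exit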
    • logstash.yml
     
    root@k8s-master1:~1.logstash-image-Dockerfile# cat logstash.yml 
     
    http.host: "0.0.0.0"
     
    #this address is commented out; xpack here is the security/monitoring integration
     
    #xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
    • build-command.sh
     
    root@k8s-master1:~1.logstash-image-Dockerfile# cat build-commond.sh 
     
    #!/bin/bash
     
     
     
    #docker build -t harbor.nbrhce.com/baseimages/logstash:v7.12.1-json-file-log-v4 .
     
     
     
    #docker push harbor.nbrhce.com/baseimages/logstash:v7.12.1-json-file-log-v4
     
     
     
    nerdctl build -t harbor.nbrhce.com/baseimages/logstash:v7.12.1-json-file-log-v1 .
     
     
     
    nerdctl push harbor.nbrhce.com/baseimages/logstash:v7.12.1-json-file-log-v1
    • k8s YAML: DaemonSet-logstash (in-container collection)
     
    root@k8s-master1:~/20220821/ELK/1.daemonset-logstash# cat 2.DaemonSet-logstash.yaml 
     
    apiVersion: apps/v1
     
    kind: DaemonSet
     
    metadata:
     
      name: logstash-elasticsearch
     
      namespace: kube-system
     
      labels:
     
        k8s-app: logstash-logging
     
    spec:
     
      selector:
     
        matchLabels:
     
          name: logstash-elasticsearch
     
      template:
     
        metadata:
     
          labels:
     
            name: logstash-elasticsearch
     
        spec:
     
          tolerations:
     
    # this toleration is to have the daemonset runnable on master nodes
     
    # remove it if your masters can't run pods
     
          - key: node-role.kubernetes.io/master
     
            operator: Exists
     
            effect: NoSchedule
     
          containers:
     
          - name: logstash-elasticsearch
     
            image: harbor.nbrhce.com/baseimages/logstash:v7.12.1-json-file-log-v1 
     
            env:
            - name: "KAFKA_SERVER"
              value: "172.31.4.101:9092,172.31.4.102:9092,172.31.4.103:9092"
            - name: "TOPIC_ID"
              value: "jsonfile-log-topic"
            - name: "CODEC"
              value: "json"
     
    #        resources:
     
    #          limits:
     
    #            cpu: 1000m
     
    #            memory: 1024Mi
     
    #          requests:
     
    #            cpu: 500m
     
    #            memory: 1024Mi
     
            volumeMounts:
            #host system log mount path
            - name: varlog
              #host system log mount point
              mountPath: /var/log
            #container log mount path; must match the collection path in the logstash config file
            - name: varlibdockercontainers
              #mountPath: /var/lib/docker/containers   #docker mount path
              #containerd mount path; must be identical to logstash's collection path
              mountPath: /var/log/pods
              readOnly: false
     
          terminationGracePeriodSeconds: 30
     
          volumes:
          #mount the host system logs into the logstash container so they can be collected
          - name: varlog
            hostPath:
              path: /var/log
          #mount the host's containerd logs into logstash
          - name: varlibdockercontainers
            hostPath:
              #path: /var/lib/docker/containers   #docker
              path: /var/log/pods
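A short sketch of deploying and checking the DaemonSet (file name as shown above, label taken from the manifest):

    kubectl apply -f 2.DaemonSet-logstash.yaml

    # one collector pod per node, masters included because of the toleration
    kubectl -n kube-system get pods -l name=logstash-elasticsearch -o wide

    # confirm the collectors are reading /var/log/pods and shipping to kafka
    kubectl -n kube-system logs -l name=logstash-elasticsearch --tail=20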
    • logstash filter pipeline (conf)
     
    #this runs as a separate logstash instance: it consumes from kafka, filters, and writes to the ES cluster
     
    root@k8s-master1:~1.daemonset-logstash# cat 3.logsatsh-daemonset-jsonfile-kafka-to-es.conf 
     
    input {
      kafka {
        #kafka cluster addresses
        bootstrap_servers => "172.31.4.101:9092,172.31.4.102:9092,172.31.4.103:9092"
        #topics to consume from
        topics => ["jsonfile-log-topic"]
        #json codec
        codec => "json"
      }
    }
     
     
     
    output {
    #if [fields][type] == "app1-access-log" {
    if [type] == "jsonfile-daemonset-applog" {
        elasticsearch {
          hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
          #the index is created automatically if it does not exist
          index => "jsonfile-daemonset-applog-%{+YYYY.MM.dd}"
        }}

    if [type] == "jsonfile-daemonset-syslog" {
        elasticsearch {
          hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
          index => "jsonfile-daemonset-syslog-%{+YYYY.MM.dd}"
        }}
    }
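This filter pipeline normally runs on a standalone logstash host; a hedged sketch of starting it and confirming that the indices appear in ES (addresses as used above):

    /usr/share/logstash/bin/logstash -f 3.logsatsh-daemonset-jsonfile-kafka-to-es.conf

    # the daily indices should show up once data flows through kafka
    curl -s http://172.31.2.101:9200/_cat/indices?v | grep jsonfile-daemonset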

31.3 Sidecar container log collection

    • Overview: a lightweight log-collection container
    Use a sidecar container (one pod, multiple containers) to collect the logs of one or more business containers in the same pod, usually sharing the log directory between the business container and the sidecar via an emptyDir volume.
     
    Container filesystems are isolated from each other, so emptyDir is used to share the logs: the business container's log path is mounted onto the emptyDir, and the sidecar collects from that same emptyDir path.
     
    Advantage: log collection can be tailored per service, giving finer-grained control.
     
    Disadvantage: it consumes extra resources, and existing workloads have to be modified to add the sidecar container to each pod.
    • Dockerfile for building the image
     
    root@k8s-master1:~2.sidecar-logstash/1.logstash-image-Dockerfile# cat Dockerfile 
     
    FROM logstash:7.12.1
     
     
     
    USER root
     
    WORKDIR /usr/share/logstash 
     
    #RUN rm -rf config/logstash-sample.conf
     
    ADD logstash.yml /usr/share/logstash/config/logstash.yml
     
    ADD logstash.conf /usr/share/logstash/pipeline/logstash.conf 
    • logstash.yml
     
    root@k8s-master1:~2.sidecar-logstash/1.logstash-image-Dockerfile# cat logstash.yml 
     
    http.host: "0.0.0.0"
     
    #xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]
    • logstash.conf
     
    root@k8s-master1:~2.sidecar-logstash/1.logstash-image-Dockerfile# cat logstash.conf 
     
    input {
      file {
        path => "/var/log/applog/catalina.out"
        start_position => "beginning"
        type => "app1-sidecar-catalina-log"
      }
      file {
        path => "/var/log/applog/localhost_access_log.*.txt"
        start_position => "beginning"
        type => "app1-sidecar-access-log"
      }
    }
     
     
     
    output {
    if [type] == "app1-sidecar-catalina-log" {
        kafka {
          bootstrap_servers => "${KAFKA_SERVER}"
          topic_id => "${TOPIC_ID}"
          #amount of data logstash sends per batch, in bytes
          batch_size => 16384
          codec => "${CODEC}"
       } }

    if [type] == "app1-sidecar-access-log" {
        kafka {
          bootstrap_servers => "${KAFKA_SERVER}"
          topic_id => "${TOPIC_ID}"
          batch_size => 16384
          codec => "${CODEC}"
      }}
    }
    • tomcat.yaml
     
    root@k8s-master1:~/20220821/ELK/2.sidecar-logstash# cat 2.tomcat-app1.yaml 
     
    kind: Deployment
     
    #apiVersion: extensions/v1beta1
     
    apiVersion: apps/v1
     
    metadata:
      labels:
        app: magedu-tomcat-app1-deployment-label
    #name of this deployment
      name: magedu-tomcat-app1-deployment
      namespace: magedu
     
    spec:
     
      replicas: 3
     
      selector:
     
        matchLabels:
     
          app: magedu-tomcat-app1-selector
     
      template:
     
        metadata:
     
          labels:
     
            app: magedu-tomcat-app1-selector
     
        spec:
     
          containers:
     
          - name: sidecar-container
     
            image: harbor.magedu.net/baseimages/logstash:v7.12.1-sidecar
     
            imagePullPolicy: IfNotPresent
     
    #imagePullPolicy: Always
     
    #these environment variables are passed through to kafka by the sidecar's logstash.conf
            env:
            - name: "KAFKA_SERVER"
              value: "172.31.4.101:9092,172.31.4.102:9092,172.31.4.103:9092"
            - name: "TOPIC_ID"
              value: "tomcat-app1-topic"
            - name: "CODEC"
              value: "json"
    #mount path inside the sidecar container -- must match the collection paths in its logstash.conf
     
            volumeMounts:
     
            - name: applogs
     
              mountPath: /var/log/applog
     
          - name: magedu-tomcat-app1-container
     
            image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/tomcat-app1:v1
     
            imagePullPolicy: IfNotPresent
     
    #imagePullPolicy: Always
     
            ports:
     
            - containerPort: 8080
     
              protocol: TCP
     
              name: http
     
            env:
            - name: "password"
              value: "123456"
            - name: "age"
              value: "18"
            resources:
              limits:
                cpu: 1
                memory: "512Mi"
              requests:
                cpu: 500m
                memory: "512Mi"
     
            volumeMounts:
     
            - name: applogs
     
              mountPath: /apps/tomcat/logs
     
            startupProbe:
     
              httpGet:
     
                path: /myapp/index.html
     
                port: 8080
     
              initialDelaySeconds: 5    #delay before the first probe, 5s
              failureThreshold: 3       #failures before the probe flips from success to failed
              periodSeconds: 3          #probe interval
     
            readinessProbe:
     
              httpGet:
     
    #path: /monitor/monitor.html
     
                path: /myapp/index.html
     
                port: 8080
     
              initialDelaySeconds: 5
     
              periodSeconds: 3
     
              timeoutSeconds: 5
     
              successThreshold: 1
     
              failureThreshold: 3
     
            livenessProbe:
     
              httpGet:
     
    #path: /monitor/monitor.html
     
                path: /myapp/index.html
     
                port: 8080
     
              initialDelaySeconds: 5
     
              periodSeconds: 3
     
              timeoutSeconds: 5
     
              successThreshold: 1
     
              failureThreshold: 3
     
          volumes:
    #emptyDir shared between the business container and the sidecar, so the sidecar can collect the business container's logs
          - name: applogs
            emptyDir: {}
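To confirm the emptyDir sharing works, the pod can be deployed and the sidecar inspected; a minimal sketch using the labels from the manifest above:

    kubectl apply -f 2.tomcat-app1.yaml
    kubectl -n magedu get pods -l app=magedu-tomcat-app1-selector

    # the sidecar should see the tomcat logs through the shared emptyDir volume
    POD=$(kubectl -n magedu get pod -l app=magedu-tomcat-app1-selector -o jsonpath='{.items[0].metadata.name}')
    kubectl -n magedu exec -it $POD -c sidecar-container -- ls /var/log/applog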
31.4 Filebeat in-container process collection

    • Dockerfile: filebeat is added when the business image is built
    root@k8s-master1:~/20220821/ELK/3.container-filebeat-process/1.webapp-filebeat-image-Dockerfile# cat Dockerfile 
     
    #tomcat web1
     
    FROM harbor.magedu.net/pub-images/tomcat-base:v8.5.43 
     
     
     
    ADD catalina.sh /apps/tomcat/bin/catalina.sh
     
    ADD server.xml /apps/tomcat/conf/server.xml
     
    #ADD myapp/* /data/tomcat/webapps/myapp/
     
    ADD myapp.tar.gz /data/tomcat/webapps/myapp/
     
    ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh
     
    ADD filebeat.yml /etc/filebeat/filebeat.yml 
     
    RUN chown  -R tomcat.tomcat /data/ /apps/
     
    #ADD filebeat-7.5.1-x86_64.rpm /tmp/
     
    #RUN cd /tmp && yum localinstall -y filebeat-7.5.1-amd64.deb
     
     
     
    EXPOSE 8080 8443
     
     
     
    CMD ["/apps/tomcat/bin/run_tomcat.sh"]
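The build step for this image is not shown in the section; a sketch following the same pattern as build-command.sh above (the tag matches the one referenced in the Deployment below):

    nerdctl build -t harbor.magedu.net/magedu/tomcat-app1:v1-filebeat .
    nerdctl push harbor.magedu.net/magedu/tomcat-app1:v1-filebeat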
    • filebeat configuration file
     
    root@k8s-master1:~1.webapp-filebeat-image-Dockerfile# cat filebeat.yml 
     
    #log inputs
    filebeat.inputs:
    - type: log
      #enabled turns this input on; if it is not true the input is not loaded
      enabled: true
      paths:
        #business container runtime log
        - /apps/tomcat/logs/catalina.out
      fields:
        #custom type/name for this input
        type: filebeat-tomcat-catalina
    - type: log
      #a second input for the access log
      enabled: true
      paths:
        - /apps/tomcat/logs/localhost_access_log.*.txt
      fields:
        type: filebeat-tomcat-accesslog

    #default module configuration, can be left untouched
    filebeat.config.modules:
      path: ${path.config}/modules.d/*.yml
      reload.enabled: false

    setup.template.settings:
      index.number_of_shards: 1

    setup.kibana:

    #where to ship the logs
    output.kafka:
      hosts: ["172.31.4.101:9092"]
      #require an ack to guarantee delivery
      required_acks: 1
      #kafka topic to write to
      topic: "filebeat-magedu-app1"
      #compression saves bandwidth at the cost of CPU
      compression: gzip
      #maximum message size in bytes
      max_message_bytes: 1000000

    #output.redis:
    #  hosts: ["172.31.2.105:6379"]
    #  key: "k8s-magedu-app1"
    #  db: 1
    #  timeout: 5
    #  password: "123456"
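The configuration can be validated with filebeat's built-in test subcommands (run inside the image, using the binary path from run_tomcat.sh below):

    /usr/share/filebeat/bin/filebeat test config -c /etc/filebeat/filebeat.yml
    # checks connectivity to the kafka brokers defined in output.kafka
    /usr/share/filebeat/bin/filebeat test output -c /etc/filebeat/filebeat.yml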
    • Startup script: run_tomcat.sh
     
    root@k8s-master1:~1.webapp-filebeat-image-Dockerfile# cat run_tomcat.sh 
     
    #!/bin/bash
     
    #echo "nameserver 223.6.6.6" > /etc/resolv.conf
     
    #echo "192.168.7.248 k8s-vip.example.com" >> /etc/hosts
     
     
     
    /usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -path.home /usr/share/filebeat -path.config /etc/filebeat -path.data /var/lib/filebeat -path.logs /var/log/filebeat &
     
    su - tomcat -c "/apps/tomcat/bin/catalina.sh start"
     
    tail -f /etc/hosts
    • k8s filebeat service account
     
    #if filebeat were deployed as a daemonset it would need this authorization; since filebeat here runs inside the pod, applying this service account can be skipped for now
     
    root@k8s-master1:~3.container-filebeat-process# cat 2.filebeat-serviceaccount.yaml 
     
    ---
     
    apiVersion: rbac.authorization.k8s.io/v1
     
    kind: ClusterRole
     
    metadata:
     
      name: filebeat-serviceaccount-clusterrole
     
      labels:
     
        k8s-app: filebeat-serviceaccount-clusterrole
     
    rules:
     
    - apiGroups: [""] # "" indicates the core API group
     
      resources:
     
      - namespaces
     
      - pods
     
      - nodes
     
      verbs:
     
      - get
     
      - watch
     
      - list
     
     
     
    ---
     
    apiVersion: rbac.authorization.k8s.io/v1
     
    kind: ClusterRoleBinding
     
    metadata:
     
      name: filebeat-serviceaccount-clusterrolebinding
     
    subjects:
     
    - kind: ServiceAccount
     
      name: default
     
      namespace: magedu
     
    roleRef:
     
      kind: ClusterRole
     
      name: filebeat-serviceaccount-clusterrole
     
      apiGroup: rbac.authorization.k8s.io
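If the RBAC objects are applied, the grant can be verified with kubectl auth; a minimal sketch:

    kubectl apply -f 2.filebeat-serviceaccount.yaml
    # should print "yes" once the ClusterRoleBinding is in place
    kubectl auth can-i list pods --as=system:serviceaccount:magedu:default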
    • k8s YAML: Deployment
     
    root@k8s-master1:~3.container-filebeat-process# cat 3.tomcat-app1.yaml 
     
    kind: Deployment
     
    #apiVersion: extensions/v1beta1
     
    apiVersion: apps/v1
     
    metadata:
     
      labels:
     
        app: magedu-tomcat-app1-filebeat-deployment-label
     
      name: magedu-tomcat-app1-filebeat-deployment
     
      namespace: magedu
     
    spec:
     
      replicas: 1
     
      selector:
     
        matchLabels:
     
          app: magedu-tomcat-app1-filebeat-selector
     
      template:
     
        metadata:
     
          labels:
     
            app: magedu-tomcat-app1-filebeat-selector
     
        spec:
     
          containers:
     
          - name: magedu-tomcat-app1-filebeat-container
     
            image: harbor.magedu.net/magedu/tomcat-app1:v1-filebeat 
     
            imagePullPolicy: IfNotPresent
     
    #imagePullPolicy: Always
     
            ports:
     
            - containerPort: 8080
     
              protocol: TCP
     
              name: http
     
            env:
            - name: "password"
              value: "123456"
            - name: "age"
              value: "18"
            resources:
              limits:
                cpu: 1
                memory: "512Mi"
              requests:
                cpu: 500m
                memory: "512Mi"
    • service.yaml
     
    #NodePort service for testing
     
    root@k8s-master1:~3.container-filebeat-process# cat 4.tomcat-service.yaml 
     
    ---
     
    kind: Service
     
    apiVersion: v1
     
    metadata:
     
      labels:
     
        app: magedu-tomcat-app1-filebeat-service-label
     
      name: magedu-tomcat-app1-filebeat-service
     
      namespace: magedu
     
    spec:
     
      type: NodePort
     
      ports:
     
      - name: http
     
        port: 80
     
        protocol: TCP
     
        targetPort: 8080
     
        nodePort: 30092
     
      selector:
     
        app: magedu-tomcat-app1-filebeat-selector
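With the Deployment and Service applied, a few requests through the NodePort generate access-log entries for filebeat to ship; a short sketch (NODE_IP is a placeholder for any node address):

    kubectl apply -f 3.tomcat-app1.yaml -f 4.tomcat-service.yaml
    # writes entries to localhost_access_log.*.txt, which filebeat forwards to kafka
    curl -s http://NODE_IP:30092/myapp/index.html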
    • logstash configuration: kafka to ES
     
    root@k8s-master1:~3.container-filebeat-process# cat 5.logstash-filebeat-process-kafka-to-es.conf 
     
    input {
      kafka {
        bootstrap_servers => "172.31.4.101:9092,172.31.4.102:9092,172.31.4.103:9092"
        topics => ["filebeat-magedu-app1"]
        codec => "json"
      }
    }
     
     
     
    output {
    if [fields][type] == "filebeat-tomcat-catalina" {
        elasticsearch {
          hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
          index => "filebeat-tomcat-catalina-%{+YYYY.MM.dd}"
        }}

    if [fields][type] == "filebeat-tomcat-accesslog" {
        elasticsearch {
          hosts => ["172.31.2.101:9200","172.31.2.102:9200"]
          index => "filebeat-tomcat-accesslog-%{+YYYY.MM.dd}"
        }}
    }
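End-to-end delivery can be checked on the kafka side and in ES; a hedged sketch (the kafka script path is an assumption, addresses as above):

    /usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 172.31.4.101:9092 \
      --topic filebeat-magedu-app1 --from-beginning --max-messages 5

    # once the logstash pipeline above is running, the daily indices should appear
    curl -s http://172.31.2.101:9200/_cat/indices?v | grep filebeat-tomcat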
    • Every tragic narrative comes down to a lack of basic conditioning.