Quickly Deploying an Elasticsearch Cluster with Docker Compose (3-Node Cluster)

As a search engine, the basic requirement we place on Elasticsearch is that it store massive amounts of data and return the information we want in a very short time. The first thing we therefore need to guarantee is Elasticsearch's high availability. What is high availability? It usually means reducing, by design, the amount of time a system cannot provide service. If a system can always provide service, we say its availability is 100%. If the system goes down at some point, for example a website becomes unreachable for a while, we say it is temporarily unavailable. So, to keep Elasticsearch highly available, we should minimize the time Elasticsearch is unavailable.

 

Preface

For any index, Elasticsearch has a dedicated indicator of index health, with three levels:

  • green. All primary shards and replica shards have been allocated. Your cluster is 100% operational.
  • yellow. All primary shards have been allocated, but at least one replica is missing. No data has been lost, so search results are still complete. However, your high availability is weakened to some degree. If more shards disappear, you will start losing data, so think of yellow as a warning that should be investigated promptly.
  • red. At least one primary shard and all of its replicas are missing. This means you are missing data: searches will return only partial results, and write requests routed to that shard will return an exception.

If you have only one host, index health will be yellow as well, because with a single host the cluster has no other host on which to place replicas; that is an unhealthy state, which is why a cluster is well worth having. Besides, since it is a cluster, the storage space is pooled: while a single host has a fixed amount of storage, a cluster offers more storage space than a single host and can hold more data.
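
As a quick check, the cluster health API reports this status directly. A minimal example (the port assumes the 9200:9200 mapping used in the compose file below):

curl "http://localhost:9200/_cluster/health?pretty"

The "status" field of the response will be green, yellow or red, alongside the number of nodes and unassigned shards.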

 

Docker Compose Configuration File

version: '3.7'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch1
    environment:
      - node.name=elasticsearch1
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms256M -Xmx256M"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=_eth0_
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata1
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
      - ingress
    ports:
      - 9200:9200
      - 9300:9300
  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch2
    environment:
      - node.name=elasticsearch2
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms256M -Xmx256M"
      - "discovery.zen.ping.unicast.hosts=elasticsearch1"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=_eth0_
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata2
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
      - ingress
    ports:
      - 9201:9200
  elasticsearch3:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch3
    environment:
      - node.name=elasticsearch3
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms256M -Xmx256M"
      - "discovery.zen.ping.unicast.hosts=elasticsearch1"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=_eth0_
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 10s
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata3
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
      - ingress
    ports:
      - 9202:9200
  kibana:
    image: docker.elastic.co/kibana/kibana:7.8.0
    container_name: kibana
    environment:
      SERVER_NAME: localhost
      ELASTICSEARCH_HOSTS: http://elasticsearch1:9200
    ports:
      - 5601:5601
    volumes:
      - type: volume
        source: logs
        target: /var/log
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    deploy:
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      resources:
        limits:
          cpus: '1'
          memory: 256M
        reservations:
          cpus: '1'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 30s
        max_attempts: 3
        window: 120s
    networks:
      - elastic
      - ingress
  auditbeat:
    image: docker.elastic.co/beats/auditbeat:7.8.0
    command: auditbeat -e -strict.perms=false
    user: root
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    cap_add: ['AUDIT_CONTROL', 'AUDIT_READ']
    pid: "host"
    volumes:
    #   - ${PWD}/configs/auditbeat.docker.yml:/usr/share/auditbeat/auditbeat.yml
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - elastic
  metricbeat:
    image: docker.elastic.co/beats/metricbeat:7.8.0
    # command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    cap_add:
      - AUDIT_CONTROL
      - AUDIT_READ
    volumes:
      # - ${PWD}/configs/metricbeat.docker.yml:/usr/share/metricbeat/metricbeat.yml
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - /sys/fs/cgroup:/hostfs/sys/fs/cgroup:ro
      - /proc:/hostfs/proc:ro
      - /:/hostfs:ro
    networks:
      - elastic

  heartbeat:
    image: docker.elastic.co/beats/heartbeat:7.8.0
    command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    # volumes:
    #   - ${PWD}/configs/heartbeat.docker.yml:/usr/share/heartbeat/heartbeat.yml
    networks:
      - elastic

  packetbeat:
    image: docker.elastic.co/beats/packetbeat:7.8.0
    command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    cap_add:
      - NET_RAW
      - NET_ADMIN
    # volumes:
    #   - ${PWD}/configs/packetbeat.docker.yml:/usr/share/packetbeat/packetbeat.yml
    networks:
      - elastic

  filebeat:
    image: docker.elastic.co/beats/filebeat:7.8.0
    command: --strict.perms=false
    environment:
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    ports:
      - 9000:9000
    volumes:
      # - ${PWD}/configs/filebeat.docker.yml:/usr/share/filebeat/filebeat.yml
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /var/run/docker.sock:/var/run/docker.sock
    networks:
      - elastic
  apmserver:
    image: docker.elastic.co/apm/apm-server:7.8.0
    command: --strict.perms=false
    ports:
      - 8200:8200
      - 8201:8200
    environment:
      - apm-server.host=0.0.0.0
      - setup.kibana.host=kibana:5601
      - output.elasticsearch.hosts=["elasticsearch:9200"]
    # volumes:
    #   - ${PWD}/configs/apm-server.yml:/usr/share/apm-server/apm-server.yml
    networks:
      - elastic
  app-search:
    image: docker.elastic.co/app-search/app-search:7.6.2
    ports:
      - 3002:3002
    environment:
      secret_session_key: supersecretsessionkey
      elasticsearch.host: http://elasticsearch1:9200/
      allow_es_settings_modification: "true"
    networks:
      - elastic
  nginx:
    image: nginx:latest
    ports:
      - 8881:80
    volumes:
      - ${PWD}/nginx-config/:/etc/nginx/conf.d/
    command: /bin/bash -c "nginx -g 'daemon off;'"
    ulimits:
      nproc: 65535
    networks:
      - ingress
volumes:
  esdata1:
  esdata2:
  esdata3:
  logs:

networks:
  elastic:
  ingress:

# configs:
#   auditbeat_config:
#     file: configs/auditbeat.docker.yml
#   filebeat_config:
#     file: configs/filebeat.docker.yml
#   heartbeat_config:
#     file: configs/heartbeat.docker.yml
#   metricbeat_config:
#     file: configs/metricbeat.docker.yml
#   packetbeat_config:
#     file: configs/packetbeat.docker.yml

Note: save this YAML in the elastic launch directory under the name docker-compose.yml. In the same directory, create an nginx-config directory and, inside it, a configuration file named nginx.conf with the contents shown in the next section.
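
For example, the layout from the note above can be prepared like this (a minimal sketch; adapt the paths to your environment):

mkdir -p elastic/nginx-config
cd elastic
# save the compose file shown above as docker-compose.yml
vim docker-compose.yml
# save the nginx configuration from the next section
vim nginx-config/nginx.conf
# optional: validate the compose file before starting anything
docker-compose config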

 

nginx.conf Configuration File

upstream kibana {
    server kibana:5601;
}

upstream elasticsearch {
    # inside the Docker network every node listens on 9200; 9201/9202 are only host port mappings
    server elasticsearch1:9200;
    server elasticsearch2:9200;
    server elasticsearch3:9200;
}

server {
    listen 80;

    location / {
        proxy_pass http://kibana;
        proxy_redirect off;
    }
    
    location /elasticsearch {
        proxy_pass            http://elasticsearch/;
        proxy_read_timeout    90;
        proxy_connect_timeout 90;
        proxy_set_header      Host $host;
        proxy_set_header      X-Real-IP $remote_addr;
        proxy_set_header      X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header      Connection "Keep-Alive";
        proxy_set_header      Proxy-Connection "Keep-Alive";
        proxy_set_header      Proxy "";
    }
}
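
Once the stack is running, the proxy can be sanity-checked from the host; 8881 is the host port mapped to the nginx container in the compose file above:

# Kibana through the proxy
curl -I http://localhost:8881/

# Elasticsearch through the proxy (the /elasticsearch prefix is rewritten to / by the location block)
curl http://localhost:8881/elasticsearch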

Directory Structure

elastic
├── docker-compose.yml
└── nginx-config
    └── nginx.conf

Note: the directory structure is as shown above. An images directory, .gitignore and README.md are not required; it does no harm to include them either.

 

Starting the Application

Run the following command:

docker-compose up -d

Note: the first run may need to download the images, which takes a while; subsequent starts are much faster.
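
To follow the startup, list the services and tail the logs of the first node (elasticsearch1 is the container_name defined in the compose file):

# list the services and their current state
docker-compose ps

# follow the logs of the first Elasticsearch node
docker logs -f elasticsearch1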

 

Cluster Information

Run the following command:

curl http://localhost:9200/_nodes?pretty=true

After the stack has started, run the command above to view the cluster's node information.
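
The _cat APIs give a more compact, human-readable view of the same information, for example:

# one line per node, including roles and which node is the elected master
curl "http://localhost:9200/_cat/nodes?v"

# one-line cluster health summary
curl "http://localhost:9200/_cat/health?v"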

 

(Screenshot: node information returned by the _nodes API)

 

Deployment Issues

  • Parameter conflict: discovery.zen.ping.unicast.hosts and discovery.seed_hosts cannot both be configured; comment out discovery.zen.ping.unicast.hosts.
  • After deploying with docker-compose, all the nodes went down after running for a while. Investigating the cause, it turned out that some system parameters on the host needed to be adjusted, as follows:


[root@localhost ~]# vim /etc/security/limits.conf
# append the following lines at the end of the file
elasticsearch soft nofile 65536
elasticsearch hard nofile 65536
elasticsearch soft nproc 4096
elasticsearch hard nproc 4096

[root@localhost security]# vim /etc/sysctl.conf
# append the following line at the end of the file
vm.max_map_count = 655360

[root@localhost security]# sysctl -p
# sysctl -p reloads the file and echoes the new value, confirming the change took effect
vm.max_map_count = 655360
  • The docker-compose.yml above starts far more services than most setups need; often Elasticsearch and Kibana are enough. The trimmed configuration file is given below.
version: '3.7'
services:
  elasticsearch1:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch1
    environment:
      - node.name=elasticsearch1
      - network.publish_host=elasticsearch1
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1,elasticsearch2,elasticsearch3
      - "discovery.seed_hosts=elasticsearch1,elasticsearch2,elasticsearch3"
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms256M -Xmx256M"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=0.0.0.0
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    # restart: always
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata1
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
    ports:
      - 9200:9200
      - 9300:9300

  elasticsearch2:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch2
    environment:
      - node.name=elasticsearch2
      - network.publish_host=elasticsearch2
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1,elasticsearch2,elasticsearch3
      - "discovery.seed_hosts=elasticsearch1,elasticsearch2,elasticsearch3"
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms256M -Xmx256M"  #- "discovery.zen.ping.unicast.hosts=elasticsearch1"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=0.0.0.0
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    # restart: always
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata2
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
    ports:
      - 9201:9200
      - 9301:9300

  elasticsearch3:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.8.0
    container_name: elasticsearch3
    environment:
      - node.name=elasticsearch3
      - network.publish_host=elasticsearch3
      - cluster.name=docker-cluster
      - cluster.initial_master_nodes=elasticsearch1,elasticsearch2,elasticsearch3
      - "discovery.seed_hosts=elasticsearch1,elasticsearch2,elasticsearch3"
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms256M -Xmx256M" #- "discovery.zen.ping.unicast.hosts=elasticsearch1"
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - network.host=0.0.0.0
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    # restart: always
    volumes:
      - type: volume
        source: logs
        target: /var/log
      - type: volume
        source: esdata3
        target: /usr/share/elasticsearch/data
    networks:
      - elastic
    ports:
      - 9202:9200
      - 9302:9300

  kibana:
    image: docker.elastic.co/kibana/kibana:7.8.0
    container_name: kibana
    environment:
      SERVER_NAME: localhost
      ELASTICSEARCH_HOSTS: http://elasticsearch1:9200
    ports:
      - 5601:5601
    volumes:
      - type: volume
        source: logs
        target: /var/log
    ulimits:
      nproc: 65535
      memlock:
        soft: -1
        hard: -1
    cap_add:
      - ALL
    networks:
      - elastic

volumes:
  esdata1:
  esdata2:
  esdata3:
  logs:

networks:
  elastic:
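
To tie this back to the green/yellow/red levels described in the preface, you can create a small test index (the name test-index and the shard settings below are only for illustration) and watch it go green once each replica has been allocated to a different node:

# create an index with 3 primary shards and 1 replica per primary
curl -X PUT "http://localhost:9200/test-index?pretty" \
  -H 'Content-Type: application/json' \
  -d '{"settings": {"number_of_shards": 3, "number_of_replicas": 1}}'

# the status field should report "green" once all replicas are assigned
curl "http://localhost:9200/_cluster/health/test-index?pretty"

With only a single node running, the same index would stay yellow, because the replicas would have nowhere to go.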