First, deploy multiple master nodes. Each master runs keepalived, which monitors the node's health and manages a virtual IP (VIP): when the master holding the VIP fails, the VIP floats to another master. Each master also runs haproxy, a load balancer that spreads apiserver requests across the masters.

The cluster built below consists of two master nodes and one worker node, plus a virtual IP. The VIP can be any unused address, but it must be in the same subnet as the master nodes.
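For concreteness, here is an example address plan (the 192.168.44.x addresses are illustrative assumptions, except master2's 192.168.44.156, which is taken from an scp command later in this guide):

192.168.44.155    master1
192.168.44.156    master2
192.168.44.157    node1
192.168.44.158    VIP (an unused address in the same subnet)

Values like these are written as placeholders such as <master1 ip> in the rest of the guide.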

I Initialize the environment and deploy keepalived

# Environment initialization: run on ALL nodes

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config  # permanent (takes effect after reboot)
setenforce 0  # temporary (current boot)

# Disable swap
swapoff -a  # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab    # permanent

# Set the hostname on each node according to your plan
hostnamectl set-hostname <hostname>

# Add hosts entries on the master nodes
cat >> /etc/hosts << EOF
<VIP (same subnet as the masters)>    master.k8s.io   k8s-vip
<master1 ip>    master01.k8s.io master1
<master2 ip>    master02.k8s.io master2
<node1 ip>    node01.k8s.io   node1
EOF

# Pass bridged IPv4 traffic to iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system  # apply
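If the two bridge settings fail to apply ("No such file or directory"), the br_netfilter kernel module is likely not loaded yet. Whether this step is needed depends on the OS image; it is an assumption, not part of the original guide:

# load the bridge netfilter module, then re-apply the sysctl settings
modprobe br_netfilter
sysctl --system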

# Synchronize time
yum install ntpdate -y
ntpdate time.windows.com
# Deploy keepalived

# Install dependencies and keepalived
yum install -y conntrack-tools libseccomp libtool-ltdl

yum install -y keepalived

# master1 configuration: set the VIP below and the interface name (replace ens33 with your real NIC name if it differs)
cat > /etc/keepalived/keepalived.conf <<EOF 
! Configuration File for keepalived

global_defs {
   router_id k8s
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state MASTER 
    interface ens33 
    virtual_router_id 51
    priority 250
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        <the VIP configured in /etc/hosts>
    }
    track_script {
        check_haproxy
    }

}
EOF
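A note on the health check (an explanation, not part of the original config): killall -0 haproxy sends no real signal and simply exits non-zero when no haproxy process exists; after ten consecutive failures (fall 10) keepalived lowers this node's priority by 2 (weight -2). The check can be tried by hand:

# exits 0 only while at least one haproxy process is running
killall -0 haproxy && echo "haproxy alive" || echo "haproxy down"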


# master2 configuration: same as master1 except state BACKUP and a lower priority (200 vs 250); again set the VIP and the interface name (replace ens33 if needed)
cat > /etc/keepalived/keepalived.conf <<EOF 
! Configuration File for keepalived

global_defs {
   router_id k8s
}

vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP 
    interface ens33 
    virtual_router_id 51
    priority 200
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass ceb1b3ec013d66163d6ab
    }
    virtual_ipaddress {
        <the VIP configured in /etc/hosts>
    }
    track_script {
        check_haproxy
    }

}
EOF
# 在两个master节点启动keepalived

# 启动keepalived
$ systemctl start keepalived.service
设置开机启动
$ systemctl enable keepalived.service
# 查看启动状态
$ systemctl status keepalived.service
# 查看keepalived 虚拟ip配置,会发现当前虚拟ip在master1上,当master1发生故障时候,配置的虚拟ip会漂移到master2上

[root@localhost ~]# ip a s ens33
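A quick optional failover test, assuming the VIP currently sits on master1:

# on master1: stop keepalived; its VRRP advertisements cease
systemctl stop keepalived
# on master2: the VIP should show up on ens33 within a few seconds
ip a s ens33
# on master1: restart keepalived; the VIP returns because priority 250 > 200
systemctl start keepalived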

II Deploy haproxy

yum install -y haproxy

The configuration is identical on both master nodes. It declares the two master apiservers as backends and binds haproxy to port 16443, so 16443 is the cluster's entry point (the apiservers themselves listen on 6443).

# Configuration file: replace the two server addresses below with the master IPs from /etc/hosts

cat > /etc/haproxy/haproxy.cfg << EOF
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon 
       
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------  
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#--------------------------------------------------------------------- 
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver    
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server      master01.k8s.io   <master1 ip>:6443 check
    server      master02.k8s.io   <master2 ip>:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
EOF
# Start haproxy on both master nodes

# Enable at boot
$ systemctl enable haproxy
# Start haproxy
$ systemctl start haproxy
# Check the status
$ systemctl status haproxy
# Check the listening ports

netstat -lntup|grep haproxy
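The stats listener declared above offers another sanity check; the credentials and URI are the ones set in the config. Note that the two apiserver backends will show DOWN until kube-apiserver is started in the following sections:

curl -u admin:awesomePassword 'http://127.0.0.1:1080/admin?stats'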

III Install Docker/kubeadm/kubelet on all nodes

1 Install Docker

$ wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
$ yum -y install docker-ce-18.06.1.ce-3.el7
$ systemctl enable docker && systemctl start docker
$ docker --version
Docker version 18.06.1-ce, build e68fc7a


# Configure a registry mirror
$ cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
EOF

# Restart Docker
systemctl restart docker
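To confirm the mirror is active (the exact output layout may vary by Docker version):

docker info | grep -A1 'Registry Mirrors'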

2 Add the Aliyun Kubernetes YUM repository

$ cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

3 Install kubeadm, kubelet, and kubectl

$ yum install -y kubelet-1.16.3 kubeadm-1.16.3 kubectl-1.16.3
$ systemctl enable kubelet
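# Note: kubelet is only enabled here; it will keep restarting until
# kubeadm init/join generates its configuration, which is expected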

IV Deploy the master node that holds the VIP

Run the following on the master that currently holds the VIP, here master1.

$ mkdir /usr/local/kubernetes/manifests -p

$ cd /usr/local/kubernetes/manifests/

$ vi kubeadm-config.yaml

# kubeadm-config.yaml: three addresses must be edited, top to bottom: the VIP, master1's IP, and master2's IP

apiServer:
  certSANs:
    - master1
    - master2
    - master.k8s.io
    - <the VIP>
    - <IP of the master holding the VIP (normally master1)>
    - <master2 ip>
    - 127.0.0.1
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "master.k8s.io:16443"
controllerManager: {}
dns: 
  type: CoreDNS
etcd:
  local:    
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.16.3
networking: 
  dnsDomain: cluster.local  
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.1.0.0/16
scheduler: {}
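Optionally, pre-pull the control-plane images before initializing; this surfaces registry problems early and honors the imageRepository set above:

kubeadm config images pull --config kubeadm-config.yaml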
# Initialize the cluster
kubeadm init --config kubeadm-config.yaml

Save the full output of the init command; the join commands printed there are needed later. Following the hints in that output, configure kubectl:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
$ kubectl get nodes
$ kubectl get pods -n kube-system

The node reports NotReady because no network plugin is installed yet; install flannel:

mkdir flannel
cd flannel
wget -c https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
# Check progress; if an image pull fails, remove the manifest and apply it again
kubectl get pods -n kube-system

# Remove
[root@master1 flannel]# kubectl delete -f kube-flannel.yml

# Re-apply until all pods are Running; the node then reports Ready
[root@master1 flannel]# kubectl apply -f kube-flannel.yml
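If a pod stays stuck in ImagePullBackOff or Error, kubectl describe usually shows the failing image pull (substitute the real pod name):

kubectl describe pod <flannel pod name> -n kube-system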

V Deploy the remaining master nodes

1 Join master2 to the cluster

# Copy keys and certificates from master1 to master2

ssh root@<master2 ip> mkdir -p /etc/kubernetes/pki/etcd

scp /etc/kubernetes/admin.conf root@<master2 ip>:/etc/kubernetes

scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@<master2 ip>:/etc/kubernetes/pki

scp /etc/kubernetes/pki/etcd/ca.* root@<master2 ip>:/etc/kubernetes/pki/etcd
# On master2, run the control-plane join command printed by kubeadm init on master1
kubeadm join master.k8s.io:16443 --token 36saf6.2g9j9yj2229tpsj7 \
    --discovery-token-ca-cert-hash sha256:27aa9520ec7d9085b433ca9e6b4eddcdeb78afad0709aac47de17725652ff7e9 \
    --control-plane
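The token and hash above come from master1's init output; because the certificates were copied over by hand, --control-plane works here without a certificate key. If the token has expired (default lifetime is 24h), print a fresh worker join command on master1 and append --control-plane to it:

kubeadm token create --print-join-command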
# As prompted in master2's join output, configure kubectl on master2:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Check the status
[root@master1 flannel]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
master1   Ready    master   45m     v1.16.3
master2   Ready    master   5m34s   v1.16.3
[root@master1 flannel]# kubectl get pods --all-namespaces
NAMESPACE     NAME                              READY   STATUS    RESTARTS   AGE
kube-system   coredns-58cc8c89f4-7bmcb          1/1     Running   0          45m
kube-system   coredns-58cc8c89f4-cnlrk          1/1     Running   0          45m
kube-system   etcd-master1                      1/1     Running   0          44m
kube-system   etcd-master2                      1/1     Running   0          5m52s
kube-system   kube-apiserver-master1            1/1     Running   0          44m
kube-system   kube-apiserver-master2            1/1     Running   0          5m52s
kube-system   kube-controller-manager-master1   1/1     Running   1          44m
kube-system   kube-controller-manager-master2   1/1     Running   0          5m53s
kube-system   kube-flannel-ds-gbmfd             1/1     Running   0          5m53s
kube-system   kube-flannel-ds-l7x4m             1/1     Running   0          27m
kube-system   kube-proxy-hjr5j                  1/1     Running   0          45m
kube-system   kube-proxy-qth9t                  1/1     Running   0          5m53s
kube-system   kube-scheduler-master1            1/1     Running   1          44m
kube-system   kube-scheduler-master2            1/1     Running   0          5m53s

VI Deploy the worker node

On node1, run the worker join command printed by kubeadm init:

kubeadm join master.k8s.io:16443 --token 36saf6.2g9j9yj2229tpsj7 \
    --discovery-token-ca-cert-hash sha256:27aa9520ec7d9085b433ca9e6b4eddcdeb78afad0709aac47de17725652ff7e9
# On a master, check the node list; node1 becomes Ready after a short while
[root@master1 ~]# kubectl get nodes
NAME      STATUS     ROLES    AGE    VERSION
master1   Ready      master   111m   v1.16.3
master2   Ready      master   71m    v1.16.3
node1     NotReady   <none>   24s    v1.16.3
[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
master1   Ready    master   113m    v1.16.3
master2   Ready    master   73m     v1.16.3
node1     Ready    <none>   2m39s   v1.16.3

At this point the cluster is up.

Test the cluster by deploying Nginx

[root@master1 ~]# kubectl create deploy nginx --image=nginx
deployment.apps/nginx created
[root@master1 ~]# kubectl expose deploy nginx --port=80 --target-port=80 --type=NodePort
service/nginx exposed
[root@master1 ~]# kubectl get pods,svc
NAME                         READY   STATUS              RESTARTS   AGE
pod/nginx-86c57db685-bmm6k   0/1     ContainerCreating   0          51s

NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.1.0.1     <none>        443/TCP        116m
service/nginx        NodePort    10.1.72.72   <none>        80:30237/TCP   14s

Access nginx at:

http://<any node IP>:<NodePort>
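With the NodePort 30237 shown in the output above, for example (it should return the nginx welcome page once the pod is Running):

curl http://<node ip>:30237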
