#This setup also applies to newer releases; the approach for 1.17-1.18.x is essentially the same

I. Basic preparation

1. Version compatibility

Kubernetes-to-Docker version compatibility list:

https://github.com/kubernetes/kubernetes/releases
https://blog.csdn.net/fanren224/article/details/86573264
Kubernetes 1.15.11 --> Docker 18.06 or 18.09

2. Base environment

#Host list: 3 masters, 1 worker node
192.168.1.10  VIP
192.168.1.11 k8s01 
192.168.1.12 k8s02
192.168.1.13 k8s03
192.168.1.14 node01

#On all hosts, stop the firewall and disable SELinux enforcement
systemctl stop firewalld && systemctl disable firewalld
setenforce 0

#Passwordless SSH from k8s01 to the other masters (k8s02, k8s03, and itself)
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.12
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.13
ssh-copy-id -i /root/.ssh/id_rsa.pub root@192.168.1.11
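#The copies above assume an RSA key pair already exists on k8s01; if not,
#generate one first (a non-interactive sketch, run once before the copies):
ssh-keygen -t rsa -f /root/.ssh/id_rsa -N ''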

#Configure /etc/hosts on all hosts
vim /etc/hosts
192.168.1.11 k8s01
192.168.1.12 k8s02
192.168.1.13 k8s03
192.168.1.14 node01


#Synchronize time on all hosts
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
chronyc tracking  # check clock tracking status
timedatectl set-ntp yes  # enable NTP time sync
chronyc sources -v # list the time sources
chronyc sourcestats -v # show per-source statistics
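#If the default pool servers are unreachable, point chronyd at a reachable NTP
#server; a minimal sketch assuming ntp.aliyun.com is reachable (substitute an
#internal NTP server on an offline network):
sed -i 's/^server /#server /' /etc/chrony.conf
echo 'server ntp.aliyun.com iburst' >> /etc/chrony.conf
systemctl restart chronyd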

3. System parameters

#Open-files limits on all hosts
cat >> /etc/security/limits.conf << EOF
* soft nproc 65536
* hard nproc 65536
* soft nofile 65536
* hard nofile 65536
EOF
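The limits only apply to new login sessions; after re-logging in, verify with:
ulimit -n   # expect 65536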

Check the system-wide file-handle maximum
sysctl -a|grep fs.file-max

#Enable IP forwarding on all hosts
cat >> /etc/sysctl.conf << EOF
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
vm.swappiness=0
EOF
modprobe br_netfilter
sysctl -p

#Load the IPVS modules on all hosts
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

#Disable swap on all hosts
swapoff -a
vim /etc/fstab   # comment out the swap entry
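A non-interactive alternative to editing /etc/fstab by hand (a sketch; it
comments out any uncommented line that mentions swap):
sed -ri 's/^([^#].*swap.*)$/#\1/' /etc/fstab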

4. Kubernetes package repositories

#Use the 163 mirror for the base repo on all hosts
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -Lo /etc/yum.repos.d/CentOS-Base.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo
yum clean all && yum repolist

#Configure the Kubernetes yum repo on all hosts
cat  > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

II. Master node installation

1. Install and configure the base Kubernetes components

#On all hosts: install kubelet/kubeadm/kubectl, enable kubelet, and disable swap
yum clean all && yum makecache fast
yum list kubeadm --showduplicates |sort -r
yum install -y kubelet-1.15.11-0.x86_64 kubeadm-1.15.11-0.x86_64 kubectl-1.15.11-0.x86_64
Check the kubelet startup parameters
systemctl cat kubelet.service
systemctl enable kubelet.service 

Configure kubelet not to fail when swap is detected
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"

2. Docker installation

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
List the available Docker versions
yum list docker-ce.x86_64 --showduplicates | sort -r
Install the pinned Docker version
yum makecache fast && yum -y install docker-ce-18.06.0.ce-3.el7
Registry mirror acceleration for mainland China (the accelerator URLs were elided here; substitute your own, e.g. an Aliyun accelerator address)
mkdir -p /etc/docker/
cat > /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https:///", "https://"]
}
EOF

#Start docker
systemctl enable docker
systemctl daemon-reload
systemctl start docker
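#Confirm the daemon is on the pinned version and picked up the mirror config:
docker version --format '{{.Server.Version}}'   # expect 18.06.0-ce
docker info | grep -A1 'Registry Mirrors'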

3. Initialize the Kubernetes master

#Print the default init configuration, then edit it
kubeadm config print init-defaults > kubeadm-init.yaml
vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.11   # the actual IP of k8s01
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.1.10:8443"   # VIP, the cluster entry point; if you hit "dial xxxx:8443 refused", remember to use 6443 and serve externally on 6443 instead
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # changed to the Aliyun mirror
kind: ClusterConfiguration
kubernetesVersion: v1.15.11    # must match the installed version
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

#Pre-pull the images
kubeadm config images pull --config kubeadm-init.yaml 

#Initialize the cluster
kubeadm init --config kubeadm-init.yaml
kubeadm reset   # wipes the configuration; only run this to start over after a failed init

#Copy the kubeconfig into place
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
At this point the master is NotReady; this is expected until a CNI plugin is installed.
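Quick check:
kubectl get nodes   # shows NotReady until flannel is applied in section V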

III. Checking status

1. kubectl get cs

kubectl get cs
NAME                 STATUS    MESSAGE              ERROR
controller-manager   Healthy   ok                   
scheduler            Healthy   ok                   
etcd-0               Healthy   {"health": "true"}

2. kubectl get ns

#List namespaces
kubectl get namespace
NAME          STATUS   AGE
default       Active   78m
kube-public   Active   78m
kube-system   Active   78m

3. kubectl get service && pod

#List services
kubectl get services
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   79m

#Show pod placement details
kubectl get pods -n kube-system --output=wide

4. DNS test commands

#DNS test
kubectl get pods -n kube-system

kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP          4d18h
tomcat       NodePort    10.102.87.180   <none>        8037:30000/TCP   24h

kubectl exec -it -n kube-system kube-scheduler-k8s-master ping www.sohu.com
kubectl exec -it -n kube-system kube-scheduler-k8s-master ping kubernetes  # this will NOT work (the scheduler pod uses the host network, so cluster DNS does not apply)


 
kubectl run curl --image=radial/busyboxplus:curl -it   # a worker node must have joined the cluster, otherwise the pod cannot be scheduled
kubectl exec -it curl-66959f6557-p8wqz /bin/sh
# inside the container:
nslookup kubernetes.default
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
 
Name:      kubernetes.default
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
#
ping kubernetes
PING kubernetes (10.96.0.1): 56 data bytes
64 bytes from 10.96.0.1: seq=0 ttl=251 time=3.992 ms

5. Known issue

#Problem
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: 
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: 
Fix: in /etc/kubernetes/manifests/kube-controller-manager.yaml and kube-scheduler.yaml,
comment out the --port=0 line
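A non-interactive way to apply the fix (a sketch, assuming the default manifest
paths; the kubelet watches the manifests directory and restarts the static pods
on its own):
sed -i 's/^\(\s*- --port=0\)/#\1/' /etc/kubernetes/manifests/kube-controller-manager.yaml /etc/kubernetes/manifests/kube-scheduler.yaml
kubectl get cs   # should report Healthy once the pods have restarted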

IV. Standby master configuration

1. Copy the certificates to the other masters

USER=root
CONTROL_PLANE_IPS="k8s02 k8s03"
for host in ${CONTROL_PLANE_IPS}; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done

2. Join the other masters to the cluster

#Join the remaining masters as control-plane nodes; run on k8s02 and k8s03
kubeadm join 192.168.1.10:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:84201a329ec4388263e97303c6e4de50c2de2aa157a3b961cb8a6f325fadedb1 --experimental-control-plane

V. Flannel installation

#Apply from k8s01 only

1. Manifest

cd /data/k8s
curl -Lo kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

2. Retag the images

cat kube-flannel.yml |grep image|uniq
image: quay.io/coreos/flannel:v0.12.0-amd64
        image: quay.io/coreos/flannel:v0.12.0-arm64
        image: quay.io/coreos/flannel:v0.12.0-arm
        image: quay.io/coreos/flannel:v0.12.0-ppc64le
        image: quay.io/coreos/flannel:v0.12.0-s390x
Pull the images from an Aliyun-hosted copy (the registry host was elided here; substitute the actual address)
docker pull <aliyun-registry>/leozhanggg/flannel:v0.12.0-amd64
docker pull <aliyun-registry>/leozhanggg/flannel:v0.12.0-arm64
docker pull <aliyun-registry>/leozhanggg/flannel:v0.12.0-arm
docker pull <aliyun-registry>/leozhanggg/flannel:v0.12.0-ppc64le
docker pull <aliyun-registry>/leozhanggg/flannel:v0.12.0-s390x
Retag them to the names the manifest expects
docker tag <aliyun-registry>/leozhanggg/flannel:v0.12.0-amd64 quay.io/coreos/flannel:v0.12.0-amd64
docker tag <aliyun-registry>/leozhanggg/flannel:v0.12.0-arm64 quay.io/coreos/flannel:v0.12.0-arm64
docker tag <aliyun-registry>/leozhanggg/flannel:v0.12.0-arm quay.io/coreos/flannel:v0.12.0-arm
docker tag <aliyun-registry>/leozhanggg/flannel:v0.12.0-ppc64le quay.io/coreos/flannel:v0.12.0-ppc64le
docker tag <aliyun-registry>/leozhanggg/flannel:v0.12.0-s390x quay.io/coreos/flannel:v0.12.0-s390x

3. Multiple NICs

#With multiple NICs, pin flannel to the right interface with --iface

##line 188  - /opt/bin/flanneld
##line 189  args:
##line 190  - --ip-masq
##line 191  - --kube-subnet-mgr
##line 192  - --iface=eth1   # added line

kubectl apply -f kube-flannel.yml

Check status

[root@k8s01 k8s]# kubectl get pods -n kube-system -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP             NODE    NOMINATED NODE   READINESS GATES
coredns-645bfc575f-dds6h        1/1     Running   0          14m     10.244.1.2     k8s02   <none>           <none>
coredns-645bfc575f-kqvzh        1/1     Running   0          14m     10.244.1.3     k8s02   <none>           <none>
etcd-k8s01                      1/1     Running   0          13m     192.168.1.31   k8s01   <none>           <none>
etcd-k8s02                      1/1     Running   0          9m16s   192.168.1.32   k8s02   <none>           <none>
kube-apiserver-k8s01            1/1     Running   0          13m     192.168.1.31   k8s01   <none>           <none>
kube-apiserver-k8s02            1/1     Running   0          9m17s   192.168.1.32   k8s02   <none>           <none>
kube-controller-manager-k8s01   1/1     Running   1          13m     192.168.1.31   k8s01   <none>           <none>
kube-controller-manager-k8s02   1/1     Running   0          9m17s   192.168.1.32   k8s02   <none>           <none>
kube-flannel-ds-amd64-87drf     1/1     Running   0          6m15s   192.168.1.31   k8s01   <none>           <none>
kube-flannel-ds-amd64-mt48w     1/1     Running   0          6m15s   192.168.1.32   k8s02   <none>           <none>
kube-proxy-wk7g2                1/1     Running   0          14m     192.168.1.31   k8s01   <none>           <none>
kube-proxy-zbr98                1/1     Running   0          9m17s   192.168.1.32   k8s02   <none>           <none>
kube-scheduler-k8s01            1/1     Running   1          13m     192.168.1.31   k8s01   <none>           <none>
kube-scheduler-k8s02            1/1     Running   0          9m17s   192.168.1.32   k8s02   <none>           <none>

VI. Joining worker nodes

Join node01 to the cluster (repeat on any additional workers)
#On node01, install the kubeadm components
yum install -y kubelet-1.15.11-0.x86_64 kubeadm-1.15.11-0.x86_64 kubectl-1.15.11-0.x86_64
systemctl enable kubelet.service   # and disable the swap check: /etc/sysconfig/kubelet --> KUBELET_EXTRA_ARGS="--fail-swap-on=false"
systemctl start docker

#Join the worker node
kubeadm join 192.168.1.10:8443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:84201a329ec4388263e97303c6e4de50c2de2aa157a3b961cb8a6f325fadedb1
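#Verify from any master that the node registered (it becomes Ready once
#flannel starts on it):
kubectl get nodes -o wide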

VII. Troubleshooting

1. Token expiry

#Tokens expire after 24 hours
kubeadm token list
kubeadm token create
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
kubeadm join 192.168.1.10:8443 --token m7clgf.iuy4qpq3n8l62wqk --discovery-token-ca-cert-hash sha256:<hash from the openssl command above>

#Example
kubeadm join 192.168.88.129:6443 --token hjc4qt.70spaw5dje3e2w7e \
	--discovery-token-ca-cert-hash sha256:6d983ee151fb6bd9bddd5a8b23072cb1035263dd7db0b2d1a09e5fb1b843dbb7
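A shortcut that prints a complete, ready-to-run join command (new token plus
CA cert hash) in one step:
kubeadm token create --print-join-command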

2. Removing a node

kubectl drain kube-node1 --delete-local-data --force --ignore-daemonsets   # run on a master
kubectl delete node kube-node1

3. Image pull timeouts

#Extend kubelet's image-pull deadline (the systemd unit only reads KUBELET_EXTRA_ARGS from this file)
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false --image-pull-progress-deadline=60m"

4. Master taints

#Letting the master run workloads
In a kubeadm-initialized cluster, Pods are not scheduled onto the master for safety reasons, i.e. the master carries no workloads.
The cause is the taint on the master node: node-role.kubernetes.io/master:NoSchedule
To let the master carry workloads, remove the taint.
Inspect the taint
kubectl describe node kube-master|grep Taints
Taints:             node-role.kubernetes.io/master:NoSchedule
Remove the taint
kubectl taint nodes kube-master node-role.kubernetes.io/master-
Add the taint back
kubectl taint nodes kube-master node-role.kubernetes.io/master=:NoSchedule

VIII. Dashboard installation

Rarely needed in practice.

curl -Lo kubernetes-dashboard.yaml https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
#Switch the image to the Aliyun mirror
sed -i "s#k8s.gcr.io#registry.aliyuncs.com/google_containers#g" kubernetes-dashboard.yaml

#Change the dashboard service to NodePort (and adjust the replica count if desired)
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 32767
  selector:
    k8s-app: kubernetes-dashboard

kubectl apply -f kubernetes-dashboard.yaml
kubectl get pods -n kube-system --output=wide
Access (a NodePort is reachable via any node IP)
https://192.168.1.11:32767

#Create an admin ClusterRoleBinding for login
vi dashboard-admin.yaml
###
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
  
kubectl create -f dashboard-admin.yaml

#Get the login token
kubectl -n kube-system get secret | grep dashboard
kubectl -n kube-system describe secret kubernetes-dashboard-token-27r7n

#keepalived: nothing special here; active/standby mode, with the VIP starting on k8s01
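A minimal active/standby sketch (the NIC name, priorities, and auth_pass are
assumptions; adjust per host). Note that controlPlaneEndpoint above uses port
8443, which assumes something on each master (e.g. haproxy) listens on 8443
and forwards to the local apiserver on 6443; otherwise set the endpoint to
192.168.1.10:6443.
yum install -y keepalived
cat > /etc/keepalived/keepalived.conf <<EOF
vrrp_instance VI_1 {
    state MASTER               # BACKUP on k8s02/k8s03
    interface eth0             # assumption: replace with the actual NIC name
    virtual_router_id 51
    priority 100               # lower values on the backups, e.g. 90 and 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s_vip      # assumption: pick your own shared secret
    }
    virtual_ipaddress {
        192.168.1.10
    }
}
EOF
systemctl enable keepalived && systemctl start keepalived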