第一部分 基础环境

1、平台规划

单master集群
多master集群

2、配置要求

master:2C 4G 20G
node:4C 8G 40G

3、集群部署方式

kubeadm方式 
二进制包方式

第二部分 系统初始化

1、关闭服务

# Stop and disable services that conflict with Kubernetes setup
# (firewalld blocks pod traffic; NetworkManager interferes with CNI interfaces).
for svc in firewalld postfix NetworkManager; do
  systemctl disable --now "$svc"
done

2、关闭swap

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

3、关闭SELinux

sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

4、配置主机名

# Set the hostname — run only the line matching the node you are on.
hostnamectl set-hostname k8s-master-01   # run on the master node
hostnamectl set-hostname k8s-node-01     # run on the worker node

5、配置host

# NOTE(review): the original said "master node only", but /etc/hosts name
# resolution is normally needed on ALL nodes — confirm and run everywhere.
cat >>/etc/hosts<<EOF
192.168.94.138 k8s-master-01
192.168.94.139 k8s-node-01
EOF

6、将桥接的IPv4流量传递到iptables的链

# The net.bridge.* sysctl keys only exist while the br_netfilter module is
# loaded; load it now and persist it across reboots, otherwise
# `sysctl --system` errors on these keys.
modprobe br_netfilter
echo 'br_netfilter' > /etc/modules-load.d/br_netfilter.conf
cat <<EOF |tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv6.conf.all.disable_ipv6=1
EOF
sysctl --system

7、时钟同步

# Install and start chrony for cluster-wide clock sync (certificate validation
# and etcd are sensitive to clock skew), then list the configured time sources.
yum -y install chrony
systemctl enable --now chronyd && chronyc sources

8、安装yum源

# Replace the default repos with the Aliyun CentOS 7 mirror and add EPEL.
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum -y install epel-release

# Rebuild the yum metadata cache (the original noted this step as a comment
# but omitted the actual command).
yum clean all && yum makecache

9、安装基础工具

yum -y install lrzsz tree unzip openssl-devel pcre-devel rsync wget tree lsof telnet zip net-tools bind-utils vim git nc psmisc jq

10、配置资源限制

# Append shell resource limits and history timestamping to the system profile.
# The single-quoted heredoc keeps the backticks literal so `whoami` is
# evaluated at each login, not now.
cat >>/etc/profile <<'EOF'
ulimit -c unlimited
ulimit -s unlimited
ulimit -SHn 65535
export HISTTIMEFORMAT="%F %T `whoami` "
EOF
# Optional hardening, left disabled as in the original:
# echo 'export TMOUT=300' >>/etc/profile
# echo 'HISTFILESIZE=100' >>/etc/profile
source /etc/profile

# Raise per-user limits (open files, processes, locked memory) for
# high-connection workloads; applied at next login via PAM.
cat >>/etc/security/limits.conf <<EOF
# add parameters
* soft nofile 655350
* hard nofile 655350
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF

11、内核参数优化

# Kernel/network tuning for high-connection nodes.
# NOTE: tcp_tw_recycle is forced to 0 here — the original set it to 1, which
# silently drops connections from clients behind NAT, and the option was
# removed entirely in kernel 4.12.
cat <<EOF |tee /etc/sysctl.d/sys.conf
net.core.somaxconn = 262144
net.core.netdev_max_backlog = 262144
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.ipv4.ip_forward = 1
net.ipv4.route.gc_timeout = 20
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_retries2 = 5
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_keepalive_time = 120
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_wmem = 8192 131072 16777216
net.ipv4.tcp_rmem = 32768 131072 16777216
net.ipv4.tcp_mem = 94500000 915000000 927000000
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.file-max = 6553560
EOF

sysctl --system

12、更新系统

# Update all packages except the kernel, then reboot to apply library updates.
yum -y update --exclude=kernel*
reboot

第三部分 集群安装

1、安装Docker

# Remove any previously installed Docker packages that would conflict
# with docker-ce (safe to run even if none are installed).
yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine


# Add the Aliyun docker-ce yum repository.
yum -y install yum-utils
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# List available versions, then install a pinned 19.03 Docker release
# (pinning avoids pulling a newer, untested version on reinstall).
yum list docker-ce --showduplicates |sort -r
yum -y install docker-ce-19.03.* docker-ce-cli-19.03.* containerd.io

# Configure Docker: use the systemd cgroup driver (so kubelet and Docker agree
# on cgroup management, as kubeadm recommends) and move storage to /data/docker.
mkdir -pv /data/docker /etc/docker
cat <<EOF |tee /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"data-root": "/data/docker"
}
EOF
systemctl daemon-reload && systemctl enable --now docker

2、安装kubeadm、kubelet和kubectl

# Add the Aliyun Kubernetes yum repository.
# NOTE(review): gpgcheck/repo_gpgcheck are disabled even though the gpg keys
# are listed — consider enabling gpgcheck=1 for package verification.
cat <<EOF |tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# List available versions, then install the 1.18 line of kubeadm/kubelet/kubectl.
yum list kubeadm --showduplicates |sort -r
yum -y install kubeadm-1.18.* kubelet-1.18.* kubectl-1.18.*

# Enable kubelet at boot. It will restart in a loop until `kubeadm init`/`join`
# writes its configuration — that is expected.
systemctl enable kubelet

3、安装IPVS模块

# Install the userspace tools kube-proxy's IPVS mode depends on.
yum -y install conntrack ipvsadm ipset libseccomp sysstat

# Load the IPVS kernel modules now (host kernel: 3.10.0-1160.el7.x86_64).
# On kernels >= 4.19, nf_conntrack_ipv4 was merged into nf_conntrack;
# on older kernels load nf_conntrack_ipv4.
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
# Persist the module list across reboots. nf_conntrack_ipv4 is added to match
# the modprobe above — the original persisted only nf_conntrack, which would
# not reload the ipv4 conntrack module on this 3.10 kernel after reboot.
cat <<EOF |tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_tables
ip_set
xt_set
ipt_set
ipip
ipt_rpfilter
ipt_REJECT
br_netfilter
nf_conntrack
nf_conntrack_ipv4
EOF

systemctl enable --now systemd-modules-load.service
lsmod |grep -e ip_vs -e nf_conntrack

4、部署Master

# Create a working directory for cluster configs and logs.
mkdir -pv /data/kubernetes
cd /data/kubernetes

# Pre-pull control-plane images from the Aliyun mirror (avoids slow/blocked
# pulls from k8s.gcr.io during init).
kubeadm config images list
kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers

# Generate the default init configuration, then edit it as shown below.
kubeadm config print init-defaults > kubeadm-config.yaml

# --- kubeadm-config.yaml (edited) ---
# Fixes vs the original notes: YAML indentation restored (the pasted config
# had lost all nesting and was not valid), the duplicated
# `kind: KubeProxyConfiguration` line removed, and the SupportIPVSProxyMode
# feature gate dropped (IPVS mode is GA since 1.11; `mode: ipvs` suffices).
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.94.138
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master-01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.18.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

# Initialize the control plane; keep the output (it contains the join command).
kubeadm init --config=kubeadm-config.yaml --upload-certs |tee kubeadm-init.log

# Set up kubectl for the current user by copying the admin kubeconfig.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

5、加入node节点

# Run on each worker node. Token and CA hash come from the `kubeadm init`
# output (see kubeadm-init.log); tokens expire after 24h — regenerate with
# `kubeadm token create --print-join-command` if needed.
kubeadm join 192.168.94.138:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:4b2e7c50a1a3f9725093c0af9fbb8c5f4995a0ce5cbce03d628716ae5c0e4293

# Run on the master node to verify the workers registered.
kubectl get node

6、部署网络组件

cd /etc/kubernetes
# Download the manifest first so the podCIDR can be edited if it differs from
# 10.244.0.0/16, then apply it once. (The original applied the manifest twice:
# once from the URL and once from a local file that was never downloaded.)
wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

# 注意:
如果您使用自定义 podCIDR(不是 10.244.0.0/16),需要先下载上述清单,修改其中的网络配置以匹配您的 podCIDR,然后再应用。

7、集群状态验证

# Ensure kubelet starts on boot (kubeadm already started it; this is idempotent).
systemctl enable --now kubelet

# Verify all control-plane and CNI pods are Running.
kubectl get pod -A

# Smoke test: deploy nginx and expose it via a NodePort service.
kubectl create deployment nginx --image=nginx
kubectl expose deploy nginx --port=80 --type=NodePort
kubectl get pod,svc
访问:http://192.168.94.138:30210 # 30210 为 NodePort,实际端口以 kubectl get svc 显示的映射端口为准

# Confirm kube-proxy is running in ipvs mode (expect output "ipvs").
curl localhost:10249/proxyMode
kubectl get cm kube-proxy -n kube-system -o yaml |grep mode

第四部分 高可用集群部署

1、待实现