1.基础环境准备

参考文档

1.节点基础环境

系统版本

[root@k8s-node1 ~]# cat /etc/redhat-release 
CentOS Linux release 7.7.1908 (Core)
[root@k8s-node1 ~]#

三个主机:

k8s-node1	192.168.174.128
k8s-node2	192.168.174.129
k8s-node3	192.168.174.130

2.节点系统的基本设置,所有节点都要做

关闭selinux

# Disable SELinux persistently (takes effect after reboot).
# Anchored to the key so it also matches SELINUX=permissive, not only the
# literal "enforcing" value.
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

关闭防火墙

systemctl stop firewalld.service&&systemctl disable firewalld.service

关闭swap

swapoff -a && sed -i 's/.*swap.*/#&/' /etc/fstab

配置Host解析

echo -e "192.168.174.128 k8s-node1\n192.168.174.129 k8s-node2\n192.168.174.130 k8s-node3" >>/etc/hosts

配置系统参数,允许数据转发

在 /etc/sysctl.d/ 目录下新建 k8s.conf,开启路由转发,并让 iptables 处理 bridge 的转发流量(bridge-nf-call-iptables),内容如下:

net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100

建好k8s.conf后执行 加载内核模块

modprobe br_netfilter
modprobe ip_vs

再执行

sysctl -p /etc/sysctl.d/k8s.conf

3.配置节点ssh免密钥登陆

节点1,k8s-node1

[root@k8s-node1 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:2tyWwPZfUpYUBLa1uc06xDNRMMdKJa+uHwM37yQDY5M root@k8s-node1
The key's randomart image is:
+---[RSA 2048]----+
|            ooB++|
|           . o.X.|
|            ..=..|
|       .     +.B |
|        S   E & o|
|       = + o @ * |
|      . o = . X o|
|         . . + O |
|            o.. .|
+----[SHA256]-----+
[root@k8s-node1 ~]#
ssh-copy-id root@k8s-node2
ssh-copy-id root@k8s-node3

节点2,k8s-node2

[root@k8s-node2 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:jvZH/8xJbBTkiBcHWZlphbeN2Chw+e8MmoiNkgb+UlM root@k8s-node2
The key's randomart image is:
+---[RSA 2048]----+
|           .o++*.|
|        . o..** .|
|         o..o=ooo|
|      E   ..+ oo.|
|     .  S  . ..  |
|  . o  o  . .o.  |
| . o oo+.o + ++  |
|  o +.o.o + .=o. |
|   +..  ..   .=  |
+----[SHA256]-----+
[root@k8s-node2 ~]# 
ssh-copy-id root@k8s-node1
ssh-copy-id root@k8s-node3

节点3,k8s-node3

[root@k8s-node3 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:1ZamMCf1DhkjrVUIx4OfrJWTxr0sh4+S2U3y43YGeVo root@k8s-node3
The key's randomart image is:
+---[RSA 2048]----+
|        o==o.    |
|        .+** .   |
|        +*=**    |
|        .*@*.    |
|        S+.+.o   |
|        . + B E  |
|         + X =   |
|        + o B o  |
|         . o.+   |
+----[SHA256]-----+
[root@k8s-node3 ~]#
ssh-copy-id root@k8s-node1
ssh-copy-id root@k8s-node2

4.安装docker

参考前面docker文档,略

5.添加k8s用户并加入docker群组和配置sudo权限,三个节点做同样的操作

添加用户k8s并配置密码为123456

[root@k8s-node1 ~]# useradd -m k8s
[root@k8s-node1 ~]# sh -c 'echo 123456 | passwd k8s --stdin'
Changing password for user k8s.
passwd: all authentication tokens updated successfully.

加入wheel群组,这个群组具有sudo权限

[root@k8s-node1 ~]# gpasswd -a k8s wheel
Adding user k8s to group wheel

加入docker群组

[root@k8s-node1 ~]# gpasswd -a k8s docker
Adding user k8s to group docker
[root@k8s-node1 ~]#

6.安装依赖包,三个节点都需要安装

ipvs依赖ipset

yum install -y epel-release
yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp

7.创建k8s集群使用的目录,三个节点做一样的操作

注意这里使用的用户是我们前面创建好的k8s

[root@k8s-node1 ~]# mkdir -p /opt/k8s/bin
[root@k8s-node1 ~]# chown -R k8s /opt/k8s/
[root@k8s-node1 ~]# mkdir -p /etc/kubernetes/cert
[root@k8s-node1 ~]# chown -R k8s /etc/kubernetes
[root@k8s-node1 ~]# mkdir -p /etc/etcd/cert
[root@k8s-node1 ~]# chown -R k8s /etc/etcd/
[root@k8s-node1 ~]# mkdir -p /var/lib/etcd && chown -R k8s /var/lib/etcd

8.集群环境变量

#!/usr/bin/bash
# environment.sh — cluster-wide variables; copied to /opt/k8s/bin on every
# node and sourced by the later deployment steps.

# Encryption key used when generating the apiserver EncryptionConfig.
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
# Pick currently-unused ranges for the service and pod networks.
# Service CIDR: unroutable before deployment; routable inside the cluster
# afterwards (guaranteed by kube-proxy and ipvs).
SERVICE_CIDR="10.254.0.0/16"
# Pod CIDR (/16 recommended): routable inside the cluster via flanneld.
CLUSTER_CIDR="172.30.0.0/16"
# NodePort service port range.
export NODE_PORT_RANGE="8400-9000"
# IP addresses of all cluster nodes.
export NODE_IPS=(192.168.174.128 192.168.174.129 192.168.174.130)
# Hostnames matching NODE_IPS, in the same order.
export NODE_NAMES=(k8s-node1 k8s-node2 k8s-node3)
# kube-apiserver VIP (published by the keepalived HA component).
export MASTER_VIP=192.168.174.127
# kube-apiserver endpoint (haproxy listens on port 8443 for the VIP).
export KUBE_APISERVER="https://${MASTER_VIP}:8443"
# Network interface that carries the VIP on the HA nodes.
export VIP_IF="ens33"
# etcd client endpoints.
export ETCD_ENDPOINTS="https://192.168.174.128:2379,https://192.168.174.129:2379,https://192.168.174.130:2379"
# etcd peer (cluster-internal) endpoints on port 2380.
# Fixed typo: node3 was "192.168.74.130" (dropped digit), which would have
# broken etcd cluster bootstrap.
export ETCD_NODES="k8s-node1=https://192.168.174.128:2380,k8s-node2=https://192.168.174.129:2380,k8s-node3=https://192.168.174.130:2380"
# etcd key prefix holding the flanneld network configuration.
export FLANNEL_ETCD_PREFIX="/kubernetes/network"
# kubernetes service IP (normally the first IP of SERVICE_CIDR).
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"
# Cluster DNS service IP (pre-allocated from SERVICE_CIDR).
export CLUSTER_DNS_SVC_IP="10.254.0.2"
# Cluster DNS domain.
export CLUSTER_DNS_DOMAIN="cluster.local."
# Prepend the k8s binary directory to PATH.
export PATH=/opt/k8s/bin:$PATH

打包后的变量定义见 environment.sh,后续部署时会提示导入该脚本; 把全局变量定义脚本拷贝到所有节点的 /opt/k8s/bin目录.

[root@k8s-node1 ~]# cp environment.sh /opt/k8s/bin/
[root@k8s-node1 ~]# scp environment.sh root@k8s-node2:/opt/k8s/bin/
environment.sh                                                                                        100% 1749     1.6MB/s   00:00    
[root@k8s-node1 ~]# scp environment.sh root@k8s-node3:/opt/k8s/bin/
environment.sh                                                                                        100% 1749     1.8MB/s   00:00    
[root@k8s-node1 ~]#

给与执行权限

[root@k8s-node1 ~]# chmod +x /opt/k8s/bin/*
[root@k8s-node1 ~]# ssh k8s-node2 "chmod +x /opt/k8s/bin/*"
[root@k8s-node1 ~]# ssh k8s-node3 "chmod +x /opt/k8s/bin/*"

9.可能遇到的问题记录

基础环境部署中遇到的错误记录: 可能会报错,见下:

sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory

解决方法:加载 br_netfilter 模块.

modprobe br_netfilter
lsmod |grep bridge
bridge                107106  1 br_netfilter
stp                    12976  1 bridge
llc                    14552  2 stp,bridge