k8s Master Load Balancing
1. Server Planning
Note: only master load balancing is implemented here.
Server Name | IP | Role |
k8s-master1 | 192.168.1.107 | k8s-master1, etcd |
k8s-master2 | 192.168.1.108 | k8s-master2 |
k8s-node1 | 192.168.1.109 | k8s-node1 |
nginx | 192.168.1.55 | nginx load balancer |
2. k8s-master1 Deployment
1. Install Docker
# Disable the firewall
ufw disable && ufw status
# Install Docker via the script
curl -s https://raw.githubusercontent.com/jy1779/docker/master/install/aliyun_docker_install.sh | bash
# Modify the docker.service parameters
LINE=$(grep -n ExecStart /lib/systemd/system/docker.service|awk -F : '{print $1}')
EXECSTARTPOST='ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT'
sed "$LINE a$EXECSTARTPOST" -i /lib/systemd/system/docker.service
# Reload docker.service and restart the Docker daemon
systemctl daemon-reload && service docker restart
service docker status
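To confirm the sed edit above inserted the iptables rule in the right place, a quick illustrative check:
# The inserted ExecStartPost line should appear immediately after ExecStart
grep -A1 '^ExecStart=' /lib/systemd/system/docker.service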
2. Generate Configuration Files and the Root Certificate
# Add kernel parameters to
/etc/sysctl.d/k8s.conf
# Parameter notes:
# Controls IP packet forwarding
net.ipv4.ip_forward = 1
# Enable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
# Run this command to add the kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the kernel parameters
sysctl -p /etc/sysctl.d/k8s.conf
# If you see the following errors, run: modprobe br_netfilter
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
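To keep br_netfilter loaded across reboots (a minimal sketch assuming an Ubuntu-style /etc/modules; adjust for your distro):
# Load the module now
modprobe br_netfilter
# Load it on every boot
echo br_netfilter >> /etc/modules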
# Fetch the k8s binaries and configuration files
# kubernetes.git is not the official distribution; it bundles the files needed for this custom cluster install
git clone https://code.aliyun.com/jy1779/kubernetes.git
tar xf ./kubernetes/kubernetes-bins.tar.gz -C /usr/local/sbin/ && rm -f ./kubernetes/kubernetes-bins.tar.gz
echo 'export PATH=$PATH:/usr/local/sbin/kubernetes-bins' >> /etc/profile && source /etc/profile
# Verify the PATH
which kubectl
/usr/local/sbin/kubernetes-bins/kubectl
# Generate the configuration files
cd /root/kubernetes/kubernetes-starter/
# Edit the configuration file
vim config.properties
#Directory of the kubernetes binaries, e.g. /home/michael/bin
BIN_PATH=/usr/local/sbin/kubernetes-bins
#IP of the current node, e.g. 192.168.1.102
NODE_IP=192.168.1.107
#etcd cluster endpoint list, e.g. http://192.168.1.102:2379
#If you already have an etcd cluster, list it here; otherwise use http://${MASTER_IP}:2379 (replace MASTER_IP with your own master node IP)
ETCD_ENDPOINTS=https://192.168.1.107:2379
#Master node IP address, e.g. 192.168.1.102
MASTER_IP=192.168.1.107
# Run the script to generate the configuration files
./ with-ca
==== Variables replaced ====
BIN_PATH=/usr/local/sbin/kubernetes-bins
NODE_IP=192.168.1.107
ETCD_ENDPOINTS=https://192.168.1.107:2379
MASTER_IP=192.168.1.107
====================
==== Configuration files generated ====
all-node/kube-calico.service
ca/admin/admin-csr.json
ca/ca-config.json
ca/ca-csr.json
ca/calico/calico-csr.json
ca/etcd/etcd-csr.json
ca/kube-proxy/kube-proxy-csr.json
ca/kubernetes/kubernetes-csr.json
master-node/etcd.service
master-node/kube-apiserver.service
master-node/kube-controller-manager.service
master-node/kube-scheduler.service
services/kube-dashboard.yaml
services/kube-dns.yaml
worker-node/10-calico.conf
worker-node/kubelet.service
worker-node/kube-proxy.service
=================
Configuration generated successfully at: /root/kubernetes/kubernetes-starter/target
# Install cfssl
wget -q --show-progress --https-only --timestamping \
https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
# Make the binaries executable
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
# Move them into the bin directory
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
# Verify
cfssl version
# Generate the root certificate
# Create a directory for the CA certificate
mkdir -p /etc/kubernetes/ca
# Note: ca-config.json and ca-csr.json are provided in the repo; you can modify them or generate your own
# Copy the CA files
cp ~/kubernetes/kubernetes-starter/target/ca/ca-config.json /etc/kubernetes/ca
cp ~/kubernetes/kubernetes-starter/target/ca/ca-csr.json /etc/kubernetes/ca
# Generate the certificate and key
cd /etc/kubernetes/ca
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# List the certificate and key
ls
ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem
3. Deploy etcd
The etcd node is accessed by other services and must authenticate them, so it needs a server certificate that identifies its own listening service. When there are multiple etcd nodes, a client certificate is also needed to talk to the other members of the cluster. The client and server can share a single certificate, since there is essentially no difference between them.
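This is why the CA config can get away with a single signing profile that carries both usages. A minimal sketch of what such a ca-config.json typically looks like (illustrative; the repo ships its own version, which may differ in detail):
{
  "signing": {
    "default": { "expiry": "87600h" },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [ "signing", "key encipherment", "server auth", "client auth" ]
      }
    }
  }
}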
# Create a directory for the etcd certificates
mkdir -p /etc/kubernetes/ca/etcd
# Copy the etcd certificate request config
cp ~/kubernetes/kubernetes-starter/target/ca/etcd/etcd-csr.json /etc/kubernetes/ca/etcd/
cd /etc/kubernetes/ca/etcd/
# Edit the etcd-csr.json config file
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.1.107",
"192.168.1.108", #添加k8s-master2的IP
"192.168.1.55" #添加nginx负载均衡的IP
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "XS",
"O": "k8s",
"OU": "System"
}
]
}
# Sign the etcd certificate with the root certificate (ca.pem)
cfssl gencert \
-ca=/etc/kubernetes/ca/ca.pem \
-ca-key=/etc/kubernetes/ca/ca-key.pem \
-config=/etc/kubernetes/ca/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# As before, this produces three files; etcd.csr is an intermediate signing request - what we ultimately need are etcd-key.pem and etcd.pem
ls
etcd.csr etcd-csr.json etcd-key.pem etcd.pem
# Create the working directory (where etcd stores its data)
mkdir -p /var/lib/etcd
# Copy the etcd service config into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/master-node/etcd.service /lib/systemd/system/
# Enable the etcd service
systemctl enable etcd.service
# Start the etcd service
service etcd start
# Watch the service log for errors to make sure the service is healthy
journalctl -f -u etcd.service
# Test that the etcd service is working
ETCDCTL_API=3 etcdctl \
--endpoints=https://192.168.1.107:2379 \
--cacert=/etc/kubernetes/ca/ca.pem \
--cert=/etc/kubernetes/ca/etcd/etcd.pem \
--key=/etc/kubernetes/ca/etcd/etcd-key.pem \
endpoint health
# The following output means the deployment succeeded.
https://192.168.1.107:2379 is healthy: successfully committed proposal: took = 10.408412ms
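With the same TLS flags you can also inspect the cluster membership (a single member in this setup):
ETCDCTL_API=3 etcdctl \
 --endpoints=https://192.168.1.107:2379 \
 --cacert=/etc/kubernetes/ca/ca.pem \
 --cert=/etc/kubernetes/ca/etcd/etcd.pem \
 --key=/etc/kubernetes/ca/etcd/etcd-key.pem \
 member list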
4. Deploy the APIServer
# Create a directory for the apiserver certificates
mkdir -p /etc/kubernetes/ca/kubernetes
# Copy the apiserver certificate request config
cp ~/kubernetes/kubernetes-starter/target/ca/kubernetes/kubernetes-csr.json /etc/kubernetes/ca/kubernetes/
# Sign the kubernetes certificate with the root certificate (ca.pem)
cd /etc/kubernetes/ca/kubernetes/
# Edit the kubernetes-csr.json config file
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.1.107",
"192.168.1.108", #添加k8s-master2的IP
"192.168.1.55", #添加nginx负载均衡的iP
"10.68.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "XS",
"O": "k8s",
"OU": "System"
}
]
}
cfssl gencert \
-ca=/etc/kubernetes/ca/ca.pem \
-ca-key=/etc/kubernetes/ca/ca-key.pem \
-config=/etc/kubernetes/ca/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
# As before, this produces three files; kubernetes.csr is an intermediate signing request - what we ultimately need are kubernetes-key.pem and kubernetes.pem
ls
kubernetes.csr kubernetes-csr.json kubernetes-key.pem kubernetes.pem
# Generate the token auth file
# Generate a random token
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
97e8c07dce2b2bab69cfd3162d5383c9
# Write it to token.csv (format: token,user,uid,"group")
echo '97e8c07dce2b2bab69cfd3162d5383c9,kubelet-bootstrap,10001,"system:kubelet-bootstrap"' > /etc/kubernetes/ca/kubernetes/token.csv
# Copy the apiserver service config into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-apiserver.service /lib/systemd/system/
# Enable the kube-apiserver service
systemctl enable kube-apiserver.service
# Start the kube-apiserver service
service kube-apiserver start
# Watch the kube-apiserver log
journalctl -f -u kube-apiserver
# The NodePort range the apiserver allows for services; widen it (e.g. to include port 80) if you need such ports, otherwise they cannot be mapped
cat ~/kubernetes/kubernetes-starter/target/master-node/kube-apiserver.service |grep port-range
--service-node-port-range=20000-40000 \
5. Deploy the Controller-manager
The controller-manager normally runs on the same machine as the apiserver, so it can talk to the apiserver over the insecure port and needs no certificate or private key.
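Concretely, this means the generated unit file can point the controller-manager at the apiserver's plain-HTTP localhost endpoint; an illustrative fragment (the real file in target/master-node/ carries more flags):
# In kube-controller-manager.service:
# --master=http://127.0.0.1:8080   # local insecure apiserver endpoint, so no TLS client material is needed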
# Copy the kube-controller-manager.service config into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-controller-manager.service /lib/systemd/system/
# Enable the kube-controller-manager.service
systemctl enable kube-controller-manager.service
# Start the kube-controller-manager.service
service kube-controller-manager start
# Watch the kube-controller-manager.service log
journalctl -f -u kube-controller-manager
6. Deploy the Scheduler
The scheduler normally runs on the same machine as the apiserver, so it can talk to the apiserver over the insecure port and needs no certificate or private key.
# Copy the scheduler service config into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-scheduler.service /lib/systemd/system/
# Enable the kube-scheduler.service
systemctl enable kube-scheduler.service
# Start the kube-scheduler.service
service kube-scheduler start
# Watch the kube-scheduler.service log
journalctl -f -u kube-scheduler
7. Configure kubectl Administration
# Create a directory for the kubectl certificates
mkdir -p /etc/kubernetes/ca/admin
# Prepare the admin certificate config - kubectl only needs a client certificate, so the hosts field in the request can be empty
# Copy the kubectl certificate config
cp ~/kubernetes/kubernetes-starter/target/ca/admin/admin-csr.json /etc/kubernetes/ca/admin/
# Sign the admin certificate with the root certificate (ca.pem)
cd /etc/kubernetes/ca/admin/
cfssl gencert \
-ca=/etc/kubernetes/ca/ca.pem \
-ca-key=/etc/kubernetes/ca/ca-key.pem \
-config=/etc/kubernetes/ca/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
# What we ultimately need are admin-key.pem and admin.pem
ls
admin.csr admin-csr.json admin-key.pem admin.pem
# Configure the kubectl file
# Point kubectl at the apiserver address and CA certificate
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ca/ca.pem \
--embed-certs=true \
--server=https://192.168.1.107:6443
# Set the client credentials, pointing at the admin certificate and key
kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/ca/admin/admin.pem \
--embed-certs=true \
--client-key=/etc/kubernetes/ca/admin/admin-key.pem
# Bind the user to the cluster in a context
kubectl config set-context kubernetes \
--cluster=kubernetes --user=admin
# Use this context as the current one
kubectl config use-context kubernetes
# The result is simply a config file; take a look at its contents
cat ~/.kube/config
# Verify the master components
kubectl get cs
NAME STATUS MESSAGE ERROR
etcd-0 Healthy {"health": "true"}
controller-manager Healthy ok
scheduler Healthy ok
# Create the kubelet-bootstrap binding so the token user may request node certificates
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
8. Deploy the Calico Network
Calico implements the CNI interface and is one networking option for kubernetes. It is a pure layer-3 data center networking solution (no overlay needed) and integrates well with IaaS and container platforms such as OpenStack, Kubernetes, AWS, and GCE. On every compute node, Calico uses the Linux kernel to provide an efficient vRouter for packet forwarding, and each vRouter propagates the routes of its local workloads across the Calico network via BGP - direct full-mesh peering for small deployments, or designated BGP route reflectors at larger scale. The result is that all traffic between workloads is interconnected by plain IP routing.
# The calico certificate is used in four places:
# calico/node: the docker container uses it to access etcd at runtime
# the CNI plugin configured in the CNI config file: uses it to access etcd
# calicoctl: uses it to access etcd when operating on the cluster network
# calico/kube-controllers: uses it to access etcd when syncing cluster network policy
# Create a directory for the calico certificates
mkdir -p /etc/kubernetes/ca/calico
# Prepare the calico certificate config - calico only needs a client certificate, so the hosts field in the request can be empty
cp ~/kubernetes/kubernetes-starter/target/ca/calico/calico-csr.json /etc/kubernetes/ca/calico/
cd /etc/kubernetes/ca/calico/
cfssl gencert \
-ca=/etc/kubernetes/ca/ca.pem \
-ca-key=/etc/kubernetes/ca/ca-key.pem \
-config=/etc/kubernetes/ca/ca-config.json \
-profile=kubernetes calico-csr.json | cfssljson -bare calico
# What we ultimately need are calico-key.pem and calico.pem
ls
calico.csr calico-csr.json calico-key.pem calico.pem
# Install and enable the kube-calico.service
cp ~/kubernetes/kubernetes-starter/target/all-node/kube-calico.service /lib/systemd/system/
systemctl enable kube-calico.service
# Starting kube-calico pulls the image, so the first start can take a while
service kube-calico start
3. k8s-master2 Deployment
1. Install Docker
# Disable the firewall
ufw disable && ufw status
# Install Docker via the script
curl -s https://raw.githubusercontent.com/jy1779/docker/master/install/aliyun_docker_install.sh | bash
# Modify the docker.service parameters
LINE=$(grep -n ExecStart /lib/systemd/system/docker.service|awk -F : '{print $1}')
EXECSTARTPOST='ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT'
sed "$LINE a$EXECSTARTPOST" -i /lib/systemd/system/docker.service
# Reload docker.service and restart the Docker daemon
systemctl daemon-reload && service docker restart
service docker status
2. Generate Configuration Files and the Root Certificate
# Add kernel parameters to
/etc/sysctl.d/k8s.conf
# Parameter notes:
# Controls IP packet forwarding
net.ipv4.ip_forward = 1
# Enable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
# Run this command to add the kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the kernel parameters
sysctl -p /etc/sysctl.d/k8s.conf
# If you see the following errors, run: modprobe br_netfilter
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
# Fetch the k8s binaries and configuration files
# kubernetes.git is not the official distribution; it bundles the files needed for this custom cluster install
git clone https://code.aliyun.com/jy1779/kubernetes.git
# Unpack the k8s binaries and add them to the PATH
tar xf ./kubernetes/kubernetes-bins.tar.gz -C /usr/local/sbin/ && rm -f ./kubernetes/kubernetes-bins.tar.gz
echo 'export PATH=$PATH:/usr/local/sbin/kubernetes-bins' >> /etc/profile && source /etc/profile
# Verify the PATH
which kubectl
/usr/local/sbin/kubernetes-bins/kubectl
# Generate the configuration files
cd /root/kubernetes/kubernetes-starter/
# Edit the configuration file
vim config.properties
#Directory of the kubernetes binaries, e.g. /home/michael/bin
BIN_PATH=/usr/local/sbin/kubernetes-bins
#IP of the current node, e.g. 192.168.1.102
NODE_IP=192.168.1.108
#etcd cluster endpoint list, e.g. http://192.168.1.102:2379
#If you already have an etcd cluster, list it here; otherwise use http://${MASTER_IP}:2379 (replace MASTER_IP with your own master node IP)
ETCD_ENDPOINTS=https://192.168.1.107:2379
#Master node IP address, e.g. 192.168.1.102
MASTER_IP=192.168.1.108
./ with-ca
==== Variables replaced ====
BIN_PATH=/usr/local/sbin/kubernetes-bins
NODE_IP=192.168.1.108
ETCD_ENDPOINTS=https://192.168.1.107:2379
MASTER_IP=192.168.1.108
====================
==== Configuration files generated ====
all-node/kube-calico.service
ca/admin/admin-csr.json
ca/ca-config.json
ca/ca-csr.json
ca/calico/calico-csr.json
ca/etcd/etcd-csr.json
ca/kube-proxy/kube-proxy-csr.json
ca/kubernetes/kubernetes-csr.json
master-node/etcd.service
master-node/kube-apiserver.service
master-node/kube-controller-manager.service
master-node/kube-scheduler.service
services/kube-dashboard.yaml
services/kube-dns.yaml
worker-node/10-calico.conf
worker-node/kubelet.service
worker-node/kube-proxy.service
=================
Configuration generated successfully at: /root/kubernetes/kubernetes-starter/target
# Install cfssl
wget -q --show-progress --https-only --timestamping \
https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
# Make the binaries executable
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
# Move them into the bin directory
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
# Verify
cfssl version
# Root certificate
# Create a directory for the CA certificate
mkdir -p /etc/kubernetes/ca
# Fetch the CA files from k8s-master1
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-key.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-config.json /etc/kubernetes/ca/
# Create a directory for the etcd certificates and fetch them from k8s-master1
mkdir -p /etc/kubernetes/ca/etcd
rsync -av 192.168.1.107:/etc/kubernetes/ca/etcd/etcd-key.pem /etc/kubernetes/ca/etcd/
rsync -av 192.168.1.107:/etc/kubernetes/ca/etcd/etcd.pem /etc/kubernetes/ca/etcd/
# Test the connection to the etcd service
ETCDCTL_API=3 etcdctl \
--endpoints=https://192.168.1.107:2379 \
--cacert=/etc/kubernetes/ca/ca.pem \
--cert=/etc/kubernetes/ca/etcd/etcd.pem \
--key=/etc/kubernetes/ca/etcd/etcd-key.pem \
endpoint health
# The following output means everything is healthy
https://192.168.1.107:2379 is healthy: successfully committed proposal: took = 341.160166ms
3. Deploy the APIServer
# Create a directory for the apiserver certificates
# Fetch them from k8s-master1
mkdir -p /etc/kubernetes/ca/kubernetes
cd /etc/kubernetes/ca/kubernetes/
rsync -av 192.168.1.107:/etc/kubernetes/ca/kubernetes/kubernetes-key.pem /etc/kubernetes/ca/kubernetes/
rsync -av 192.168.1.107:/etc/kubernetes/ca/kubernetes/kubernetes.pem /etc/kubernetes/ca/kubernetes/
rsync -av 192.168.1.107:/etc/kubernetes/ca/kubernetes/token.csv /etc/kubernetes/ca/kubernetes/
# Copy the apiserver service config into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-apiserver.service /lib/systemd/system/
# Enable the kube-apiserver service
systemctl enable kube-apiserver.service
# Start the kube-apiserver service
service kube-apiserver start
# Watch the kube-apiserver log
journalctl -f -u kube-apiserver
4. Deploy the Controller-manager
The controller-manager normally runs on the same machine as the apiserver, so it can talk to the apiserver over the insecure port and needs no certificate or private key.
# Copy the kube-controller-manager.service config into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-controller-manager.service /lib/systemd/system/
# Enable the kube-controller-manager.service
systemctl enable kube-controller-manager.service
# Start the kube-controller-manager.service
service kube-controller-manager start
# Watch the kube-controller-manager.service log
journalctl -f -u kube-controller-manager
5. Deploy the Scheduler
The scheduler normally runs on the same machine as the apiserver, so it can talk to the apiserver over the insecure port and needs no certificate or private key.
# Copy the scheduler service config into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/master-node/kube-scheduler.service /lib/systemd/system/
# Enable the kube-scheduler.service
systemctl enable kube-scheduler.service
# Start the kube-scheduler.service
service kube-scheduler start
# Watch the kube-scheduler.service log
journalctl -f -u kube-scheduler
6. Configure kubectl Administration
# Create a directory for the kubectl certificates
mkdir -p /etc/kubernetes/ca/admin
# Prepare the admin certificate config - kubectl only needs a client certificate, so the hosts field in the request can be empty
# Copy the kubectl certificate config
cp ~/kubernetes/kubernetes-starter/target/ca/admin/admin-csr.json /etc/kubernetes/ca/admin/
# Sign the admin certificate with the root certificate (ca.pem)
cd /etc/kubernetes/ca/admin/
cfssl gencert \
-ca=/etc/kubernetes/ca/ca.pem \
-ca-key=/etc/kubernetes/ca/ca-key.pem \
-config=/etc/kubernetes/ca/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
# Configure the kubectl file
# Point kubectl at the apiserver address and CA certificate
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ca/ca.pem \
--embed-certs=true \
--server=https://192.168.1.108:6443
# Set the client credentials, pointing at the admin certificate and key
kubectl config set-credentials admin \
--client-certificate=/etc/kubernetes/ca/admin/admin.pem \
--embed-certs=true \
--client-key=/etc/kubernetes/ca/admin/admin-key.pem
# Bind the user to the cluster in a context
kubectl config set-context kubernetes \
--cluster=kubernetes --user=admin
# Use this context as the current one
kubectl config use-context kubernetes
# Inspect the resulting config
cat ~/.kube/config
# Check the master components
kubectl get componentstatus
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health": "true"}
7. Deploy the Calico Network
# Create a directory for the calico certificates and fetch them from k8s-master1
mkdir -p /etc/kubernetes/ca/calico
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico.pem /etc/kubernetes/ca/calico
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico-key.pem /etc/kubernetes/ca/calico
# Install and enable the kube-calico.service
cp ~/kubernetes/kubernetes-starter/target/all-node/kube-calico.service /lib/systemd/system/
systemctl enable kube-calico.service
# Starting kube-calico pulls the image, so the first start can take a while
service kube-calico start
journalctl -f -u kube-calico
# Check the calico node status
calicoctl node status
4. Deploy nginx
Server: 192.168.1.55
Deploy nginx with docker-compose.
# Inspect the layout of the nginx docker-compose directory
tree -L 2 nginx/
nginx/
├── conf
│ ├── conf.d
│ ├── fastcgi_params
│ ├── koi-utf
│ ├── koi-win
│ ├── mime.types
│ ├── modules -> /usr/lib/nginx/modules
│ ├── nginx.conf
│ ├── scgi_params
│ ├── uwsgi_params
│ └── win-utf
├── docker-compose.yml
└── html
├── 50x.html
└── index.html
# The docker-compose.yml configuration file
cd nginx
cat docker-compose.yml
version: '2.0'
services:
nginxs:
image: nginx
container_name: nginxs
network_mode: host
volumes:
- "./conf:/etc/nginx"
- "./html:/usr/share/nginx/html"
# Inspect the nginx configuration file
cat conf/nginx.conf
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
# Layer-4 (TCP) forwarding
stream {
log_format ws "$remote_addr $upstream_addr $time_local $status";
access_log /var/log/nginx/k8s.log ws;
server {
listen 6443;
proxy_pass app_server;
}
upstream app_server {
server 192.168.1.107:6443; # k8s-master1
server 192.168.1.108:6443; # k8s-master2
}
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
#tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
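The stream proxy fails over via nginx's passive health checks. If you want to tune how quickly a dead master is marked down, the standard max_fails/fail_timeout parameters apply per upstream server (an illustrative tweak, not part of the original config):
upstream app_server {
server 192.168.1.107:6443 max_fails=2 fail_timeout=15s; # mark down for 15s after 2 consecutive failures
server 192.168.1.108:6443 max_fails=2 fail_timeout=15s;
}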
# Bring the container up (docker-compose up -d) and check its status
docker-compose ps
Name Command State Ports
---------------------------------------------
nginxs nginx -g daemon off; Up
# Check the listening port
netstat -nutlp|grep 6443
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 5706/nginx: master
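Because this is plain TCP passthrough, TLS terminates at the apiservers themselves. You can verify that the certificate presented through the load balancer is the apiserver certificate - whose SANs include 192.168.1.55, which is exactly why that IP was added to kubernetes-csr.json (illustrative check):
# Inspect the certificate served through nginx
echo | openssl s_client -connect 192.168.1.55:6443 2>/dev/null | openssl x509 -noout -subject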
5. Deploy the Node
1. Install Docker
# Disable the firewall
ufw disable && ufw status
# Run the Docker install script
curl -s https://raw.githubusercontent.com/jy1779/docker/master/install/aliyun_docker_install.sh | bash
# Fetch the binaries
git clone https://code.aliyun.com/jy1779/kubernetes.git
# Unpack kubernetes-bins and add it to the PATH
tar xf ./kubernetes/kubernetes-bins.tar.gz -C /usr/local/sbin/
echo 'export PATH=$PATH:/usr/local/sbin/kubernetes-bins' >> /etc/profile && source /etc/profile
# Modify docker.service
LINE=$(grep -n ExecStart /lib/systemd/system/docker.service|awk -F : '{print $1}')
EXECSTARTPOST='ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT'
sed "$LINE a$EXECSTARTPOST" -i /lib/systemd/system/docker.service
# Restart Docker
systemctl daemon-reload && service docker restart
service docker status
2. Generate Configuration Files
# Add kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the kernel parameters
sysctl -p /etc/sysctl.d/k8s.conf
# Edit the config.properties configuration file
cd /root/kubernetes/kubernetes-starter/
# View the configuration file
cat config.properties
#Directory of the kubernetes binaries, e.g. /home/michael/bin
BIN_PATH=/usr/local/sbin/kubernetes-bins
#IP of the current node, e.g. 192.168.1.102
NODE_IP=192.168.1.109
#etcd cluster endpoint list, e.g. http://192.168.1.102:2379
#If you already have an etcd cluster, list it here; otherwise use http://${MASTER_IP}:2379 (replace MASTER_IP with your own master node IP)
ETCD_ENDPOINTS=https://192.168.1.107:2379
#Master node IP address - here it points at the nginx load balancer
MASTER_IP=192.168.1.55
# Generate the configuration files
cd ~/kubernetes/kubernetes-starter && ./ with-ca
==== Variables replaced ====
BIN_PATH=/usr/local/sbin/kubernetes-bins
NODE_IP=192.168.1.109
ETCD_ENDPOINTS=https://192.168.1.107:2379
MASTER_IP=192.168.1.55
====================
==== Configuration files generated ====
all-node/kube-calico.service
ca/admin/admin-csr.json
ca/ca-config.json
ca/ca-csr.json
ca/calico/calico-csr.json
ca/etcd/etcd-csr.json
ca/kube-proxy/kube-proxy-csr.json
ca/kubernetes/kubernetes-csr.json
master-node/etcd.service
master-node/kube-apiserver.service
master-node/kube-controller-manager.service
master-node/kube-scheduler.service
services/kube-dashboard.yaml
services/kube-dns.yaml
worker-node/10-calico.conf
worker-node/kubelet.service
worker-node/kube-proxy.service
=================
Configuration generated successfully at: /root/kubernetes/kubernetes-starter/target
# Install cfssl
wget -q --show-progress --https-only --timestamping \
https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \
https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
cfssl version
# Create the certificate directories
# Fetch the CA and calico material from k8s-master1
mkdir -p /etc/kubernetes/ca/
mkdir -p /etc/kubernetes/ca/calico/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-key.pem /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca-config.json /etc/kubernetes/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico.pem /etc/kubernetes/ca/calico/
rsync -av 192.168.1.107:/etc/kubernetes/ca/calico/calico-key.pem /etc/kubernetes/ca/calico/
3. Deploy the Calico Network
# Copy the calico service file into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/all-node/kube-calico.service /lib/systemd/system/
# Enable kube-calico.service
systemctl enable kube-calico.service
# Start the kube-calico.service
service kube-calico start
# Check the calico nodes; the master nodes' calico peers should be visible
calicoctl node status
4. Deploy the Kubelet
cd /etc/kubernetes/
# Create bootstrap.kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ca/ca.pem \
--embed-certs=true \
--server=https://192.168.1.55:6443 \
--kubeconfig=bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap \
--token=97e8c07dce2b2bab69cfd3162d5383c9 \
--kubeconfig=bootstrap.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Prepare the CNI configuration
mkdir -p /etc/cni/net.d/
cp ~/kubernetes/kubernetes-starter/target/worker-node/10-calico.conf /etc/cni/net.d/
# Create the kubelet working directory
mkdir /var/lib/kubelet
# Copy kubelet.service into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/worker-node/kubelet.service /lib/systemd/system/
# Enable the kubelet service
systemctl enable kubelet
# Start the kubelet service
service kubelet start
5. Sign the Certificate on the Master
# Run on the master server
kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-Vuj62TUED4foaVjOmsbvMLJpfDsy1RBHbKMAhgtuoyE 16s kubelet-bootstrap Pending
# Approve the pending request
kubectl get csr|grep 'Pending' | awk '{print $1}'| xargs kubectl certificate approve
certificatesigningrequest "node-csr-Vuj62TUED4foaVjOmsbvMLJpfDsy1RBHbKMAhgtuoyE" approved
# Check again
kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-Vuj62TUED4foaVjOmsbvMLJpfDsy1RBHbKMAhgtuoyE 1m kubelet-bootstrap Approved,Issued
# Verify the node
kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.109 Ready <none> 52s v1.9.0
6. Deploy kube-proxy
# Create the kube-proxy working directory and a directory for its certificates
mkdir -p /var/lib/kube-proxy
mkdir -p /etc/kubernetes/ca/kube-proxy
# Copy the kube-proxy certificate request config
cp ~/kubernetes/kubernetes-starter/target/ca/kube-proxy/kube-proxy-csr.json /etc/kubernetes/ca/kube-proxy/
cd /etc/kubernetes/ca/kube-proxy/
# Sign the kube-proxy certificate with the root certificate (ca.pem)
cfssl gencert \
-ca=/etc/kubernetes/ca/ca.pem \
-ca-key=/etc/kubernetes/ca/ca-key.pem \
-config=/etc/kubernetes/ca/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
cd /etc/kubernetes/
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/ca/ca.pem \
--embed-certs=true \
--server=https://192.168.1.55:6443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/ca/kube-proxy/kube-proxy.pem \
--client-key=/etc/kubernetes/ca/kube-proxy/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
# Copy the kube-proxy service file into the systemd directory
cp ~/kubernetes/kubernetes-starter/target/worker-node/kube-proxy.service /lib/systemd/system/
# Enable the kube-proxy service
systemctl enable kube-proxy
# Start the kube-proxy service
service kube-proxy start
7. Create a Deployment on k8s-master1
# The nginx-deployment.yaml configuration file
cat nginx-deployment.yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: nginx
annotations:
/secure-backends: "true"
spec:
replicas: 1
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: /jonny/nginx:1.9.14
ports:
- containerPort: 80
# The nginx-service.yaml configuration file
cat nginx-service.yaml
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
selector:
app: nginx
ports:
- protocol: TCP
port: 80
targetPort: 80
nodePort: 20001
type: NodePort
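Create the deployment and service from the two manifests (the original jumps straight to checking the pod, so the exact command is assumed):
kubectl create -f nginx-deployment.yaml -f nginx-service.yaml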
# Check the nginx pod
kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-65dbdf6899-z8cp5 1/1 Running 0 2m
kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.68.0.1 <none> 443/TCP 2h
nginx-service NodePort 10.68.169.183 <none> 80:20001/TCP 2m
# Verify nginx
curl -I http://192.168.1.109:20001/
HTTP/1.1 200 OK
Server: nginx/1.9.14
Date: Thu, 18 Apr 2019 09:04:03 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Wed, 21 Sep 2016 08:11:20 GMT
Connection: keep-alive
ETag: "57e240a8-264"
Accept-Ranges: bytes
6. Add a Remote kubectl Client
Add a kubectl client on the nginx server.
# Fetch the binary and certificates from k8s-master1
rsync -av 192.168.1.107:/usr/local/sbin/kubernetes-bins/kubectl /usr/bin/kubectl
rsync -av 192.168.1.107:/etc/kubernetes/ca/admin/ /root/kubectl/ca/
rsync -av 192.168.1.107:/etc/kubernetes/ca/ca.pem /root/kubectl/ca/
# Run from the certificate directory so the relative paths resolve
cd /root/kubectl/ca/
# Point kubectl at the nginx load balancer address and the CA certificate
kubectl config set-cluster kubernetes --server=https://192.168.1.55:6443 --certificate-authority=ca.pem
# Set the cluster-admin user entry with certificate authentication
kubectl config set-credentials cluster-admin --certificate-authority=ca.pem --client-key=admin-key.pem --client-certificate=admin.pem
# Create a default context with the kubernetes cluster and this user
kubectl config set-context default --cluster=kubernetes --user=cluster-admin
# Make default the current context
kubectl config use-context default
cat /root/.kube/config
apiVersion: v1
clusters:
- cluster:
certificate-authority: /root/kubectl/ca/ca.pem
server: https://192.168.1.55:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: cluster-admin
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: cluster-admin
user:
as-user-extra: {}
client-certificate: /root/kubectl/ca/admin.pem
client-key: /root/kubectl/ca/admin-key.pem
# Verify
kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.109 Ready <none> 1h v1.9.0
7. Simulate Shutting Down k8s-master1
# Run on k8s-master1
service kube-apiserver stop
service kube-controller-manager stop
service kube-scheduler stop
# Local kubectl can no longer connect
kubectl get node
The connection to the server 192.168.1.107:6443 was refused - did you specify the right host or port?
# The remote client on the nginx server is unaffected
kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.1.109 Ready <none> 48m v1.9.0
kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-65dbdf6899-z8cp5 1/1 Running 0 42m
# The nginx log shows that once the 192.168.1.107 master goes down, requests are forwarded to the other master, 192.168.1.108
192.168.1.55 192.168.1.108:6443 18/Apr/2019:10:19:19 +0000 200
Reposted from: https://blog.51cto.com/jiay1/2381178