Deploying Kubernetes with kubeadm

  • Node layout (hostname and /etc/hosts setup sketched below)

    • Control-plane node (master): 192.168.111.20
    • Worker nodes (node1, node2): 192.168.111.21 and 192.168.111.22
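The original does not show the hostname or /etc/hosts configuration. A minimal sketch, assuming the hostnames master/node1/node2 used throughout the commands below map to the IPs above (run the matching hostnamectl on each machine):

[root@master ~]# hostnamectl set-hostname master   #use node1 / node2 on the two workers
[root@master ~]# cat >> /etc/hosts <<EOF
192.168.111.20 master
192.168.111.21 node1
192.168.111.22 node2
EOF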

1. Initialization

1.1 Replace the yum repos with Aliyun mirrors

[root@node2 yum.repos.d]# ls
CentOS-Base.repo
[root@node2 yum.repos.d]# cat CentOS-Base.repo
# CentOS-Base.repo
#
# The mirror system uses the connecting IP address of the client and the
# update status of each mirror to pick mirrors that are updated to and
# geographically close to the client.  You should use this for CentOS updates
# unless you are manually picking other mirrors.
#
# If the mirrorlist= does not work for you, as a fall back you can try the 
# remarked out baseurl= line instead.
#
#
 
[base]
name=CentOS-$releasever - Base - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/os/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/os/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/os/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#released updates 
[updates]
name=CentOS-$releasever - Updates - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/updates/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/updates/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/updates/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/extras/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/extras/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/extras/$basearch/
gpgcheck=1
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/centosplus/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/centosplus/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/centosplus/$basearch/
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7
 
#contrib - packages by Centos Users
[contrib]
name=CentOS-$releasever - Contrib - mirrors.aliyun.com
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/contrib/$basearch/
        http://mirrors.aliyuncs.com/centos/$releasever/contrib/$basearch/
        http://mirrors.cloud.aliyuncs.com/centos/$releasever/contrib/$basearch/
gpgcheck=1
enabled=0
gpgkey=http://mirrors.aliyun.com/centos/RPM-GPG-KEY-CentOS-7

1.2 Install base packages

[root@node2 yum.repos.d]# yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet

1.3 Disable swap

[root@node2 yum.repos.d]# swapoff -a
[root@node2 yum.repos.d]# cat /etc/fstab 
#To disable swap permanently, comment out the swap entry in /etc/fstab (add a # at the start of the swap line)
  • Question: Why disable swap?
Swap is the swap partition: when the machine runs low on memory it spills over to swap, but swap is much slower than RAM. To keep performance predictable, Kubernetes does not allow swap by default, and kubeadm's preflight checks fail initialization if swap is still enabled. If you want to keep swap enabled anyway, pass --ignore-preflight-errors=Swap when installing.
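A one-liner that comments out the swap entry in /etc/fstab, as described above (a sketch; adjust it if your fstab layout differs):

[root@node2 ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab   #prefix every line containing "swap" with #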

1.4 Adjust kernel parameters

[root@node2 ~]# modprobe br_netfilter
[root@node2 ~]# echo "modprobe br_netfilter" >> /etc/profile
[root@node2 ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
[root@node2 ~]# sysctl -p /etc/sysctl.d/k8s.conf
  • Question: Why run modprobe br_netfilter?
Without the module loaded, sysctl -p /etc/sysctl.d/k8s.conf reports:

sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory

#Fix:
[root@node2 ~]# modprobe br_netfilter
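Appending modprobe to /etc/profile only runs at login; a more robust alternative (an assumption, not part of the original steps) is to let systemd load the module at every boot:

[root@node2 ~]# cat > /etc/modules-load.d/br_netfilter.conf <<EOF
br_netfilter
EOF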

1.5 Disable the firewall and SELinux

[root@node2 ~]# systemctl stop firewalld ; systemctl disable firewalld
[root@node2 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
#After changing the SELinux config file, reboot the machine for the change to take effect permanently
[root@node2 ~]# getenforce
Disabled
#Disabled means SELinux is now off
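If getenforce still reports Enforcing before the reboot, SELinux can be switched to permissive mode for the current session (a common companion step, not shown in the original):

[root@node2 ~]# setenforce 0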

1.6 Configure the Docker and EPEL repos

[root@node2 ~]# yum -y install yum-utils
#Add the Aliyun mirror of the Docker CE repo
[root@node2 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

#EPEL repo
[root@node2 yum.repos.d]# cat epel.repo 
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch
metalink=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch&infra=$infra&content=$contentdir
failovermethod=priority
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7

[epel-debuginfo]
name=Extra Packages for Enterprise Linux 7 - $basearch - Debug
#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug
metalink=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch&infra=$infra&content=$contentdir
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1

[epel-source]
name=Extra Packages for Enterprise Linux 7 - $basearch - Source
#baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS
metalink=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch&infra=$infra&content=$contentdir
failovermethod=priority
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7
gpgcheck=1

#Add the Aliyun Kubernetes repo used to install the k8s components
[root@node2 ~]# vim /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
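After adding the repos it can help to refresh the yum metadata cache (a routine step, not shown in the original):

[root@node2 ~]# yum clean all && yum makecache fast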

1.7 Configure time synchronization

[root@node2 ~]# yum install ntpdate -y
[root@node2 ~]# crontab -e
0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org   #sync once per hour
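A one-off manual sync can be run right away to verify that the NTP server is reachable (not shown in the original):

[root@node2 ~]# ntpdate cn.pool.ntp.org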

1.8 Enable the IPVS kernel modules

[root@node2 yum.repos.d]# cd /etc/sysconfig/modules/
[root@node2 modules]# ls
ipvs.modules
[root@node2 modules]# cat ipvs.modules 
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  # only load the module if modinfo can actually find it
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe ${kernel_module}
  fi
done

[root@node2 yum.repos.d]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
ip_vs_ftp              13079  0 
nf_nat                 26583  1 ip_vs_ftp
ip_vs_sed              12519  0 
ip_vs_nq               12516  0 
ip_vs_sh               12688  0 
ip_vs_dh               12688  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs_wlc              12519  0 
ip_vs_lc               12516  0 
ip_vs                 145458  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          139264  2 ip_vs,nf_nat
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack

2. Install Docker

  • Install the Docker engine
#Install on all three machines
[root@master ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 containerd.io  -y
[root@node2 ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 containerd.io  -y
[root@node1 ~]# yum install docker-ce-20.10.6 docker-ce-cli-20.10.6 containerd.io  -y

#Start Docker; run this on all three machines
[root@master ~]# systemctl start docker && systemctl enable docker.service
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

  • Configure Docker registry mirrors
[root@node2 docker]# cat  /etc/docker/daemon.json
{
 "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
#Change the Docker cgroup driver to systemd (the default is cgroupfs); the kubelet uses systemd by default, and the two must match

[root@master ~]# systemctl daemon-reload  && systemctl restart docker
[root@master ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
   Active: active (running) since Mon 2022-08-29 22:37:06 CST; 34s ago
     Docs: https://docs.docker.com
 Main PID: 13119 (dockerd)
    Tasks: 10
   Memory: 42.5M
   CGroup: /system.slice/docker.service
           └─13119 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.655223549+08:00" level=info msg="ccResolverWrapper: sending update to cc: {[{unix:///run/containerd/containerd.sock  <nil> 0 <nil>}] <nil> <nil>}" module=grpc
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.655231936+08:00" level=info msg="ClientConn switching balancer to \"pick_first\"" module=grpc
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.663246881+08:00" level=info msg="[graphdriver] using prior storage driver: overlay2"
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.665089277+08:00" level=info msg="Loading containers: start."
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.741472655+08:00" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address"
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.770290896+08:00" level=info msg="Loading containers: done."
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.807356996+08:00" level=info msg="Docker daemon" commit=8728dd2 graphdriver(s)=overlay2 version=20.10.6
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.807401641+08:00" level=info msg="Daemon has completed initialization"
Aug 29 22:37:06 master systemd[1]: Started Docker Application Container Engine.
Aug 29 22:37:06 master dockerd[13119]: time="2022-08-29T22:37:06.820293815+08:00" level=info msg="API listen on /var/run/docker.sock"
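After the restart it is worth confirming that the cgroup driver really switched to systemd (a quick check, not part of the original steps); it should report "Cgroup Driver: systemd":

[root@master ~]# docker info | grep -i 'cgroup driver'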

3. Install the k8s packages

  • Install kubelet, kubeadm, and kubectl
[root@master ~]# yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
[root@master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@master ~]# systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /usr/lib/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: inactive (dead)
     Docs: https://kubernetes.io/docs/
#kubelet is not in the running state yet; this is expected and can be ignored. It becomes active once the k8s control-plane components are up.
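Optionally, the control-plane images can be pulled ahead of time so that kubeadm init does not have to download them; this sketch reuses the repository and version flags from the init command below:

[root@master ~]# kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.20.6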

4. Initialize the master (control-plane) node

#The required images can be loaded offline beforehand or pulled online during init
[root@master src]# kubeadm init --kubernetes-version=1.20.6  --apiserver-advertise-address=192.168.111.20  --image-repository registry.aliyuncs.com/google_containers  --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=SystemVerification
[init] Using Kubernetes version: v1.20.6
[preflight] Running pre-flight checks
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.6. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master] and IPs [10.96.0.1 192.168.111.20]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master] and IPs [192.168.111.20 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master] and IPs [192.168.111.20 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 57.002241 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 9woxx2.1cw551rmlxhurpo0
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.111.20:6443 --token 9woxx2.1cw551rmlxhurpo0 \
    --discovery-token-ca-cert-hash sha256:f8d7b2fed1222fc6696d410a120191f506e8f364c28d3c5854c06cc7f3b6524e
#Save this join command: the worker nodes need it to join the cluster, and the token/hash are different for every installation

#Note: --image-repository registry.aliyuncs.com/google_containers manually sets the image registry. kubeadm pulls images from k8s.gcr.io by default, which is unreachable from mainland China, so the Aliyun registry.aliyuncs.com/google_containers mirror is used instead.

[root@master src]#   mkdir -p $HOME/.kube
[root@master src]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master src]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master src]#  kubectl get nodes
NAME     STATUS     ROLES                  AGE    VERSION
master   NotReady   control-plane,master   2m2s   v1.20.6 #The node is still NotReady because no network plugin has been installed yet.

Note: what each package does

  • kubeadm: a tool for bootstrapping and initializing the k8s cluster

  • kubelet: installed on every node in the cluster; responsible for starting Pods

  • kubectl: the CLI used to deploy and manage applications, inspect resources, and create, delete, and update components

5. Join the worker nodes with kubeadm

[root@master src]# kubeadm token create --print-join-command #print a fresh join command
kubeadm join 192.168.111.20:6443 --token dzauur.v87pet87jz85gnyi     --discovery-token-ca-cert-hash sha256:f8d7b2fed1222fc6696d410a120191f506e8f364c28d3c5854c06cc7f3b6524e
  • Join the nodes to the k8s cluster
[root@node1 src]# kubeadm join 192.168.111.20:6443 --token dzauur.v87pet87jz85gnyi     --discovery-token-ca-cert-hash sha256:f8d7b2fed1222fc6696d410a120191f506e8f364c28d3c5854c06cc7f3b6524e
[preflight] Running pre-flight checks
	[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 20.10.6. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@master src]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
master   NotReady   control-plane,master   6m33s   v1.20.6
node1    NotReady   <none>                 71s     v1.20.6
node2    NotReady   <none>                 7s      v1.20.6

#Add the worker role label (run the same command for node2 as well)
[root@master src]# kubectl label node node1 node-role.kubernetes.io/worker=worker
node/node1 labeled
[root@master src]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
master   NotReady   control-plane,master   7m56s   v1.20.6
node1    NotReady   worker                 2m34s   v1.20.6
node2    NotReady   worker                 90s     v1.20.6

6. Install the Calico network plugin

  • Download the Calico manifest: https://docs.projectcalico.org/manifests/calico.yaml
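A typical way to fetch the manifest onto the master (the original only gives the URL):

[root@master src]# wget https://docs.projectcalico.org/manifests/calico.yaml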
[root@master src]# kubectl apply -f  calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created

[root@master src]# kubectl get pod -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-6949477b58-skmx2   1/1     Running   0          40s
calico-node-9b9pn                          1/1     Running   0          40s
calico-node-hghxv                          1/1     Running   0          40s
calico-node-jdjw7                          1/1     Running   0          40s
coredns-7f89b7bc75-4wgtn                   1/1     Running   0          10m
coredns-7f89b7bc75-f7cqv                   1/1     Running   0          10m
etcd-master                                1/1     Running   0          10m
kube-apiserver-master                      1/1     Running   0          10m
kube-controller-manager-master             1/1     Running   0          10m
kube-proxy-8qcxd                           1/1     Running   0          10m
kube-proxy-rfjwn                           1/1     Running   0          5m38s
kube-proxy-xw7s7                           1/1     Running   0          4m34s
kube-scheduler-master                      1/1     Running   0          10m

[root@master src]# kubectl get nodes
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   11m     v1.20.6
node1    Ready    worker                 5m58s   v1.20.6
node2    Ready    worker                 4m54s   v1.20.6

7. Test the cluster

  • Create a pod to test networking
[root@master src]# docker load -i busybox-1-28.tar.gz
432b65032b94: Loading layer [==================================================>]   1.36MB/1.36MB
Loaded image: busybox:1.28
[root@master src]# kubectl run busybox --image busybox:1.28 --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # ping www.baidu.com
PING www.baidu.com (39.156.66.18): 56 data bytes
64 bytes from 39.156.66.18: seq=0 ttl=127 time=7.598 ms
64 bytes from 39.156.66.18: seq=1 ttl=127 time=7.377 ms
^C
--- www.baidu.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 7.377/7.487/7.598 ms
#The pod can reach the external network, which shows the Calico network plugin is installed and working
  • Deploy Tomcat on k8s as a further test
[root@master src]# cat tomcat.yaml 
apiVersion: v1  #Pod belongs to the core v1 API group
kind: Pod  #the resource being created is a Pod
metadata:  #metadata
  name: demo-pod  #pod name
  namespace: default  #namespace the pod belongs to
  labels:
    app: myapp  #pod labels
    env: dev
spec:
  containers:      #list of containers; there may be more than one entry
  - name: tomcat-pod-java  #container name
    ports:
    - containerPort: 8080
    image: tomcat:8.5-jre8-alpine   #container image
    imagePullPolicy: IfNotPresent

[root@master src]# kubectl apply -f tomcat.yaml
pod/demo-pod created
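The tomcat-service.yaml file itself is not shown in the original; a minimal sketch consistent with the kubectl get svc output below (NodePort 30080; the app: myapp selector is an assumption based on the pod labels above):

apiVersion: v1
kind: Service
metadata:
  name: tomcat        #matches the service name in the output below
spec:
  type: NodePort
  selector:
    app: myapp        #assumed to match the demo-pod label
  ports:
  - port: 8080        #service port
    targetPort: 8080  #container port
    nodePort: 30080   #matches 8080:30080/TCP in the output below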
[root@master src]# kubectl apply -f tomcat-service.yaml 
service/tomcat created
[root@master src]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP          16m
tomcat       NodePort    10.97.163.141   <none>        8080:30080/TCP   16s

[root@master src]# kubectl get pods -owide
NAME       READY   STATUS    RESTARTS   AGE     IP               NODE    NOMINATED NODE   READINESS GATES
demo-pod   1/1     Running   0          2m54s   10.244.166.131   node1   <none>           <none>
[root@master src]# curl -Lvo /dev/null http://192.168.111.21:30080/
* About to connect() to 192.168.111.21 port 30080 (#0)
*   Trying 192.168.111.21...
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0* Connected to 192.168.111.21 (192.168.111.21) port 30080 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: 192.168.111.21:30080
> Accept: */*
> 
< HTTP/1.1 200 
< Content-Type: text/html;charset=UTF-8
< Transfer-Encoding: chunked
< Date: Mon, 29 Aug 2022 15:15:53 GMT
< 
{ [data not shown]
100 11184    0 11184    0     0  2317k      0 --:--:-- --:--:-- --:--:-- 2730k
* Connection #0 to host 192.168.111.21 left intact

  • Test CoreDNS
[root@master src]# kubectl run busybox --image busybox:1.28 --restart=Never --rm -it busybox -- sh
If you don't see a command prompt, try pressing enter.
/ # 
/ # 
/ # nslookup kubernetes.default.svc.cluster.local
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes.default.svc.cluster.local
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

#Use the busybox 1.28 image specifically, not the latest tag: with the latest busybox, nslookup fails to resolve the DNS name and IP correctly
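The in-cluster Service created earlier can be resolved the same way (an extra check, not in the original); it should return the tomcat ClusterIP shown above:

/ # nslookup tomcat.default.svc.cluster.local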
  • Check certificate validity periods

openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -text |grep Not

openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -text |grep Not
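kubeadm can also summarize all certificate expirations in one command (the kubeadm certs subcommand should be available in this 1.20 release):

kubeadm certs check-expiration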