hostnamectl set-hostname k8s-m-1 ... hostnamectl set-hostname k8s-w-10
cat >>/etc/hosts<<EOF 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 #k8s master 10.66.10.26 k8s-m-1 10.66.10.27 k8s-m-2 10.66.10.28 k8s-m-3 #k8s node 10.66.10.31 k8s-w-1 10.66.10.32 k8s-w-2 10.66.10.33 k8s-w-3 10.66.10.34 k8s-w-4 10.66.10.35 k8s-w-5 10.66.10.36 k8s-w-6 10.66.10.37 k8s-w-7 10.66.10.38 k8s-w-8 10.66.10.39 k8s-w-9 10.66.10.40 k8s-w-10 EOF
ssh-keygen -t rsa ssh-copy-id root@k8s-m-1 … ssh-copy-id root@k8s-w-10
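Running ssh-copy-id by hand thirteen times is tedious; a minimal loop sketch, assuming the hostnames are already resolvable via /etc/hosts and that the ALL_NODES array below is something you define yourself (it is not part of the original guide):
# Hypothetical helper: push the root SSH key to every master and worker in one pass.
ALL_NODES=(k8s-m-1 k8s-m-2 k8s-m-3 k8s-w-1 k8s-w-2 k8s-w-3 k8s-w-4 k8s-w-5 k8s-w-6 k8s-w-7 k8s-w-8 k8s-w-9 k8s-w-10)
for node in "${ALL_NODES[@]}"; do
  echo ">>> ${node}"
  ssh-copy-id "root@${node}"   # prompts once for each node's root password
done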
yum install -y epel-release yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget
systemctl stop firewalld systemctl disable firewalld iptables -F && iptables -X && iptables -F -t nat iptables -X -t nat && iptables -P FORWARD ACCEPT
setenforce 0 sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
cat > /etc/sysctl.d/kubernetes.conf <<EOF net.bridge.bridge-nf-call-iptables=1 net.bridge.bridge-nf-call-ip6tables=1 net.ipv4.ip_forward=1 net.ipv4.tcp_tw_recycle=0 net.ipv4.neigh.default.gc_thresh1=1024 net.ipv4.neigh.default.gc_thresh2=2048 net.ipv4.neigh.default.gc_thresh3=4096 vm.swappiness=0 vm.overcommit_memory=1 vm.panic_on_oom=0 fs.inotify.max_user_instances=8192 fs.inotify.max_user_watches=1048576 fs.file-max=52706963 fs.nr_open=52706963 net.ipv6.conf.all.disable_ipv6=1 net.netfilter.nf_conntrack_max=2310720 EOF sysctl -p /etc/sysctl.d/kubernetes.conf
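The bridge-nf-call sysctls only take effect once the br_netfilter module is loaded, and the ipvs mode used by kube-proxy later needs the ip_vs modules. A minimal sketch, loading them now and persisting them across reboots (the /etc/modules-load.d path is my choice; on 4.19+ kernels the conntrack module is nf_conntrack instead of nf_conntrack_ipv4):
# Load required kernel modules and make them persistent.
modprobe br_netfilter
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
sysctl -p /etc/sysctl.d/kubernetes.conf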
timedatectl set-timezone Asia/Shanghai # pick one timezone (Asia/Shanghai, Asia/Tokyo, ...) and use it consistently on every node
systemctl stop postfix && systemctl disable postfix
yum -y update rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm yum --disablerepo="*" --enablerepo="elrepo-kernel" list available yum --enablerepo=elrepo-kernel install kernel-lt.x86_64 -y sudo awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg sudo grub2-set-default 0 # then reboot so the node boots into the new kernel
mkdir -p /opt/k8s/{bin,work} /etc/{kubernetes,etcd}/cert
echo 'PATH=/opt/k8s/bin:$PATH' >>/root/.bashrc source /root/.bashrc
mkdir -p /opt/k8s/cert && cd /opt/k8s/work wget https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl_1.4.1_linux_amd64 mv cfssl_1.4.1_linux_amd64 /opt/k8s/bin/cfssl wget https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssljson_1.4.1_linux_amd64 mv cfssljson_1.4.1_linux_amd64 /opt/k8s/bin/cfssljson wget https://github.com/cloudflare/cfssl/releases/download/v1.4.1/cfssl-certinfo_1.4.1_linux_amd64 mv cfssl-certinfo_1.4.1_linux_amd64 /opt/k8s/bin/cfssl-certinfo chmod +x /opt/k8s/bin/* export PATH=/opt/k8s/bin:$PATH
cd /opt/k8s/work cat > ca-config.json <<EOF { "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "usages": [ "signing", "key encipherment", "server auth", "client auth" ], "expiry": "876000h" } } } } EOF
cd /opt/k8s/work cat > ca-csr.json <<EOF { "CN": "kubernetes-ca", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "opsnull" } ], "ca": { "expiry": "876000h" } } EOF
cd /opt/k8s/work cfssl gencert -initca ca-csr.json | cfssljson -bare ca ls ca*
mkdir -p /etc/kubernetes/cert scp ca*.pem ca-config.json root@k8s-m-1:/etc/kubernetes/cert/ … scp ca*.pem ca-config.json root@k8s-w-10:/etc/kubernetes/cert/
yum install -y keepalived
cat > /etc/keepalived/keepalived.conf <<EOF ! Configuration File for keepalived global_defs { router_id LVS_DEVEL } vrrp_script check_haproxy { script "killall -0 haproxy" interval 3 weight -2 fall 10 rise 2 } vrrp_instance VI_1 { state MASTER interface eth0 virtual_router_id 51 priority 100 advert_int 1 authentication { auth_type PASS auth_pass 1111 } virtual_ipaddress { 10.66.10.3 } } EOF
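The configuration above is the MASTER instance. The other two masters run keepalived as backups for the same VIP; only state and priority change, everything else (virtual_router_id, auth, VIP) stays identical. A sketch of the adjustment to make after copying the file to k8s-m-2/k8s-m-3 (the chosen priorities are examples, any value lower than the MASTER's works):
# On k8s-m-2 (use e.g. priority 80 on k8s-m-3):
sed -i -e 's/state MASTER/state BACKUP/' -e 's/priority 100/priority 90/' /etc/keepalived/keepalived.conf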
scp -pr /etc/keepalived/keepalived.conf root@k8s-m-1:/etc/keepalived/ (repeat for each master node)
systemctl enable keepalived.service systemctl start keepalived.service systemctl status keepalived.service ip address show eth0
yum install -y haproxy
cat > /etc/haproxy/haproxy.cfg << EOF #--------------------------------------------------------------------- # Example configuration for a possible web application. See the # full configuration options online. # # http://haproxy.1wt.eu/download/1.4/doc/configuration.txt # #--------------------------------------------------------------------- #--------------------------------------------------------------------- # Global settings #--------------------------------------------------------------------- global # to have these messages end up in /var/log/haproxy.log you will # need to: # # 1) configure syslog to accept network log events. This is done # by adding the '-r' option to the SYSLOGD_OPTIONS in # /etc/sysconfig/syslog # # 2) configure local2 events to go to the /var/log/haproxy.log # file. A line like the following can be added to # /etc/sysconfig/syslog # # local2.* /var/log/haproxy.log # log 127.0.0.1 local2 chroot /var/lib/haproxy pidfile /var/run/haproxy.pid maxconn 4000 user haproxy group haproxy daemon # turn on stats unix socket stats socket /var/lib/haproxy/stats #--------------------------------------------------------------------- # common defaults that all the 'listen' and 'backend' sections will # use if not designated in their block #--------------------------------------------------------------------- defaults mode http log global option httplog option dontlognull option http-server-close option forwardfor except 127.0.0.0/8 option redispatch retries 3 timeout http-request 10s timeout queue 1m timeout connect 10s timeout client 1m timeout server 1m timeout http-keep-alive 10s timeout check 10s maxconn 3000 #--------------------------------------------------------------------- # main frontend which proxys to the backends #--------------------------------------------------------------------- frontend kubernetes-apiserver mode tcp bind *:8443 option tcplog default_backend kubernetes-apiserver #--------------------------------------------------------------------- # round robin balancing between the various backends #--------------------------------------------------------------------- backend kubernetes-apiserver mode tcp balance roundrobin server k8s-m-1 10.66.10.26:6443 check server k8s-m-2 10.66.10.27:6443 check server k8s-m-3 10.66.10.28:6443 check #--------------------------------------------------------------------- # collection haproxy statistics message #--------------------------------------------------------------------- listen stats bind *:1080 stats auth admin:awesomePassword stats refresh 5s stats realm HAProxy\ Statistics stats uri /admin EOF
scp -pr /etc/haproxy/haproxy.cfg root@k8s-m-1:/etc/haproxy/
systemctl enable haproxy.service systemctl start haproxy.service systemctl status haproxy.service ss -lnt | grep -E "8443|1080"
wget https://dl.k8s.io/v1.18.2/kubernetes-server-linux-amd64.tar.gz tar -zxvf kubernetes-server-linux-amd64.tar.gz -C /opt/k8s/work/
cd /opt/k8s/work/kubernetes/server/bin
scp -pr kube-apiserver kubectl kube-controller-manager kube-scheduler root@k8s-m-1:/opt/k8s/bin/ ... scp -pr kube-apiserver kubectl kube-controller-manager kube-scheduler root@k8s-m-3:/opt/k8s/bin/
scp -pr kubelet kube-proxy root@k8s-w-1:/opt/k8s/bin/ ... scp -pr kubelet kube-proxy root@k8s-w-10:/opt/k8s/bin/
cd /opt/k8s/work cat > admin-csr.json <<EOF { "CN": "admin", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "system:masters", "OU": "opsnull" } ] } EOF
cd /opt/k8s/work cfssl gencert -ca=/opt/k8s/work/ca.pem \ -ca-key=/opt/k8s/work/ca-key.pem \ -config=/opt/k8s/work/ca-config.json \ -profile=kubernetes admin-csr.json | cfssljson -bare admin
# set cluster parameters kubectl config set-cluster kubernetes \ --certificate-authority=/opt/k8s/work/ca.pem \ --embed-certs=true \ --server=https://10.66.10.3:8443 \ --kubeconfig=kubectl.kubeconfig # set client authentication parameters kubectl config set-credentials admin \ --client-certificate=/opt/k8s/work/admin.pem \ --client-key=/opt/k8s/work/admin-key.pem \ --embed-certs=true \ --kubeconfig=kubectl.kubeconfig # set context parameters kubectl config set-context kubernetes \ --cluster=kubernetes \ --user=admin \ --kubeconfig=kubectl.kubeconfig # set the default context kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig
mkdir -p ~/.kube scp kubectl.kubeconfig root@k8s-m-1:~/.kube/config … scp kubectl.kubeconfig root@k8s-m-3:~/.kube/config
wget https://github.com/coreos/etcd/releases/download/v3.3.13/etcd-v3.3.13-linux-amd64.tar.gz
cat > etcd-csr.json <<EOF { "CN": "etcd", "hosts": [ "127.0.0.1", "10.66.10.26", "10.66.10.27", "10.66.10.28" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "opsnull" } ] } EOF
cfssl gencert -ca=/opt/k8s/work/ca.pem \ -ca-key=/opt/k8s/work/ca-key.pem \ -config=/opt/k8s/work/ca-config.json \ -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
mkdir -p /etc/etcd/cert scp -pr etcd*.pem root@k8s-m-1:/etc/etcd/cert/ ... scp -pr etcd*.pem root@k8s-m-3:/etc/etcd/cert/
cat > /etc/systemd/system/etcd.service <<EOF [Unit] Description=Etcd Server After=network.target After=network-online.target Wants=network-online.target Documentation=https://github.com/coreos [Service] Type=notify WorkingDirectory=/data/k8s/etcd/data ExecStart=/opt/k8s/bin/etcd \ --data-dir=/data/k8s/etcd/data \ --wal-dir=/data/k8s/etcd/wal \ --name=k8s-m-1 \ --cert-file=/etc/etcd/cert/etcd.pem \ --key-file=/etc/etcd/cert/etcd-key.pem \ --trusted-ca-file=/etc/kubernetes/cert/ca.pem \ --peer-cert-file=/etc/etcd/cert/etcd.pem \ --peer-key-file=/etc/etcd/cert/etcd-key.pem \ --peer-trusted-ca-file=/etc/kubernetes/cert/ca.pem \ --peer-client-cert-auth \ --client-cert-auth \ --listen-peer-urls=https://10.66.10.26:2380 \ --initial-advertise-peer-urls=https://10.66.10.26:2380 \ --listen-client-urls=https://10.66.10.26:2379,http://127.0.0.1:2379 \ --advertise-client-urls=https://10.66.10.26:2379 \ --initial-cluster-token=etcd-cluster-0 \ --initial-cluster=k8s-m-1=https://10.66.10.26:2380,k8s-m-2=https://10.66.10.27:2380,k8s-m-3=https://10.66.10.28:2380 \ --initial-cluster-state=new \ --auto-compaction-mode=periodic \ --auto-compaction-retention=1 \ --max-request-bytes=33554432 \ --quota-backend-bytes=6442450944 \ --heartbeat-interval=250 \ --election-timeout=2000 Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
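The unit above is written for k8s-m-1. On k8s-m-2 and k8s-m-3, --name and the listen/advertise URLs must carry that master's own name and IP, while --initial-cluster stays identical on all three. A generation sketch, assuming the hypothetical MASTER_NAMES/MASTER_IPS arrays below (GNU sed alternation is used in the address pattern):
# Render a per-master unit from the k8s-m-1 version and push it out (array names are my own).
MASTER_NAMES=(k8s-m-1 k8s-m-2 k8s-m-3)
MASTER_IPS=(10.66.10.26 10.66.10.27 10.66.10.28)
for i in 1 2; do   # index 0 (k8s-m-1) already has the correct file
  sed -e "s/--name=k8s-m-1/--name=${MASTER_NAMES[i]}/" \
      -e "/--listen-\|--advertise-client-urls\|--initial-advertise-peer-urls/s/10.66.10.26/${MASTER_IPS[i]}/g" \
      /etc/systemd/system/etcd.service > etcd-${MASTER_NAMES[i]}.service
  scp etcd-${MASTER_NAMES[i]}.service root@${MASTER_NAMES[i]}:/etc/systemd/system/etcd.service
done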
mkdir -p /data/k8s/etcd/{data,wal} systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd
ETCDCTL_API=3 /opt/k8s/bin/etcdctl \ --endpoints=https://10.66.10.26:2379,https://10.66.10.27:2379,https://10.66.10.28:2379 \ --cacert=/etc/kubernetes/cert/ca.pem \ --cert=/etc/etcd/cert/etcd.pem \ --key=/etc/etcd/cert/etcd-key.pem endpoint health
ETCDCTL_API=3 /opt/k8s/bin/etcdctl \ -w table --cacert=/etc/kubernetes/cert/ca.pem \ --cert=/etc/etcd/cert/etcd.pem \ --key=/etc/etcd/cert/etcd-key.pem \ --endpoints=https://10.66.10.26:2379,https://10.66.10.27:2379,https://10.66.10.28:2379 endpoint status
mkdir -p /opt/k8s/work/flannel wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz tar -xzvf flannel-v0.11.0-linux-amd64.tar.gz -C /opt/k8s/work/flannel
cd /opt/k8s/work scp flannel/{flanneld,mk-docker-opts.sh} root@k8s-w-1:/opt/k8s/bin/ ... scp flannel/{flanneld,mk-docker-opts.sh} root@k8s-w-10:/opt/k8s/bin/ chmod +x /opt/k8s/bin/*
cd /opt/k8s/work cat > flanneld-csr.json <<EOF { "CN": "flanneld", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "4Paradigm" } ] } EOF
cfssl gencert -ca=/opt/k8s/work/ca.pem \ -ca-key=/opt/k8s/work/ca-key.pem \ -config=/opt/k8s/work/ca-config.json \ -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
cd /opt/k8s/work mkdir -p /etc/flanneld/cert scp flanneld*.pem root@k8s-w-1:/etc/flanneld/cert ... scp flanneld*.pem root@k8s-w-10:/etc/flanneld/cert
cd /opt/k8s/work etcdctl \ --endpoints=https://10.66.10.26:2379,https://10.66.10.27:2379,https://10.66.10.28:2379 \ --ca-file=/opt/k8s/work/ca.pem \ --cert-file=/opt/k8s/work/flanneld.pem \ --key-file=/opt/k8s/work/flanneld-key.pem \ set /kubernetes/network/config '{"Network":"'172.16.0.0/16'", "SubnetLen": 21, "Backend": {"Type": "vxlan"}}'
cat > /etc/systemd/system/flanneld.service <<EOF [Unit] Description=Flanneld overlay address etcd agent After=network.target After=network-online.target Wants=network-online.target After=etcd.service Before=docker.service [Service] Type=notify ExecStart=/opt/k8s/bin/flanneld \ -etcd-cafile=/etc/kubernetes/cert/ca.pem \ -etcd-certfile=/etc/flanneld/cert/flanneld.pem \ -etcd-keyfile=/etc/flanneld/cert/flanneld-key.pem \ -etcd-endpoints=https://10.66.10.26:2379,https://10.66.10.27:2379,https://10.66.10.28:2379 \ -etcd-prefix=/kubernetes/network \ -iface=eth0 \ -ip-masq ExecStartPost=/opt/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker Restart=always RestartSec=5 StartLimitInterval=0 [Install] WantedBy=multi-user.target RequiredBy=docker.service EOF
systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld
etcdctl \ --endpoints=https://10.66.10.26:2379,https://10.66.10.27:2379,https://10.66.10.28:2379 \ --ca-file=/etc/kubernetes/cert/ca.pem \ --cert-file=/etc/flanneld/cert/flanneld.pem \ --key-file=/etc/flanneld/cert/flanneld-key.pem \ get /kubernetes/network/config
etcdctl \ --endpoints=https://10.66.10.26:2379,https://10.66.10.27:2379,https://10.66.10.28:2379 \ --ca-file=/etc/kubernetes/cert/ca.pem \ --cert-file=/etc/flanneld/cert/flanneld.pem \ --key-file=/etc/flanneld/cert/flanneld-key.pem \ ls /kubernetes/network/subnets
ip route show |grep flannel.1
ssh k8s-w-8 "/usr/sbin/ip addr show flannel.1|grep -w inet"
cat > kubernetes-csr.json <<EOF { "CN": "kubernetes-master", "hosts": [ "127.0.0.1", "10.66.10.3", "10.66.10.26", "10.66.10.27", "10.66.10.28", "10.254.0.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local", "kubernetes.default.svc.cluster.local." ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "opsnull" } ] } EOF
cfssl gencert -ca=/opt/k8s/work/ca.pem \ -ca-key=/opt/k8s/work/ca-key.pem \ -config=/opt/k8s/work/ca-config.json \ -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
mkdir -p /etc/kubernetes/cert scp kubernetes*.pem root@k8s-m-1:/etc/kubernetes/cert/ ... scp kubernetes*.pem root@k8s-m-3:/etc/kubernetes/cert/
cd /opt/k8s/work cat > encryption-config.yaml <<EOF kind: EncryptionConfig apiVersion: v1 resources: - resources: - secrets providers: - aescbc: keys: - name: key1 secret: $(head -c 32 /dev/urandom | base64) - identity: {} EOF
Distribute the encryption config to the /etc/kubernetes directory on every master: scp encryption-config.yaml root@k8s-m-1:/etc/kubernetes/ ... scp encryption-config.yaml root@k8s-m-3:/etc/kubernetes/
cat > audit-policy.yaml <<EOF apiVersion: audit.k8s.io/v1beta1 kind: Policy rules: # The following requests were manually identified as high-volume and low-risk, so drop them. - level: None resources: - group: "" resources: - endpoints - services - services/status users: - 'system:kube-proxy' verbs: - watch - level: None resources: - group: "" resources: - nodes - nodes/status userGroups: - 'system:nodes' verbs: - get - level: None namespaces: - kube-system resources: - group: "" resources: - endpoints users: - 'system:kube-controller-manager' - 'system:kube-scheduler' - 'system:serviceaccount:kube-system:endpoint-controller' verbs: - get - update - level: None resources: - group: "" resources: - namespaces - namespaces/status - namespaces/finalize users: - 'system:apiserver' verbs: - get # Don't log HPA fetching metrics. - level: None resources: - group: metrics.k8s.io users: - 'system:kube-controller-manager' verbs: - get - list # Don't log these read-only URLs. - level: None nonResourceURLs: - '/healthz*' - /version - '/swagger*' # Don't log events requests. - level: None resources: - group: "" resources: - events # node and pod status calls from nodes are high-volume and can be large, don't log responses # for expected updates from nodes - level: Request omitStages: - RequestReceived resources: - group: "" resources: - nodes/status - pods/status users: - kubelet - 'system:node-problem-detector' - 'system:serviceaccount:kube-system:node-problem-detector' verbs: - update - patch - level: Request omitStages: - RequestReceived resources: - group: "" resources: - nodes/status - pods/status userGroups: - 'system:nodes' verbs: - update - patch # deletecollection calls can be large, don't log responses for expected namespace deletions - level: Request omitStages: - RequestReceived users: - 'system:serviceaccount:kube-system:namespace-controller' verbs: - deletecollection # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, # so only log at the Metadata level. - level: Metadata omitStages: - RequestReceived resources: - group: "" resources: - secrets - configmaps - group: authentication.k8s.io resources: - tokenreviews # Get responses can be large; skip them. - level: Request omitStages: - RequestReceived resources: - group: "" - group: admissionregistration.k8s.io - group: apiextensions.k8s.io - group: apiregistration.k8s.io - group: apps - group: authentication.k8s.io - group: authorization.k8s.io - group: autoscaling - group: batch - group: certificates.k8s.io - group: extensions - group: metrics.k8s.io - group: networking.k8s.io - group: policy - group: rbac.authorization.k8s.io - group: scheduling.k8s.io - group: settings.k8s.io - group: storage.k8s.io verbs: - get - list - watch # Default level for known APIs - level: RequestResponse omitStages: - RequestReceived resources: - group: "" - group: admissionregistration.k8s.io - group: apiextensions.k8s.io - group: apiregistration.k8s.io - group: apps - group: authentication.k8s.io - group: authorization.k8s.io - group: autoscaling - group: batch - group: certificates.k8s.io - group: extensions - group: metrics.k8s.io - group: networking.k8s.io - group: policy - group: rbac.authorization.k8s.io - group: scheduling.k8s.io - group: settings.k8s.io - group: storage.k8s.io # Default level for all other requests. - level: Metadata omitStages: - RequestReceived EOF
scp audit-policy.yaml root@k8s-m-1:/etc/kubernetes/audit-policy.yaml ... scp audit-policy.yaml root@k8s-m-3:/etc/kubernetes/audit-policy.yaml
Create the certificate used later by the aggregation layer, i.e. for accessing metrics-server or kube-prometheus through kube-apiserver:
cd /opt/k8s/work cat > proxy-client-csr.json <<EOF { "CN": "aggregator", "hosts": [], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "opsnull" } ] } EOF
cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \ -ca-key=/etc/kubernetes/cert/ca-key.pem \ -config=/etc/kubernetes/cert/ca-config.json \ -profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client
scp proxy-client*.pem root@k8s-m-1:/etc/kubernetes/cert/ ... scp proxy-client*.pem root@k8s-m-3:/etc/kubernetes/cert/
cat > /etc/systemd/system/kube-apiserver.service <<EOF [Unit] Description=Kubernetes API Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target [Service] WorkingDirectory=/data/k8s/k8s/kube-apiserver ExecStart=/opt/k8s/bin/kube-apiserver \ --advertise-address=10.66.10.26 \ --default-not-ready-toleration-seconds=360 \ --default-unreachable-toleration-seconds=360 \ --feature-gates=DynamicAuditing=true \ --max-mutating-requests-inflight=2000 \ --max-requests-inflight=4000 \ --default-watch-cache-size=200 \ --delete-collection-workers=2 \ --encryption-provider-config=/etc/kubernetes/encryption-config.yaml \ --etcd-cafile=/etc/kubernetes/cert/ca.pem \ --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \ --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \ --etcd-servers=https://10.66.10.26:2379,https://10.66.10.27:2379,https://10.66.10.28:2379 \ --bind-address=10.66.10.26 \ --secure-port=6443 \ --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \ --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \ --insecure-port=0 \ --audit-dynamic-configuration \ --audit-log-maxage=15 \ --audit-log-maxbackup=3 \ --audit-log-maxsize=100 \ --audit-log-truncate-enabled \ --audit-log-path=/data/k8s/k8s/kube-apiserver/audit.log \ --audit-policy-file=/etc/kubernetes/audit-policy.yaml \ --profiling \ --anonymous-auth=false \ --client-ca-file=/etc/kubernetes/cert/ca.pem \ --enable-bootstrap-token-auth \ --requestheader-allowed-names="aggregator" \ --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \ --requestheader-extra-headers-prefix="X-Remote-Extra-" \ --requestheader-group-headers=X-Remote-Group \ --requestheader-username-headers=X-Remote-User \ --service-account-key-file=/etc/kubernetes/cert/ca.pem \ --authorization-mode=Node,RBAC \ --runtime-config=api/all=true \ --enable-admission-plugins=NodeRestriction \ --allow-privileged=true \ --apiserver-count=3 \ --event-ttl=168h \ --kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem \ --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \ --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \ --kubelet-https=true \ --kubelet-timeout=10s \ --proxy-client-cert-file=/etc/kubernetes/cert/proxy-client.pem \ --proxy-client-key-file=/etc/kubernetes/cert/proxy-client-key.pem \ --service-cluster-ip-range=10.254.0.0/16 \ --service-node-port-range=30000-32767 \ --logtostderr=true \ --v=2 Restart=on-failure RestartSec=10 Type=notify LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
scp /etc/systemd/system/kube-apiserver.service root@k8s-m-1:/etc/systemd/system/kube-apiserver.service ... scp /etc/systemd/system/kube-apiserver.service root@k8s-m-3:/etc/systemd/system/kube-apiserver.service (update --advertise-address and --bind-address to each master's own IP before starting)
mkdir -p /data/k8s/k8s/kube-apiserver systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver
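Once kube-apiserver is running on all three masters, the admin kubeconfig distributed earlier can be used to confirm the control plane answers through the HAProxy VIP; a quick sanity check (output will vary):
# Both commands go through https://10.66.10.3:8443.
kubectl cluster-info
kubectl get all --all-namespaces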
cd /opt/k8s/work cat > kube-controller-manager-csr.json <<EOF { "CN": "system:kube-controller-manager", "key": { "algo": "rsa", "size": 2048 }, "hosts": [ "127.0.0.1", "10.66.10.3", "10.66.10.26", "10.66.10.27", "10.66.10.28" ], "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "system:kube-controller-manager", "OU": "opsnull" } ] } EOF
cd /opt/k8s/work cfssl gencert -ca=/opt/k8s/work/ca.pem \ -ca-key=/opt/k8s/work/ca-key.pem \ -config=/opt/k8s/work/ca-config.json \ -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
cd /opt/k8s/work scp kube-controller-manager*.pem root@k8s-m-1:/etc/kubernetes/cert/ ... scp kube-controller-manager*.pem root@k8s-m-3:/etc/kubernetes/cert/
kube-controller-manager accesses the apiserver through a kubeconfig file that carries the apiserver address, the embedded CA certificate, and the kube-controller-manager client certificate:
kubectl config set-cluster kubernetes \ --certificate-authority=/opt/k8s/work/ca.pem \ --embed-certs=true \ --server="https://10.66.10.3:8443" \ --kubeconfig=kube-controller-manager.kubeconfig kubectl config set-credentials system:kube-controller-manager \ --client-certificate=kube-controller-manager.pem \ --client-key=kube-controller-manager-key.pem \ --embed-certs=true \ --kubeconfig=kube-controller-manager.kubeconfig kubectl config set-context system:kube-controller-manager \ --cluster=kubernetes \ --user=system:kube-controller-manager \ --kubeconfig=kube-controller-manager.kubeconfig kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig
cp -pr kube-controller-manager.kubeconfig /etc/kubernetes/kube-controller-manager.kubeconfig
cat > /etc/systemd/system/kube-controller-manager.service <<EOF [Unit] Description=Kubernetes Controller Manager Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] WorkingDirectory=/data/k8s/k8s/kube-controller-manager ExecStart=/opt/k8s/bin/kube-controller-manager \ --profiling \ --cluster-name=kubernetes \ --controllers=*,bootstrapsigner,tokencleaner \ --kube-api-qps=1000 \ --kube-api-burst=2000 \ --leader-elect \ --use-service-account-credentials \ --concurrent-service-syncs=2 \ --bind-address=10.66.10.26 \ --secure-port=10252 \ --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \ --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \ --port=0 \ --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ --client-ca-file=/etc/kubernetes/cert/ca.pem \ --requestheader-allowed-names="aggregator" \ --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \ --requestheader-extra-headers-prefix="X-Remote-Extra-" \ --requestheader-group-headers=X-Remote-Group \ --requestheader-username-headers=X-Remote-User \ --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \ --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \ --experimental-cluster-signing-duration=876000h \ --horizontal-pod-autoscaler-sync-period=10s \ --concurrent-deployment-syncs=10 \ --concurrent-gc-syncs=30 \ --node-cidr-mask-size=24 \ --service-cluster-ip-range=10.254.0.0/16 \ --pod-eviction-timeout=6m \ --terminated-pod-gc-threshold=10000 \ --root-ca-file=/etc/kubernetes/cert/ca.pem \ --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \ --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \ --logtostderr=true \ --v=2 Restart=on-failure RestartSec=5 [Install] WantedBy=multi-user.target EOF
scp /etc/systemd/system/kube-controller-manager.service root@k8s-m-1:/etc/systemd/system/kube-controller-manager.service ... scp /etc/systemd/system/kube-controller-manager.service root@k8s-m-3:/etc/systemd/system/kube-controller-manager.service (update --bind-address to each master's own IP)
mkdir -p /data/k8s/k8s/kube-controller-manager systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager
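Because kube-controller-manager runs with --leader-elect, only one of the three instances is active at a time; a standard way to see which master currently holds the lock is to look at the leader-election annotation on its endpoints object:
kubectl get endpoints kube-controller-manager -n kube-system -o yaml | grep holderIdentity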
cat > kube-scheduler-csr.json <<EOF { "CN": "system:kube-scheduler", "hosts": [ "127.0.0.1", "10.66.10.3", "10.66.10.26", "10.66.10.27", "10.66.10.28" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "system:kube-scheduler", "OU": "opsnull" } ] } EOF
cfssl gencert -ca=/opt/k8s/work/ca.pem \ -ca-key=/opt/k8s/work/ca-key.pem \ -config=/opt/k8s/work/ca-config.json \ -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
scp -pr kube-scheduler*.pem root@k8s-m-1:/etc/kubernetes/cert/ … scp -pr kube-scheduler*.pem root@k8s-m-3:/etc/kubernetes/cert/
Create and distribute the kube-scheduler kubeconfig file
kubectl config set-cluster kubernetes \ --certificate-authority=/opt/k8s/work/ca.pem \ --embed-certs=true \ --server="https://10.66.10.3:8443" \ --kubeconfig=kube-scheduler.kubeconfig kubectl config set-credentials system:kube-scheduler \ --client-certificate=kube-scheduler.pem \ --client-key=kube-scheduler-key.pem \ --embed-certs=true \ --kubeconfig=kube-scheduler.kubeconfig kubectl config set-context system:kube-scheduler \ --cluster=kubernetes \ --user=system:kube-scheduler \ --kubeconfig=kube-scheduler.kubeconfig kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig
scp -pr kube-scheduler.kubeconfig root@k8s-m-1:/etc/kubernetes/kube-scheduler.kubeconfig … scp -pr kube-scheduler.kubeconfig root@k8s-m-3:/etc/kubernetes/kube-scheduler.kubeconfig
Create the kube-scheduler configuration file
cat > /etc/kubernetes/kube-scheduler.yaml <<EOF apiVersion: kubescheduler.config.k8s.io/v1alpha1 kind: KubeSchedulerConfiguration bindTimeoutSeconds: 600 clientConnection: burst: 200 kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig" qps: 100 enableContentionProfiling: false enableProfiling: true hardPodAffinitySymmetricWeight: 1 healthzBindAddress: 10.66.10.26:10251 leaderElection: leaderElect: true metricsBindAddress: 10.66.10.26:10251 EOF
scp -pr /etc/kubernetes/kube-scheduler.yaml root@k8s-m-1:/etc/kubernetes/kube-scheduler.yaml … scp -pr /etc/kubernetes/kube-scheduler.yaml root@k8s-m-3:/etc/kubernetes/kube-scheduler.yaml (update healthzBindAddress and metricsBindAddress to each master's own IP)
cat > /etc/systemd/system/kube-scheduler.service <<EOF [Unit] Description=Kubernetes Scheduler Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] WorkingDirectory=/data/k8s/k8s/kube-scheduler ExecStart=/opt/k8s/bin/kube-scheduler \ --config=/etc/kubernetes/kube-scheduler.yaml \ --bind-address=10.66.10.26 \ --secure-port=10259 \ --port=0 \ --tls-cert-file=/etc/kubernetes/cert/kube-scheduler.pem \ --tls-private-key-file=/etc/kubernetes/cert/kube-scheduler-key.pem \ --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ --client-ca-file=/etc/kubernetes/cert/ca.pem \ --requestheader-allowed-names="aggregator" \ --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \ --requestheader-extra-headers-prefix="X-Remote-Extra-" \ --requestheader-group-headers=X-Remote-Group \ --requestheader-username-headers=X-Remote-User \ --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \ --logtostderr=true \ --v=2 Restart=always RestartSec=5 StartLimitInterval=0 [Install] WantedBy=multi-user.target EOF
scp -pr /etc/systemd/system/kube-scheduler.service root@k8s-m-1:/etc/systemd/system/kube-scheduler.service … scp -pr /etc/systemd/system/kube-scheduler.service root@k8s-m-3:/etc/systemd/system/kube-scheduler.service (update --bind-address to each master's own IP)
mkdir -p /data/k8s/k8s/kube-scheduler systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler
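The scheduler elects a leader the same way, and its healthz endpoint is served on the healthzBindAddress configured in kube-scheduler.yaml (port 10251 above). A quick check, adjusting the IP to the master you are on:
kubectl get endpoints kube-scheduler -n kube-system -o yaml | grep holderIdentity
curl -s http://10.66.10.26:10251/healthz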
cd /opt/k8s/work wget https://download.docker.com/linux/static/stable/x86_64/docker-18.09.6.tgz tar -xvf docker-18.09.6.tgz
cd /opt/k8s/work scp docker/* root@k8s-w-1:/opt/k8s/bin/ ... scp docker/* root@k8s-w-10:/opt/k8s/bin/
cat > /etc/systemd/system/docker.service <<'EOF' [Unit] Description=Docker Application Container Engine Documentation=http://docs.docker.io [Service] WorkingDirectory=/data/k8s/docker Environment="PATH=/opt/k8s/bin:/bin:/sbin:/usr/bin:/usr/sbin" EnvironmentFile=-/run/flannel/docker ExecStart=/opt/k8s/bin/dockerd $DOCKER_NETWORK_OPTIONS ExecReload=/bin/kill -s HUP $MAINPID Restart=on-failure RestartSec=5 LimitNOFILE=infinity LimitNPROC=infinity LimitCORE=infinity Delegate=yes KillMode=process [Install] WantedBy=multi-user.target EOF
iptables -P FORWARD ACCEPT # Docker 1.13+ switches the FORWARD chain policy to DROP on startup; set it back to ACCEPT on every worker
cd /opt/k8s/work scp docker.service root@k8s-w-1:/etc/systemd/system/ ... scp docker.service root@k8s-w-10:/etc/systemd/system/
mkdir -p /etc/docker/ /data/k8s/docker/{data,exec}
cd /opt/k8s/work cat > docker-daemon.json <<EOF { "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn","https://hub-mirror.c.163.com"], "insecure-registries": ["docker02:35000"], "max-concurrent-downloads": 20, "live-restore": true, "max-concurrent-uploads": 10, "debug": true, "data-root": "/data/k8s/docker/data", "exec-root": "/data/k8s/docker/exec", "log-opts": { "max-size": "100m", "max-file": "5" } } EOF
mkdir -p /etc/docker/ /data/k8s/docker/{data,exec} scp docker-daemon.json root@k8s-w-1:/etc/docker/daemon.json ... scp docker-daemon.json root@k8s-w-10:/etc/docker/daemon.json
systemctl daemon-reload && systemctl enable docker && systemctl restart docker
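After Docker starts, confirm it picked up the custom daemon.json and the flannel-provided bridge options; docker info reports the data root and registry mirrors, and docker0 should sit inside the flannel subnet assigned to this node:
docker info | grep -E "Docker Root Dir|Registry Mirrors" -A 2
ip addr show docker0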
cat > /opt/k8s/bin/environment.sh <<'EOF' #!/usr/bin/bash KUBE_APISERVER="https://10.66.10.3:8443" BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ') NODE_NAMES=(k8s-w-1 k8s-w-2 k8s-w-3 k8s-w-4 k8s-w-5 k8s-w-6 k8s-w-7 k8s-w-8 k8s-w-9 k8s-w-10) CLUSTER_DNS_SVC_IP="10.254.0.2" CLUSTER_DNS_DOMAIN="cluster.local" EOF source /opt/k8s/bin/environment.sh for node_name in ${NODE_NAMES[@]} do echo ">>> ${node_name}" # create a bootstrap token export BOOTSTRAP_TOKEN=$(kubeadm token create \ --description kubelet-bootstrap-token \ --groups system:bootstrappers:${node_name} \ --kubeconfig ~/.kube/config) # set cluster parameters kubectl config set-cluster kubernetes \ --certificate-authority=/etc/kubernetes/cert/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig # set client authentication parameters kubectl config set-credentials kubelet-bootstrap \ --token=${BOOTSTRAP_TOKEN} \ --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig # set context parameters kubectl config set-context default \ --cluster=kubernetes \ --user=kubelet-bootstrap \ --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig # set the default context kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig done
kubeadm token list --kubeconfig ~/.kube/config
cd /opt/k8s/work/ scp -pr kubelet-bootstrap-k8s-w-1.kubeconfig root@k8s-w-1:/etc/kubernetes/kubelet-bootstrap.kubeconfig ... scp -pr kubelet-bootstrap-k8s-w-10.kubeconfig root@k8s-w-10:/etc/kubernetes/kubelet-bootstrap.kubeconfig
cd /opt/k8s/work/ cat > kubelet-config.yaml <<EOF kind: KubeletConfiguration apiVersion: kubelet.config.k8s.io/v1beta1 address: "10.66.10.31" staticPodPath: "" syncFrequency: 1m fileCheckFrequency: 20s httpCheckFrequency: 20s staticPodURL: "" port: 10250 readOnlyPort: 0 rotateCertificates: true serverTLSBootstrap: true authentication: anonymous: enabled: false webhook: enabled: true x509: clientCAFile: "/etc/kubernetes/cert/ca.pem" authorization: mode: Webhook registryPullQPS: 0 registryBurst: 20 eventRecordQPS: 0 eventBurst: 20 enableDebuggingHandlers: true enableContentionProfiling: true healthzPort: 10248 healthzBindAddress: "10.66.10.31" clusterDomain: "cluster.local" clusterDNS: - "10.254.0.2" nodeStatusUpdateFrequency: 10s nodeStatusReportFrequency: 1m imageMinimumGCAge: 2m imageGCHighThresholdPercent: 85 imageGCLowThresholdPercent: 80 volumeStatsAggPeriod: 1m kubeletCgroups: "" systemCgroups: "" cgroupRoot: "" cgroupsPerQOS: true cgroupDriver: cgroupfs runtimeRequestTimeout: 10m hairpinMode: promiscuous-bridge maxPods: 220 podCIDR: "172.16.0.0/16" podPidsLimit: -1 resolvConf: /etc/resolv.conf maxOpenFiles: 1000000 kubeAPIQPS: 1000 kubeAPIBurst: 2000 serializeImagePulls: false evictionHard: memory.available: "100Mi" nodefs.available: "10%" nodefs.inodesFree: "5%" imagefs.available: "15%" evictionSoft: {} enableControllerAttachDetach: true failSwapOn: true containerLogMaxSize: 20Mi containerLogMaxFiles: 10 systemReserved: {} kubeReserved: {} systemReservedCgroup: "" kubeReservedCgroup: "" enforceNodeAllocatable: ["pods"] EOF
(Each worker needs its own copy of this file: change address and healthzBindAddress from 10.66.10.31 to that node's IP before distributing it.)
scp kubelet-config.yaml root@k8s-w-1:/etc/kubernetes/kubelet-config.yaml ... scp kubelet-config.yaml root@k8s-w-10:/etc/kubernetes/kubelet-config.yaml
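Rather than hand-editing ten copies as above, the node IP can be substituted while copying; a sketch, assuming the hypothetical NODE_NAMES/NODE_IPS arrays below (10.66.10.31 only appears in address and healthzBindAddress, so a global sed is safe here):
# Render a per-node kubelet config and push it out (array names are my own).
NODE_NAMES=(k8s-w-1 k8s-w-2 k8s-w-3 k8s-w-4 k8s-w-5 k8s-w-6 k8s-w-7 k8s-w-8 k8s-w-9 k8s-w-10)
NODE_IPS=(10.66.10.31 10.66.10.32 10.66.10.33 10.66.10.34 10.66.10.35 10.66.10.36 10.66.10.37 10.66.10.38 10.66.10.39 10.66.10.40)
for i in "${!NODE_NAMES[@]}"; do
  sed "s/10.66.10.31/${NODE_IPS[i]}/g" kubelet-config.yaml > kubelet-config-${NODE_NAMES[i]}.yaml
  scp kubelet-config-${NODE_NAMES[i]}.yaml root@${NODE_NAMES[i]}:/etc/kubernetes/kubelet-config.yaml
done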
cat > /etc/systemd/system/kubelet.service <<EOF [Unit] Description=Kubernetes Kubelet Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=docker.service Requires=docker.service [Service] WorkingDirectory=/data/k8s/k8s/kubelet ExecStart=/opt/k8s/bin/kubelet \ --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \ --cert-dir=/etc/kubernetes/cert \ --root-dir=/data/k8s/k8s/kubelet \ --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ --config=/etc/kubernetes/kubelet-config.yaml \ --hostname-override=k8s-w-1 \ --image-pull-progress-deadline=15m \ --volume-plugin-dir=/data/k8s/k8s/kubelet/kubelet-plugins/volume/exec/ \ --logtostderr=true \ --v=2 Restart=always RestartSec=5 StartLimitInterval=0 [Install] WantedBy=multi-user.target EOF
scp /etc/systemd/system/kubelet.service root@k8s-w-1:/etc/systemd/system/kubelet.service ... scp /etc/systemd/system/kubelet.service root@k8s-w-10:/etc/systemd/system/kubelet.service (change --hostname-override to match each worker before starting kubelet)
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes-master
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
mkdir -p /data/k8s/k8s/kubelet
/usr/sbin/swapoff -a
systemctl daemon-reload && systemctl restart kubelet && systemctl enable kubelet
The unit's After=/Requires= lines reference docker.service because this cluster uses Docker as the container runtime; point them at containerd.service only if you run containerd instead.
cd /opt/k8s/work/ cat > csr-crb.yaml <<EOF # Approve all CSRs for the group "system:bootstrappers" kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: auto-approve-csrs-for-group subjects: - kind: Group name: system:bootstrappers apiGroup: rbac.authorization.k8s.io roleRef: kind: ClusterRole name: system:certificates.k8s.io:certificatesigningrequests:nodeclient apiGroup: rbac.authorization.k8s.io --- # To let a node of the group "system:nodes" renew its own credentials kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: node-client-cert-renewal subjects: - kind: Group name: system:nodes apiGroup: rbac.authorization.k8s.io roleRef: kind: ClusterRole name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient apiGroup: rbac.authorization.k8s.io --- # A ClusterRole which instructs the CSR approver to approve a node requesting a # serving cert matching its client cert. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: approve-node-server-renewal-csr rules: - apiGroups: ["certificates.k8s.io"] resources: ["certificatesigningrequests/selfnodeserver"] verbs: ["create"] --- # To let a node of the group "system:nodes" renew its own server credentials kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: node-server-cert-renewal subjects: - kind: Group name: system:nodes apiGroup: rbac.authorization.k8s.io roleRef: kind: ClusterRole name: approve-node-server-renewal-csr apiGroup: rbac.authorization.k8s.io EOF kubectl apply -f csr-crb.yaml
kubectl get csr kubectl certificate approve csr-bjtp4 # the CSR name comes from the get csr output and differs per node
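With ten workers bootstrapping, approving CSRs one at a time is slow. The bindings in csr-crb.yaml auto-approve the client CSRs, but the serving-certificate CSRs (created because kubelet-config.yaml sets serverTLSBootstrap: true) still need manual approval; a bulk sketch:
# Approve every pending CSR in one shot, then confirm the nodes registered.
kubectl get csr -o name | xargs kubectl certificate approve
kubectl get nodes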
cd /opt/k8s/work cat > kube-proxy-csr.json <<EOF { "CN": "system:kube-proxy", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "ST": "BeiJing", "L": "BeiJing", "O": "k8s", "OU": "opsnull" } ] } EOF
The CN field identifies this certificate's user as system:kube-proxy; the built-in ClusterRoleBinding system:node-proxier binds the user system:kube-proxy to the system:node-proxier role, which grants the permissions kube-proxy needs when calling kube-apiserver.
cd /opt/k8s/work cfssl gencert -ca=/opt/k8s/work/ca.pem \ -ca-key=/opt/k8s/work/ca-key.pem \ -config=/opt/k8s/work/ca-config.json \ -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy ls kube-proxy*
cd /opt/k8s/work source /opt/k8s/bin/environment.sh kubectl config set-cluster kubernetes \ --certificate-authority=/opt/k8s/work/ca.pem \ --embed-certs=true \ --server=${KUBE_APISERVER} \ --kubeconfig=kube-proxy.kubeconfig kubectl config set-credentials kube-proxy \ --client-certificate=kube-proxy.pem \ --client-key=kube-proxy-key.pem \ --embed-certs=true \ --kubeconfig=kube-proxy.kubeconfig kubectl config set-context default \ --cluster=kubernetes \ --user=kube-proxy \ --kubeconfig=kube-proxy.kubeconfig kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
cd /opt/k8s/work source /opt/k8s/bin/environment.sh for node_name in ${NODE_NAMES[@]} do echo ">>> ${node_name}" scp kube-proxy.kubeconfig root@${node_name}:/etc/kubernetes/ done
cd /opt/k8s/work source /opt/k8s/bin/environment.sh cat > kube-proxy-config.yaml.template <<EOF kind: KubeProxyConfiguration apiVersion: kubeproxy.config.k8s.io/v1alpha1 clientConnection: burst: 200 kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig" qps: 100 bindAddress: 10.66.10.31 healthzBindAddress: 10.66.10.31:10256 metricsBindAddress: 10.66.10.31:10249 enableProfiling: true clusterCIDR: 172.16.0.0/16 hostnameOverride: k8s-w-1 mode: "ipvs" portRange: "" iptables: masqueradeAll: false ipvs: scheduler: rr excludeCIDRs: [] EOF
bindAddress: the node IP kube-proxy listens on; healthzBindAddress and metricsBindAddress use the same IP in the template above.
clientConnection.kubeconfig: the kubeconfig kube-proxy uses to connect to kube-apiserver.
clusterCIDR: the Pod network range; kube-proxy uses --cluster-cidr to tell cluster-internal traffic from external traffic, and only when --cluster-cidr or --masquerade-all is set does it SNAT requests that reach a Service IP from outside the cluster.
hostnameOverride: must match the name the kubelet uses on the same node, otherwise kube-proxy cannot find the node and will not program any ipvs rules.
mode: ipvs is used here instead of the default iptables proxier.
cd /opt/k8s/work scp kube-proxy-config.yaml.template root@k8s-w-1:/etc/kubernetes/kube-proxy-config.yaml ... scp kube-proxy-config.yaml.template root@k8s-w-10:/etc/kubernetes/kube-proxy-config.yaml
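Like the kubelet config, the template carries k8s-w-1's IP and hostname, so render it per node instead of copying it verbatim; a sketch using the same hypothetical NODE_NAMES/NODE_IPS arrays as in the kubelet-config step:
# Substitute each node's IP and hostname before copying.
for i in "${!NODE_NAMES[@]}"; do
  sed -e "s/10.66.10.31/${NODE_IPS[i]}/g" -e "s/k8s-w-1/${NODE_NAMES[i]}/g" \
    kube-proxy-config.yaml.template > kube-proxy-config-${NODE_NAMES[i]}.yaml
  scp kube-proxy-config-${NODE_NAMES[i]}.yaml root@${NODE_NAMES[i]}:/etc/kubernetes/kube-proxy-config.yaml
done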
cd /opt/k8s/work source /opt/k8s/bin/environment.sh cat > /etc/systemd/system/kube-proxy.service <<EOF [Unit] Description=Kubernetes Kube-Proxy Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes After=network.target [Service] WorkingDirectory=/data/k8s/k8s/kube-proxy ExecStart=/opt/k8s/bin/kube-proxy \ --config=/etc/kubernetes/kube-proxy-config.yaml \ --logtostderr=true \ --v=2 Restart=on-failure RestartSec=5 LimitNOFILE=65536 [Install] WantedBy=multi-user.target EOF
scp /etc/systemd/system/kube-proxy.service root@k8s-w-1:/etc/systemd/system/ ... scp /etc/systemd/system/kube-proxy.service root@k8s-w-10:/etc/systemd/system/
cd /opt/k8s/work mkdir -p /data/k8s/k8s/kube-proxy modprobe ip_vs_rr systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy
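In ipvs mode kube-proxy programs virtual servers instead of long iptables chains, so once it is running the rules can be inspected directly with ipvsadm (installed during system prep); every Service clusterIP shows up as a virtual server, and the kubernetes Service itself should list the three apiservers:
ipvsadm -ln
ipvsadm -ln | grep -A 3 "10.254.0.1:443"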
cd /opt/k8s/work git clone https://github.com/coredns/deployment.git mv deployment coredns-deployment
cd /opt/k8s/work/coredns-deployment/kubernetes ./deploy.sh -i 10.254.0.2 -d cluster.local | kubectl apply -f -
kubectl get svc,pods -n kube-system| grep coredns
cat > dig.yaml <<EOF apiVersion: v1 kind: Pod metadata: name: dig namespace: default spec: containers: - name: dig image: docker.io/azukiapp/dig command: - sleep - "3600" imagePullPolicy: IfNotPresent restartPolicy: Always EOF kubectl create -f dig.yaml
kubectl get pods
kubectl exec -it dig -n default -- nslookup kubernetes
kubectl exec -it dig -n default -- nslookup baidu.com
cd /opt/k8s/work/ wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/cloud/deploy.yaml mv deploy.yaml ingress-nginx.yaml kubectl apply -f ingress-nginx.yaml
kubectl label node k8s-w-1 usage=ingress ... kubectl label node k8s-w-3 usage=ingress
apiVersion: v1 kind: Namespace metadata: name: ingress-nginx labels: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx --- # Source: ingress-nginx/templates/controller-serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx namespace: ingress-nginx --- # Source: ingress-nginx/templates/controller-configmap.yaml apiVersion: v1 kind: ConfigMap metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller namespace: ingress-nginx data: --- # Source: ingress-nginx/templates/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx rules: - apiGroups: - '' resources: - configmaps - endpoints - nodes - pods - secrets verbs: - list - watch - apiGroups: - '' resources: - nodes verbs: - get - apiGroups: - '' resources: - services verbs: - get - list - update - watch - apiGroups: - extensions - networking.k8s.io # k8s 1.14+ resources: - ingresses verbs: - get - list - watch - apiGroups: - '' resources: - events verbs: - create - patch - apiGroups: - extensions - networking.k8s.io # k8s 1.14+ resources: - ingresses/status verbs: - update - apiGroups: - networking.k8s.io # k8s 1.14+ resources: - ingressclasses verbs: - get - list - watch --- # Source: ingress-nginx/templates/clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm name: ingress-nginx namespace: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: ingress-nginx subjects: - kind: ServiceAccount name: ingress-nginx namespace: ingress-nginx --- # Source: ingress-nginx/templates/controller-role.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx namespace: ingress-nginx rules: - apiGroups: - '' resources: - namespaces verbs: - get - apiGroups: - '' resources: - configmaps - pods - secrets - endpoints verbs: - get - list - watch - apiGroups: - '' resources: - services verbs: - get - list - update - watch - apiGroups: - extensions - networking.k8s.io # k8s 1.14+ resources: - ingresses verbs: - get - list - watch - apiGroups: - extensions - networking.k8s.io # k8s 1.14+ resources: - ingresses/status verbs: - update - apiGroups: - networking.k8s.io # k8s 1.14+ resources: - ingressclasses verbs: - get - list - watch - apiGroups: - '' resources: - configmaps resourceNames: - ingress-controller-leader-nginx verbs: - get - update - apiGroups: - '' resources: - configmaps 
verbs: - create - apiGroups: - '' resources: - endpoints verbs: - create - get - update - apiGroups: - '' resources: - events verbs: - create - patch --- # Source: ingress-nginx/templates/controller-rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx namespace: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: ingress-nginx subjects: - kind: ServiceAccount name: ingress-nginx namespace: ingress-nginx --- # Source: ingress-nginx/templates/controller-service-webhook.yaml apiVersion: v1 kind: Service metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller-admission namespace: ingress-nginx spec: type: ClusterIP ports: - name: https-webhook port: 443 targetPort: webhook selector: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller --- # Source: ingress-nginx/templates/controller-service.yaml apiVersion: v1 kind: Service metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller namespace: ingress-nginx spec: type: LoadBalancer externalTrafficPolicy: Local ports: - name: http port: 80 protocol: TCP targetPort: http - name: https port: 443 protocol: TCP targetPort: https selector: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller --- # Source: ingress-nginx/templates/controller-deployment.yaml apiVersion: apps/v1 kind: Deployment metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: controller name: ingress-nginx-controller namespace: ingress-nginx spec: replicas: 3 selector: matchLabels: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller revisionHistoryLimit: 10 minReadySeconds: 0 template: metadata: labels: app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/component: controller spec: dnsPolicy: ClusterFirst hostNetwork: true nodeSelector: usage: ingress containers: - name: controller image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.32.0 imagePullPolicy: IfNotPresent lifecycle: preStop: exec: command: - /wait-shutdown args: - /nginx-ingress-controller - --publish-service=ingress-nginx/ingress-nginx-controller - --election-id=ingress-controller-leader - --ingress-class=nginx - --configmap=ingress-nginx/ingress-nginx-controller - --validating-webhook=:8443 - --validating-webhook-certificate=/usr/local/certificates/cert - --validating-webhook-key=/usr/local/certificates/key securityContext: capabilities: drop: - ALL add: - NET_BIND_SERVICE runAsUser: 101 allowPrivilegeEscalation: true env: - name: POD_NAME valueFrom: 
fieldRef: fieldPath: metadata.name - name: POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace livenessProbe: httpGet: path: /healthz port: 10254 scheme: HTTP initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 1 successThreshold: 1 failureThreshold: 3 readinessProbe: httpGet: path: /healthz port: 10254 scheme: HTTP initialDelaySeconds: 10 periodSeconds: 10 timeoutSeconds: 1 successThreshold: 1 failureThreshold: 3 ports: - name: http containerPort: 80 protocol: TCP - name: https containerPort: 443 protocol: TCP - name: webhook containerPort: 8443 protocol: TCP volumeMounts: - name: webhook-cert mountPath: /usr/local/certificates/ readOnly: true resources: requests: cpu: 100m memory: 90Mi serviceAccountName: ingress-nginx terminationGracePeriodSeconds: 300 volumes: - name: webhook-cert secret: secretName: ingress-nginx-admission --- # Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml apiVersion: admissionregistration.k8s.io/v1beta1 kind: ValidatingWebhookConfiguration metadata: labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook name: ingress-nginx-admission namespace: ingress-nginx webhooks: - name: validate.nginx.ingress.kubernetes.io rules: - apiGroups: - extensions - networking.k8s.io apiVersions: - v1beta1 operations: - CREATE - UPDATE resources: - ingresses failurePolicy: Fail clientConfig: service: namespace: ingress-nginx name: ingress-nginx-controller-admission path: /extensions/v1beta1/ingresses --- # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: ingress-nginx-admission annotations: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx rules: - apiGroups: - admissionregistration.k8s.io resources: - validatingwebhookconfigurations verbs: - get - update --- # Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: ingress-nginx-admission annotations: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: ingress-nginx-admission subjects: - kind: ServiceAccount name: ingress-nginx-admission namespace: ingress-nginx --- # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml apiVersion: batch/v1 kind: Job metadata: name: ingress-nginx-admission-create annotations: helm.sh/hook: pre-install,pre-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx 
app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx spec: template: metadata: name: ingress-nginx-admission-create labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: hostNetwork: true nodeSelector: kubernetes.io/hostname: k8s-w-1 containers: - name: create image: jettech/kube-webhook-certgen:v1.2.0 imagePullPolicy: IfNotPresent args: - create - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.ingress-nginx.svc - --namespace=ingress-nginx - --secret-name=ingress-nginx-admission restartPolicy: OnFailure serviceAccountName: ingress-nginx-admission securityContext: runAsNonRoot: true runAsUser: 2000 --- # Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml apiVersion: batch/v1 kind: Job metadata: name: ingress-nginx-admission-patch annotations: helm.sh/hook: post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx spec: template: metadata: name: ingress-nginx-admission-patch labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook spec: hostNetwork: true nodeSelector: kubernetes.io/hostname: k8s-w-1 containers: - name: patch image: jettech/kube-webhook-certgen:v1.2.0 imagePullPolicy: args: - patch - --webhook-name=ingress-nginx-admission - --namespace=ingress-nginx - --patch-mutating=false - --secret-name=ingress-nginx-admission - --patch-failure-policy=Fail restartPolicy: OnFailure serviceAccountName: ingress-nginx-admission securityContext: runAsNonRoot: true runAsUser: 2000 --- # Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: ingress-nginx-admission annotations: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx rules: - apiGroups: - '' resources: - secrets verbs: - get - create --- # Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: ingress-nginx-admission annotations: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: ingress-nginx-admission subjects: - kind: 
ServiceAccount name: ingress-nginx-admission namespace: ingress-nginx --- # Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount metadata: name: ingress-nginx-admission annotations: helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded labels: helm.sh/chart: ingress-nginx-2.0.3 app.kubernetes.io/name: ingress-nginx app.kubernetes.io/instance: ingress-nginx app.kubernetes.io/version: 0.32.0 app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: admission-webhook namespace: ingress-nginx
cat > nginx.yaml <<EOF apiVersion: apps/v1 kind: Deployment metadata: name: test-nginx labels: k8s-app: test-nginx spec: replicas: 2 selector: matchLabels: k8s-app: test-nginx template: metadata: labels: k8s-app: test-nginx spec: containers: - name: test-nginx image: nginx:1.7.9 imagePullPolicy: IfNotPresent ports: - containerPort: 80 EOF
cat > service-nginx.yaml <<EOF apiVersion: v1 kind: Service metadata: labels: k8s-app: test-nginx name: test-nginx spec: ports: - port: 80 protocol: TCP targetPort: 80 selector: k8s-app: test-nginx type: NodePort EOF
cat > test-nginx-ingress.yaml <<EOF apiVersion: extensions/v1beta1 kind: Ingress metadata: annotations: # kubernetes.io/ingress.class: "nginx" nginx.ingress.kubernetes.io/rewrite-target: / name: test-nginx spec: rules: - host: zying.haha.com http: paths: - backend: serviceName: test-nginx servicePort: 80 EOF # saved under a different name so it does not overwrite the controller manifest ingress-nginx.yaml created earlier
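Since the controller pods run with hostNetwork on the nodes labeled usage=ingress, the Ingress can be exercised by pointing a Host header at any of those nodes; a minimal smoke test (zying.haha.com is the host defined above, 10.66.10.31 is k8s-w-1, one of the labeled workers):
kubectl apply -f nginx.yaml -f service-nginx.yaml -f test-nginx-ingress.yaml
curl -H "Host: zying.haha.com" http://10.66.10.31/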