Environment Preparation

I. Install the Go environment

1. Download the Go package
$ wget https://www.golangtc.com/static/go/1.9.2/go1.9.2.linux-amd64.tar.gz
2. Extract the package and create directories
# Create the language directory to hold the Go installation -- GOROOT
$ mkdir /opt/language/

# Create the go directory and its three subdirectories for packages, binaries and source code -- GOPATH
$ mkdir -p /opt/go/{src,bin,pkg}
$ tar -xvf go1.9.2.linux-amd64.tar.gz -C /opt/language/
3. Configure environment variables
$ sudo vim /etc/profile.d/go.sh
    export GOROOT=/opt/language/go
    export GOBIN=/opt/go/bin
    export GOPATH=/opt/go
    export PATH=$PATH:$GOROOT/bin
# Load the environment variables
$ source /etc/profile.d/go.sh
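
To confirm the variables took effect, a quick optional check (the version string shown assumes the go1.9.2 tarball used above):

$ go version
go version go1.9.2 linux/amd64
$ go env GOROOT GOPATH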
4. Write the /etc/hosts file
192.168.0.19 etcd1 
192.168.0.20 etcd2 
192.168.0.21 etcd3 
192.168.0.19 k8s-master-35-19 
192.168.0.20 k8s-master-35-20 
192.168.0.21 k8s-master-35-21 
192.168.0.22 k8s-slave-35-22
192.168.0.23 k8s-slave-35-23
192.168.0.25 k8s-slave-35-25
192.168.0.26 k8s-slave-35-26
192.168.0.27 k8s-slave-35-27
192.168.0.28 k8s-slave-35-28
192.168.0.29 k8s-slave-35-29
192.168.0.30 k8s-slave-35-30
5. Configure ansible

All file distribution and batch operations in this guide are performed with ansible.

yum install ansible -y
vim /etc/ansible/hosts
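
A minimal inventory matching the hosts list above might look like this (the group names etcd, masters and nodes are illustrative choices reused in later ansible examples, not required by anything else in this guide):

[etcd]
192.168.0.19
192.168.0.20
192.168.0.21

[masters]
192.168.0.19
192.168.0.20
192.168.0.21

[nodes]
192.168.0.22
192.168.0.23
192.168.0.25
192.168.0.26
192.168.0.27
192.168.0.28
192.168.0.29
192.168.0.30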

II. Install the certificate generation tools

1. Download cfssl
mkdir -p /opt/local/cfssl

cd /opt/local/cfssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
mv cfssl_linux-amd64 cfssl
cp cfssl /bin/cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssljson_linux-amd64 cfssljson
cp cfssljson /bin/cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 cfssl-certinfo
cp cfssl-certinfo /bin/cfssl-certinfo

chmod +x *

2. Generate the default configuration files
mkdir /opt/ssl

cd /opt/ssl

cfssl print-defaults config > config.json

cfssl print-defaults csr > csr.json

3. Modify the configuration files
# config.json

{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}

# csr.json
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

4. Generate the CA certificate and key
$ cfssl gencert -initca csr.json | cfssljson -bare ca

$ ll
total 20
-rw-r--r-- 1 root root 1001 Feb 27 10:08 ca.csr
-rw------- 1 root root 1675 Feb 27 10:08 ca-key.pem
-rw-r--r-- 1 root root 1363 Feb 27 10:08 ca.pem
-rw-r--r-- 1 root root  397 Feb 26 17:31 config.json
-rw-r--r-- 1 root root  266 Feb 27 10:08 csr.json

5. Distribute the certificates
# Create the certificate directory
mkdir -p /etc/kubernetes/ssl

# Copy all files into the directory
cp * /etc/kubernetes/ssl

These files must be copied to every k8s machine.
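
For example, with the ansible inventory sketched earlier, one way to push them to every machine:

ansible all -m file -a 'path=/etc/kubernetes/ssl state=directory'
ansible all -m copy -a 'src=/opt/ssl/ dest=/etc/kubernetes/ssl/'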

III. Generate the etcd certificates and install etcd

1. Get etcd
$ mkdir -p /opt/go/src/github.com/coreos
$ cd /opt/go/src/github.com/coreos
$ git clone https://github.com/coreos/etcd.git
2. Build etcd
$ cd etcd
$ ./build
$ cd /opt/go/src/github.com/coreos/etcd/bin
$ cp etcd /bin
$ cp etcdctl /bin 

The compiled binaries are placed in the bin directory under the current directory.

After building, copy the etcd and etcdctl binaries to the other etcd servers.
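
A sketch of that copy from the bin directory (ansible's copy module would work just as well):

scp etcd etcdctl 192.168.0.20:/bin/
scp etcd etcdctl 192.168.0.21:/bin/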

3. Generate the etcd certificates

cd /opt/ssl
vi etcd-csr.json

{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.0.19",
    "192.168.0.20",
    "192.168.0.21"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}



cfssl gencert -ca=/opt/ssl/ca.pem \
  -ca-key=/opt/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd


cp etcd* /etc/kubernetes/ssl
4. Write the unit file

On 192.168.0.19:

vi /root/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=root
# set GOMAXPROCS to number of processors
ExecStart=/bin/etcd \
  --name=etcd1 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.0.19:2380 \
  --listen-peer-urls=https://192.168.0.19:2380 \
  --listen-client-urls=https://192.168.0.19:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.0.19:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=etcd1=https://192.168.0.19:2380,etcd2=https://192.168.0.20:2380,etcd3=https://192.168.0.21:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

cp /root/etcd.service /etc/systemd/system
5. Distribute the etcd unit configuration

After copying the file to the other nodes, change the --name value and the IP addresses to those of the local node.

scp etcd.service 192.168.0.20:/etc/systemd/system/
scp etcd.service 192.168.0.21:/etc/systemd/system/
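
A sketch of the per-node edit for 192.168.0.20 (a manual edit works just as well; the --initial-cluster line must keep all three IPs, so it is excluded from the substitution):

sed -i -e 's/--name=etcd1/--name=etcd2/' \
       -e '/--initial-cluster=/! s/192.168.0.19/192.168.0.20/g' \
       /etc/systemd/system/etcd.service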
6. Start the service once configuration is complete
mkdir /var/lib/etcd
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
7. Check the etcd cluster
7.1 Check cluster health
etcdctl --endpoints=https://192.168.0.19:2379 --cert-file=/etc/kubernetes/ssl/etcd.pem --ca-file=/etc/kubernetes/ssl/ca.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem cluster-health
cluster is healthy
7.2 List the cluster members
etcdctl --endpoints=https://192.168.0.19:2379 --cert-file=/etc/kubernetes/ssl/etcd.pem --ca-file=/etc/kubernetes/ssl/ca.pem --key-file=/etc/kubernetes/ssl/etcd-key.pem member list

IV. Install Kubernetes

1. Download the source, build and distribute

$ git clone https://github.com/kubernetes/kubernetes.git

$ cd kubernetes

$ make

$ ll /opt/app/k8s/source-k8s/kubernetes/_output/local/bin/linux/amd64
total 2.3G
-rwxr-xr-x 1 root root  54M Jan 28 11:10 apiextensions-apiserver
-rwxr-xr-x 1 root root 112M Jan 28 11:10 cloud-controller-manager
-rwxr-xr-x 1 root root 6.5M Jan 28 11:04 conversion-gen
-rwxr-xr-x 1 root root 6.5M Jan 28 11:03 deepcopy-gen
-rwxr-xr-x 1 root root 6.5M Jan 28 11:04 defaulter-gen
-rwxr-xr-x 1 root root 225M Jan 28 11:10 e2e_node.test
-rwxr-xr-x 1 root root 154M Jan 28 11:10 e2e.test
-rwxr-xr-x 1 root root  65M Jan 28 11:10 gendocs
-rwxr-xr-x 1 root root 167M Jan 28 11:10 genkubedocs
-rwxr-xr-x 1 root root 174M Jan 28 11:10 genman
-rwxr-xr-x 1 root root 5.0M Jan 28 11:10 genswaggertypedocs
-rwxr-xr-x 1 root root  65M Jan 28 11:10 genyaml
-rwxr-xr-x 1 root root 9.7M Jan 28 11:10 ginkgo
-rwxr-xr-x 1 root root  38M Jan 28 11:10 gke-certificates-controller
-rwxr-xr-x 1 root root 2.6M Jan 28 11:03 go-bindata
-rwxr-xr-x 1 root root 240M Jan 28 11:09 hyperkube
-rwxr-xr-x 1 root root 144M Jan 28 11:10 kubeadm
-rwxr-xr-x 1 root root  53M Jan 28 11:10 kube-aggregator
-rwxr-xr-x 1 root root 200M Jan 28 11:10 kube-apiserver
-rwxr-xr-x 1 root root 131M Jan 28 11:09 kube-controller-manager
-rwxr-xr-x 1 root root  65M Jan 28 11:10 kubectl
-rwxr-xr-x 1 root root 141M Jan 28 11:10 kubelet
-rwxr-xr-x 1 root root 142M Jan 28 11:10 kubemark
-rwxr-xr-x 1 root root  61M Jan 28 11:09 kube-proxy
-rwxr-xr-x 1 root root  59M Jan 28 11:10 kube-scheduler
-rwxr-xr-x 1 root root 6.0M Jan 28 11:10 linkcheck
-rwxr-xr-x 1 root root 2.1M Jan 28 11:10 mounter
-rwxr-xr-x 1 root root  12M Jan 28 11:04 openapi-gen
-rwxr-xr-x 1 root root 2.6M Jan 28 11:03 teststale

Copy kubectl, kubelet, kube-apiserver, kube-controller-manager, kube-scheduler and kube-proxy to the /bin directory on all master nodes; all files must be executable.

Copy kubelet and kube-proxy to the /bin directory on all slave nodes; all files must be executable.

The entire output directory can also be copied to the other nodes.
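
One way to push the binaries with ansible (the masters/nodes groups are the ones sketched in the inventory example earlier; mode=0755 sets the execute bit):

BIN=/opt/app/k8s/source-k8s/kubernetes/_output/local/bin/linux/amd64

for f in kubectl kubelet kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
    ansible masters -m copy -a "src=$BIN/$f dest=/bin/$f mode=0755"
done

for f in kubelet kube-proxy; do
    ansible nodes -m copy -a "src=$BIN/$f dest=/bin/$f mode=0755"
done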

2. Install and start docker
2.1 Install docker
yum -y install yum-utils

yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

yum install -y docker-ce


2.2 Partition the disk
vim disk.sh

    #Create the primary partition
    disk_partition () {
        parted -s /dev/vdc mklabel msdos
        parted -s /dev/vdc mkpart primary 0 100%
    }
    
    #Create the logical volumes
    disk_lvm () {
        pvcreate /dev/vdc1
        vgcreate docker /dev/vdc1
        lvcreate --wipesignatures y -n thinpool docker -l 95%VG
        lvcreate --wipesignatures y -n thinpoolmeta docker -l 1%VG
        lvconvert -y --zero n -c 512K --thinpool docker/thinpool --poolmetadata docker/thinpoolmeta
        echo -e 'activation {\nthin_pool_autoextend_threshold=90\nthin_pool_autoextend_percent=20\n}' > /etc/lvm/profile/docker-thinpool.profile
        lvchange --metadataprofile docker-thinpool docker/thinpool
    }
    disk_partition
    disk_lvm

bash disk.sh
2.3 Modify the docker configuration file
vim /etc/docker/daemon.json
{
  "hosts": ["unix:///var/run/docker.sock"],
  "storage-driver": "devicemapper",
  "storage-opts": [
    "dm.basesize=10G",
    "dm.thinpooldev=/dev/mapper/docker-thinpool",
    "dm.use_deferred_removal=true",
    "dm.use_deferred_deletion=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "20m",
    "max-file": "10"
  },
  "live-restore": false
}
2.4 Start docker
systemctl enable docker

systemctl start docker
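
A quick optional check that docker picked up the thinpool configuration; it should report the devicemapper driver and the docker-thinpool pool created above:

docker info | grep -E 'Storage Driver|Pool Name'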
3. Create the admin certificate
mkdir /opt/ssl
cd /opt/ssl

vim admin-csr.json
{
    "CN": "admin",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "system.masters",
            "OU": "System"
        }
    ]
}
4. Generate the certificate
$ cfssl gencert -ca=/opt/ssl/ca.pem \
-ca-key=/opt/ssl/ca-key.pem \
-config=/opt/ssl/config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
$ ll admin*
-rwxr-xr-x 1 root root 1009 Mar  8 14:36 admin.csr
-rwxr-xr-x 1 root root  288 Mar  8 14:32 admin-csr.json
-rwx--x--x 1 root root 1675 Mar  8 14:36 admin-key.pem
-rwxr-xr-x 1 root root 1403 Mar  8 14:36 admin.pem
5. Configure the kubectl kubeconfig file

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.0.19:6443


# Configure client authentication

kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem
  


kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin


kubectl config use-context kubernetes

cp /root/.kube/config /etc/kubernetes/kubelet.kubeconfig

# After the commands above, the kubeconfig is written to /root/.kube/config

# Distribute the kubeconfig configured above to the other machines

# On the other servers, create the directory first

mkdir /root/.kube

scp /root/.kube/config 192.168.0.20:/root/.kube/

scp /root/.kube/config 192.168.0.21:/root/.kube/

After distributing the file, change the IP address in it to the local machine's IP.
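
For example, on 192.168.0.20 (a sketch; editing the file by hand is equivalent, and the embedded certificates are unaffected):

sed -i 's/192.168.0.19/192.168.0.20/' /root/.kube/config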

V. Create the Kubernetes certificates

1. Create the JSON file
vim kubernetes-csr.json
{
    "CN": "kuberenetes",
    "hosts": [
       "127.0.0.1",
       "192.168.0.19",
       "192.168.0.20",
       "192.168.0.21",
       "10.254.0.1",
       "kubernetes",
       "kubernetes.default",
       "kubernetes.default.svc",
       "kubernetes.default.svc.cluster",
       "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}

In the hosts field, 127.0.0.1 is the local host, 192.168.0.19, 192.168.0.20 and 192.168.0.21 are the master IPs, and 10.254.0.1 is the kubernetes service (SVC) IP, normally the first IP of the service network, e.g. 10.254.0.1. After startup it can be seen with kubectl get svc.

$ cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
-ca-key=/etc/kubernetes/ssl/ca-key.pem \
-config=/etc/kubernetes/ssl/config.json \
-profile=kubernetes kubernetes-csr.json |cfssljson -bare kubernetes
$ ll kubernetes*
-rwxr-xr-x 1 root root 1261 Mar  9 10:17 kubernetes.csr
-rwxr-xr-x 1 root root  561 Mar  9 10:16 kubernetes-csr.json
-rwx--x--x 1 root root 1675 Mar  9 10:17 kubernetes-key.pem
-rwxr-xr-x 1 root root 1631 Mar  9 10:17 kubernetes.pem
2. Write the kube-apiserver.service unit file
192.168.0.19
mkdir /root/service/
vi /root/service/kube-apiserver.service

[Unit]
Description=kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --advertise-address=192.168.0.19 \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/log/audit.log \
  --authorization-mode=AlwaysAllow \
  --bind-address=192.168.0.19 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --enable-swagger-ui=true \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.0.19:2379,https://192.168.0.20:2379,https://192.168.0.21:2379 \
  --event-ttl=1h \
  --kubelet-https=true \
  --insecure-bind-address=192.168.0.19 \
  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \
  --service-account-key-file=/etc/kubernetes/ssl/ca.pem \
  --service-cluster-ip-range=10.254.0.0/16 \
  --service-node-port-range=30000-32000 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --experimental-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/token.csv \
  --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

cp /root/service/kube-apiserver.service /etc/systemd/system/
3. Start the service
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver
4. Generate the token
# Generate a token

[root@k8s-master-1 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
d51cd04061186edab41cf11abba63d5f


# Create the token.csv file

cd /opt/ssl

vi token.csv

d51cd04061186edab41cf11abba63d5f,kubelet-bootstrap,10001,"system:kubelet-bootstrap"


# Copy (the kube-apiserver unit above expects the file at /etc/kubernetes/token.csv)

cp token.csv /etc/kubernetes/
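
Every kube-apiserver reads this file. It will also reach the other masters when /etc/kubernetes is copied wholesale later in this guide, but it can be pushed right away, e.g. with the masters group from the ansible sketch:

ansible masters -m copy -a 'src=/etc/kubernetes/token.csv dest=/etc/kubernetes/token.csv'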
5. Write the kube-controller-manager.service unit file
192.168.0.19
vi /root/service/kube-controller-manager.service

[Unit]
Description=kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://192.168.0.19:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=10.254.0.0/16 \
  --cluster-cidr=10.233.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

cp /root/service/kube-controller-manager.service /etc/systemd/system/kube-controller-manager.service
Start the service
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager
6. Write the kube-scheduler.service unit file
192.168.0.19
vi /root/service/kube-scheduler.service 

[Unit]
Description=kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://192.168.0.19:8080 \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

cp /root/service/kube-scheduler.service /etc/systemd/system/ 
Start the service
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler
7. Configure kubelet
kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.0.19:6443 \
  --kubeconfig=bootstrap.kubeconfig

# Configure client authentication

kubectl config set-credentials kubelet-bootstrap \
  --token=d51cd04061186edab41cf11abba63d5f \
  --kubeconfig=bootstrap.kubeconfig


# Configure the context

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig
  
  
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# Move the generated bootstrap.kubeconfig file

mv bootstrap.kubeconfig /etc/kubernetes/
Edit kubelet.service

vi /root/service/kubelet.service

[Unit]
Description=kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/bin/kubelet \
  --address=192.168.0.19 \
  --hostname-override=k8s-master-35-19 \
  --pod-infra-container-image=jicki/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --require-kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --cluster_dns=10.254.0.2 \
  --cluster_domain=cluster.local. \
  --hairpin-mode promiscuous-bridge \
  --allow-privileged=true \
  --serialize-image-pulls=false \
  --logtostderr=true \
  --network-plugin=cni \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

cp /root/service/kubelet.service /etc/systemd/system
Start kubelet
mkdir /var/lib/kubelet
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet
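
With TLS bootstrapping, the kubelet first submits a certificate signing request that has to be approved from a master before the node registers; a sketch of that step (the CSR name is a placeholder, taken from the get csr output):

kubectl get csr
kubectl certificate approve <csr-name>

Once approved, the node should show up in kubectl get nodes.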
8. Deploy kube-proxy
Create the certificate
cd /opt/ssl

vi kube-proxy-csr.json

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}


Generate the certificate
cfssl gencert -ca=/opt/ssl/ca.pem \
  -ca-key=/opt/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy
cp kube-proxy* /etc/kubernetes/ssl/
Create the kube-proxy kubeconfig file
# Configure the cluster

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.0.19:6443 \
  --kubeconfig=kube-proxy.kubeconfig


# Configure client authentication

kubectl config set-credentials kube-proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig
  
  
# Configure the context

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig



# Set the default context
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Move to the kubernetes directory
mv kube-proxy.kubeconfig /etc/kubernetes/

Create the kube-proxy unit file
vi /root/service/kube-proxy.service
[Unit]
Description=kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/bin/kube-proxy \
  --bind-address=192.168.0.19 \
  --hostname-override=k8s-master-35-19 \
  --cluster-cidr=10.254.0.0/16 \
  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
  --logtostderr=true \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

cp /root/service/kube-proxy.service /etc/systemd/system
Start the service
systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl status kube-proxy

Add the other master nodes


Copy the service directory, change the IP addresses in the unit files to those of the target node, and create the required directories.
scp -r /root/service 192.168.0.20:/root
scp -r /root/service 192.168.0.21:/root

Copy the kubernetes directory and change the IP addresses in the *.kubeconfig files.
scp -r /etc/kubernetes 192.168.0.20:/etc/
scp -r /etc/kubernetes 192.168.0.21:/etc/
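
A sketch of the per-node edits on 192.168.0.20 (manual edits are equivalent; the --etcd-servers line must keep all three etcd IPs, so it is excluded from the blanket substitution):

cd /root/service
sed -i '/--etcd-servers=/! s/192.168.0.19/192.168.0.20/g' kube-apiserver.service
sed -i 's/192.168.0.19/192.168.0.20/g' kube-controller-manager.service kube-scheduler.service \
    kubelet.service kube-proxy.service
sed -i 's/k8s-master-35-19/k8s-master-35-20/g' kubelet.service kube-proxy.service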

Add the Node (slave) nodes (only one node's configuration is shown here)

Modify the hosts file

vi /etc/hosts

192.168.0.19 kubernetes.default.svc.cluster.local
192.168.0.20 kubernetes.default.svc.cluster.local
192.168.0.21 kubernetes.default.svc.cluster.local

Then modify the configuration files

#Copy the kubelet.service and kube-proxy.service files
scp /root/service/kubelet.service /root/service/kube-proxy.service 192.168.0.22:/root


#Change the IP and hostname to match this node
cp /root/kubelet.service /etc/systemd/system
cp /root/kube-proxy.service /etc/systemd/system


#Create the directories
mkdir /var/lib/kubelet
mkdir /var/lib/kube-proxy


#Copy the kubernetes directory to the node
scp -r /etc/kubernetes 192.168.0.22:/etc/

#Replace the IP in *.kubeconfig with kubernetes.default.svc.cluster.local
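
A sketch of that substitution on the node (a manual edit is equivalent; the hosts entries above make the name resolve to a master):

sed -i 's/192.168.0.19/kubernetes.default.svc.cluster.local/g' \
    /etc/kubernetes/bootstrap.kubeconfig \
    /etc/kubernetes/kubelet.kubeconfig \
    /etc/kubernetes/kube-proxy.kubeconfig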


Configure kube-dns

Required images
jicki/k8s-dns-sidecar-amd64:1.14.4
jicki/k8s-dns-kube-dns-amd64:1.14.4
jicki/k8s-dns-dnsmasq-nanny-amd64:1.14.4
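
These images need to be present (or pullable) on every node. A sketch of pre-pulling them with the nodes group assumed in the ansible inventory example earlier:

for img in jicki/k8s-dns-sidecar-amd64:1.14.4 jicki/k8s-dns-kube-dns-amd64:1.14.4 jicki/k8s-dns-dnsmasq-nanny-amd64:1.14.4; do
    ansible nodes -m shell -a "docker pull $img"
done
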
mkdir /root/kube-dns

#Copy the kube-dns manifests from the source tree into the kube-dns directory

cd /opt/kubernetes/cluster/addons/dns

cp kubedns-sa.yaml kubedns-cm.yaml /root/kube-dns

cp kubedns-controller.yaml.base /root/kube-dns/kubedns-controller.yaml

cp kubedns-svc.yaml.base  /root/kube-dns/kubedns-svc.yaml

Modify the kubedns-controller.yaml file
cd /root/kube-dns

vi kubedns-controller.yaml 

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.

# __MACHINE_GENERATED_WARNING__

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      volumes:
      - name: kube-dns-config
        configMap:
          name: kube-dns
          optional: true
      containers:
      - name: kubedns
        image: jicki/k8s-dns-kube-dns-amd64:1.14.4  # image changed to the one listed above
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        livenessProbe:
          httpGet:
            path: /healthcheck/kubedns
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 3
          timeoutSeconds: 5
        args:
        - --domain=cluster.local          # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        - --dns-port=10053
        - --config-dir=/kube-dns-config
        - --v=2
        env:
        - name: PROMETHEUS_PORT
          value: "10055"
        ports:
        - containerPort: 10053
          name: dns-local
          protocol: UDP
        - containerPort: 10053
          name: dns-tcp-local
          protocol: TCP
        - containerPort: 10055
          name: metrics
          protocol: TCP
        volumeMounts:
        - name: kube-dns-config
          mountPath: /kube-dns-config
      - name: dnsmasq
        image: jicki/k8s-dns-dnsmasq-nanny-amd64:1.14.4  # image changed to the one listed above
        livenessProbe:
          httpGet:
            path: /healthcheck/dnsmasq
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - -v=2
        - -logtostderr
        - -configDir=/etc/k8s/dns/dnsmasq-nanny
        - -restartDnsmasq=true
        - --
        - -k
        - --cache-size=1000
        - --log-facility=-
        - --server=/cluster.local/127.0.0.1#10053    # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        - --server=/in-addr.arpa/127.0.0.1#10053
        - --server=/ip6.arpa/127.0.0.1#10053
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
        resources:
          requests:
            cpu: 150m
            memory: 20Mi
        volumeMounts:
        - name: kube-dns-config
          mountPath: /etc/k8s/dns/dnsmasq-nanny
      - name: sidecar
        image: jicki/k8s-dns-sidecar-amd64:1.14.4   # image changed to the one listed above
        livenessProbe:
          httpGet:
            path: /metrics
            port: 10054
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        args:
        - --v=2
        - --logtostderr
        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A   # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A      # __PILLAR__DNS__DOMAIN__ changed to cluster.local
        ports:
        - containerPort: 10054
          name: metrics
          protocol: TCP
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
      dnsPolicy: Default  # Don't use cluster DNS.

Modify the kubedns-svc.yaml file

vi kubedns-svc.yaml 

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2   # __PILLAR__DNS__SERVER__ changed to 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

Create the kube-dns service
kubectl create -f .
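
kube-dns will not become Ready until the calico network configured in the next step is running; once it is, a quick check:

kubectl get pods -n kube-system -o wide
kubectl get svc -n kube-system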

Configure the calico components

Required images
jicki/node:v1.3.0
jicki/cni:v1.9.1
jicki/kube-policy-controller:v0.6.0
Download and modify the manifest
wget http://docs.projectcalico.org/v2.3/getting-started/kubernetes/installation/hosted/calico.yaml

vi calico.yaml

# Note: modify the following options:


  etcd_endpoints: "https://192.168.0.19:2379,https://192.168.0.20:2379,https://192.168.0.21:2379"
  
    etcd_ca: "/calico-secrets/etcd-ca"  
    etcd_cert: "/calico-secrets/etcd-cert"
    etcd_key: "/calico-secrets/etcd-key"  


# The base64-encoded contents go here
# Run each command shown in parentheses and paste its output into etcd-key, etcd-cert and etcd-ca (without the parentheses).


data:
  etcd-key: (cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')
  etcd-cert: (cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')
  etcd-ca: (cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')
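
A small helper to print the three base64 lines ready to paste into the data: section (a sketch; it simply runs the parenthesised commands above):

echo "  etcd-key: $(cat /etc/kubernetes/ssl/etcd-key.pem | base64 | tr -d '\n')"
echo "  etcd-cert: $(cat /etc/kubernetes/ssl/etcd.pem | base64 | tr -d '\n')"
echo "  etcd-ca: $(cat /etc/kubernetes/ssl/ca.pem | base64 | tr -d '\n')"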


    - name: CALICO_IPV4POOL_CIDR
      value: "10.233.0.0/16"
      

Create calico
kubectl create -f calico.yaml

Test the cluster
# Create an nginx deployment

vi nginx.yaml

apiVersion: extensions/v1beta1 
kind: Deployment 
metadata: 
  name: nginx-dm
spec: 
  replicas: 2
  template: 
    metadata: 
      labels: 
        name: nginx 
    spec: 
      containers: 
        - name: nginx 
          image: nginx:alpine 
          imagePullPolicy: IfNotPresent
          ports: 
            - containerPort: 80
            
---

apiVersion: v1 
kind: Service
metadata: 
  name: nginx-svc 
spec: 
  ports: 
    - port: 80
      targetPort: 80
      protocol: TCP 
  selector: 
    name: nginx


kubectl create -f nginx.yaml
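
A few checks to confirm the cluster works end to end (the pod name in the exec command is a placeholder; take a real one from the get pods output):

kubectl get pods -o wide
kubectl get deployment,svc

# resolve the service name from inside a pod to confirm kube-dns is working
kubectl exec -it <nginx-pod-name> -- nslookup nginx-svc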