# Create the dedicated etcd service account with fixed uid/gid 1203.
# BUG FIX: useradd -g 1203 fails unless group 1203 already exists — create it first.
getent group etcd >/dev/null || groupadd -g 1203 etcd
useradd -g 1203 -u 1203 -d /data/etcd etcd
# useradd without -m does not create the home directory; create it and hand it over.
# (chown user:group — the 'user.group' dot form is deprecated/non-portable.)
mkdir -p /data/etcd
chown etcd:etcd /data/etcd

# Push the etcd config and systemd unit to master1 (-r dropped: these are single files).
scp etcd.conf master1:/etc/etcd/
scp etcd.service master1:/lib/systemd/system/




# One-shot full import of the user-center MySQL database into HDFS at /data.
# --delete-target-dir wipes /data first — destructive; confirm /data holds nothing else.
# NOTE(review): --password on the command line leaks via `ps` and shell history —
# prefer --password-file or -P (interactive prompt).
sqoop import  --connect jdbc:mysql://172.30.136.60:3306/user-center --username sip-gzh --password  'D2HCK8EA0WTivkMX'  --delete-target-dir --target-dir /data



# Daily incremental import (run as hdfs): pulls yesterday's v_client_msg rows
# (updateTime between CURDATE()-1 and CURDATE()) into Hive table
# middleground_source.v_client_msg as parquet, overwriting the partition data.
# allow_text_splitter=true is needed because the split column (taxNo) is text.
# \$CONDITIONS is escaped so sqoop (not the outer shell) substitutes the split predicate.
# NOTE(review): password on argv leaks via `ps`/history — prefer --password-file.
sudo su - hdfs -c "sqoop import \"-Dorg.apache.sqoop.splitter.allow_text_splitter=true\" --connect jdbc:mysql://172.30.136.60:3306/user-center --username sip-gzh --password 'D2HCK8EA0WTivkMX' --hive-import --hive-overwrite --query 'select taxNo,areaCode,opSystem,sysType,browser,ScreenSize,updateTime from v_client_msg where updateTime BETWEEN DATE_SUB(CURDATE(), INTERVAL 1 DAY) AND DATE_SUB(CURDATE(), INTERVAL 0 DAY) and \$CONDITIONS' --fields-terminated-by '\t' --as-parquetfile --split-by taxNo --delete-target-dir --target-dir /data --hive-database middleground_source --hive-table v_client_msg"


c6ks3uC@vHwrAf5@

备份
步骤一,在新master1上进行
cp /etc/etcd
cp /usr/bin/etcd*
/lib/systemd/system/etcd.service


步骤二,在master2上进行
kubectl delete node master1

步骤三,备份
cp /root/.kube/config
chmod 400 /root/.kube/config
cp /etc/systemd/system/kube*
cp /opt/ssl
cp /etc/kubernetes


恢复
# Recreate the etcd service account on the rebuilt master1 (fixed uid/gid 1203).
# BUG FIX: useradd -g 1203 fails unless group 1203 already exists — create it first.
getent group etcd >/dev/null || groupadd -g 1203 etcd
useradd -g 1203 -u 1203 -d /data/etcd etcd
# useradd without -m does not create the home directory; create it and hand it over.
# (chown user:group — the 'user.group' dot form is deprecated/non-portable.)
mkdir -p /data/etcd
chown etcd:etcd /data/etcd

# Restore the backed-up config and systemd unit (-r dropped: single files).
scp etcd.conf master1:/etc/etcd/
scp etcd.service master1:/lib/systemd/system/

# Identify the dead member before removing it from the cluster:
# 'endpoint status' shows which endpoint is unreachable,
# 'member list' gives the member ID needed for 'member remove'.
etcdctl -w table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints=https://172.31.115.43:2379,https://172.31.115.44:2379,https://172.31.115.45:2379 endpoint status

etcdctl -w table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints=https://172.31.115.43:2379,https://172.31.115.44:2379,https://172.31.115.45:2379 member list
+------------------+---------+-------+----------------------------+----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+----------------------------+----------------------------+------------+
| 16e478be8adc0e80 | started | etcd3 | https://172.31.115.45:2380 | https://172.31.115.45:2379 | false |
| 447c4ba4cce9a31c | started | etcd2 | https://172.31.115.44:2380 | https://172.31.115.44:2379 | false |
| 4d2d3bb18264c67a | started | etcd1 | https://172.31.115.43:2380 | https://172.31.115.43:2379 | false |
+------------------+---------+-------+----------------------------+----------------------------+------------+

关闭etcd1
# Stop etcd on the failed node and wipe its data + config so it can rejoin
# the cluster as a fresh member (stale data would conflict with the raft log).
[root@master1 etcd] systemctl stop etcd
[root@master1 etcd] rm /data/etcd -rf ; rm /etc/etcd -rf

[root@master2 ~] # 找到挂掉节点的id
# Remove the dead member (id from 'member list') from the cluster, talking only
# to the two healthy endpoints.
# BUG FIX: the endpoint list was passed as a bare positional argument; etcdctl
# ignores it and falls back to 127.0.0.1:2379 — it must be --endpoints=.
etcdctl --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints=https://172.31.115.44:2379,https://172.31.115.45:2379 member remove 4d2d3bb18264c67a

[root@master2 ~]
etcdctl -w table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints=https://172.31.115.43:2379,https://172.31.115.44:2379,https://172.31.115.45:2379 member list
+------------------+---------+-------+----------------------------+----------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------+----------------------------+----------------------------+------------+
| 16e478be8adc0e80 | started | etcd3 | https://172.31.115.45:2380 | https://172.31.115.45:2379 | false |
| 447c4ba4cce9a31c | started | etcd2 | https://172.31.115.44:2380 | https://172.31.115.44:2379 | false |
+------------------+---------+-------+----------------------------+----------------------------+------------+


# Re-register etcd1 as a new member; etcdctl prints the ETCD_NAME /
# ETCD_INITIAL_* values that must go into etcd1's config before it starts.
[root@master2 etc]# etcdctl --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem --endpoints=https://172.31.115.44:2379,https://172.31.115.45:2379 member add etcd1 --peer-urls=https://172.31.115.43:2380

# Values printed by 'member add' above — copy them into etcd1's startup config.
ETCD_NAME="etcd1"
ETCD_INITIAL_CLUSTER="etcd3=https://172.31.115.45:2380,etcd2=https://172.31.115.44:2380,etcd1=https://172.31.115.43:2380"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.31.115.43:2380"
ETCD_INITIAL_CLUSTER_STATE="existing"


# Key point: change 'new' to 'existing' in etcd1's startup config, then start it —
# the node joins the running cluster instead of trying to bootstrap a new one.
ETCD_INITIAL_CLUSTER_STATE="existing"

# Restart etcd on the recovered node with the 'existing' cluster state.
[root@master1 etcd]# systemctl restart etcd

# List bootstrap tokens, then delete the stale token left over from the old node.
[root@master2 kubernetes]# kubeadm token list --kubeconfig ~/.kube/config
[root@master1 kubernetes]# kubeadm token delete --kubeconfig ~/.kube/config 1tkq7l.zlqwhh7baze4wryp



# Remove the old kubelet client certificates so kubelet requests fresh ones
# on startup (old certs no longer match the rebuilt cluster CA state).
[root@master1 kubernetes]# rm /etc/kubernetes/ssl/kubelet* -rf
[root@master1 kubernetes]# systemctl start kubelet


# Approve all pending CSRs so the new kubelet certificates are issued.
[root@master1 ssl]# kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve