kubernetes v1.21.3
containerd 1.4.3
zookeeper:3.4.10
1. Create the ZooKeeper PVs
Create three shared directories (they could also be provided via NFS, which is not covered in detail here):
mkdir -p /data/share/pv/{zk01,zk02,zk03}
These correspond to the persistence directories of the three pods in the three-node zk cluster.
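Note that hostPath volumes are node-local: each directory has to exist on whichever node the corresponding zk pod is scheduled to, so either create them on every worker node or back the PVs with NFS as mentioned above. A minimal NFS-backed variant of the first PV could look like the sketch below; the server address 192.168.1.100 and the export path are placeholders for your own NFS environment:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk01
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  nfs:                          # replaces hostPath; the export must be reachable from every node
    server: 192.168.1.100       # placeholder NFS server address
    path: /data/share/pv/zk01   # placeholder export path
  persistentVolumeReclaimPolicy: Recycle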
Write the PV manifest, zookeeper-pv.yaml (PersistentVolumes are cluster-scoped, so they take no namespace):
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk01
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/share/pv/zk01
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk02
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/share/pv/zk02
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk03
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: /data/share/pv/zk03
  persistentVolumeReclaimPolicy: Recycle
[root@ck8s1 zk]# kubectl create -f zookeeper-pv.yaml
persistentvolume/k8s-pv-zk01 created
persistentvolume/k8s-pv-zk02 created
persistentvolume/k8s-pv-zk03 created
Check that the PVs were created:
[root@ck8s1 zk]# kubectl get pv -o wide
NAME          CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS   REASON   AGE     VOLUMEMODE
k8s-pv-zk01   5Gi        RWO            Recycle          Available           anything                5m15s   Filesystem
k8s-pv-zk02   5Gi        RWO            Recycle          Available           anything                5m15s   Filesystem
k8s-pv-zk03   5Gi        RWO            Recycle          Available           anything                5m15s   Filesystem
2. Create the ZooKeeper cluster
Deploy the three zk nodes with a StatefulSet, using the PVs created above as the storage backend.
vim zookeeper.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: zookeeper-cluster
---
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: zookeeper-cluster
  labels:
    app: zk
spec:
  selector:
    app: zk
  clusterIP: None
  ports:
  - name: server
    port: 2888
  - name: leader-election
    port: 3888
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: zookeeper-cluster
  labels:
    app: zk
spec:
  selector:
    app: zk
  type: NodePort
  ports:
  - name: client
    port: 2181
    nodePort: 30001
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: zookeeper-cluster
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: zookeeper-cluster
spec:
  selector:
    matchLabels:
      app: zk # has to match .spec.template.metadata.labels
  serviceName: "zk-hs"
  replicas: 3 # by default is 1
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk # has to match .spec.selector.matchLabels
    spec:
      containers:
      - name: zk
        imagePullPolicy: Always
        image: leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10
        resources:
          requests:
            memory: "500Mi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.beta.kubernetes.io/storage-class: "anything"
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi
[root@ck8s1 zk]# kubectl apply -f zookeeper.yaml
namespace/zookeeper-cluster created
service/zk-hs created
service/zk-cs created
Warning: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget
poddisruptionbudget.policy/zk-pdb created
statefulset.apps/zk created
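The warning above is expected on v1.21: policy/v1beta1 PodDisruptionBudget still works, but it is removed in v1.25. If you plan to upgrade the cluster, the same PDB can be declared against the policy/v1 API; only the apiVersion changes:

apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: zookeeper-cluster
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1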
[root@ck8s1 zk]# kubectl get pod -n zookeeper-cluster
NAME   READY   STATUS    RESTARTS   AGE
zk-0   1/1     Running   0          14m
zk-1   1/1     Running   0          14m
zk-2   1/1     Running   0          14m
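Each replica also gets a PVC from volumeClaimTemplates, named datadir-<pod>, requesting 1Gi under the "anything" storage-class annotation, so every claim binds to one of the 5Gi PVs from step one (a PV may be larger than the claim it satisfies). kubectl get pvc confirms the bindings; which PV a given claim lands on is not deterministic, so the pairing below is only illustrative:

kubectl get pvc -n zookeeper-cluster
NAME           STATUS   VOLUME        CAPACITY   ACCESS MODES   STORAGECLASS   AGE
datadir-zk-0   Bound    k8s-pv-zk01   5Gi        RWO            anything       14m
datadir-zk-1   Bound    k8s-pv-zk03   5Gi        RWO            anything       14m
datadir-zk-2   Bound    k8s-pv-zk02   5Gi        RWO            anything       14m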
[root@ck8s1 zk]# kubectl get svc -n zookeeper-cluster
NAME    TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE
zk-cs   NodePort    10.10.114.145   <none>        2181:30001/TCP      37s
zk-hs   ClusterIP   None            <none>        2888/TCP,3888/TCP   38s
As shown above, client port 2181 is mapped to NodePort 30001 and exposed outside the cluster.
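A quick external check of the NodePort is to send ZooKeeper's stat command (the same four-letter command used in step three) to any node's IP on port 30001; the node IP below is a placeholder:

echo stat | nc 192.168.1.101 30001

If the mapping works, this prints the same kind of stat output as shown at the end of step three.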
3. Verify that the ZooKeeper cluster started successfully
Enter each container with kubectl exec -it zk-0 -n zookeeper-cluster /bin/sh,
then check the node's role with zkServer.sh status.
[root@ck8s1 zk]# kubectl exec -it zk-1 -n zookeeper-cluster /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
#
[root@ck8s1 zk]# kubectl exec -it zk-0 -n zookeeper-cluster /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
#
[root@ck8s1 zk]# kubectl exec -it zk-2 -n zookeeper-cluster /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: leader
#
Verification complete: two followers and one leader, as expected for a three-node ensemble.
Check the version
Log in to a container and run echo stat | nc localhost 2181 to see the server version.
[root@ck8s1 zk]# kubectl exec -it zk-2 -n zookeeper-cluster /bin/sh
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
# echo stat|nc localhost 2181
Zookeeper version: 3.4.10-39d3a4f269333c922ed3db283be479f9deacaa0f, built on 03/23/2017 10:13 GMT
Clients:
/0:0:0:0:0:0:0:1:43926[0](queued=0,recved=1,sent=0)
Latency min/avg/max: 0/0/0
Received: 327
Sent: 326
Connections: 1
Outstanding: 0
Zxid: 0x200000000
Mode: leader
Node count: 4
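As a final check that the three servers really form one ensemble, write a znode on one member and read it back from another. This assumes zkCli.sh is on the PATH inside the image (it ships with the standard ZooKeeper 3.4.10 distribution); the znode /hello and its value are arbitrary test data.
Create a test znode via zk-0:
kubectl exec zk-0 -n zookeeper-cluster -- zkCli.sh create /hello world
Read it back from zk-2; the value world should be returned:
kubectl exec zk-2 -n zookeeper-cluster -- zkCli.sh get /hello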