Tags (space-separated): docker container hands-on
1: Deployment overview
1.1 System environment
OS:
CentOS 7.9 x64
Hostname:
cat /etc/hosts
-----
172.16.10.26 flyfish26
-----
Because this is a single-node Docker deployment, only the flyfish26 machine is used.
Software packages required:
flink-1.13.6-bin-scala_2.12.tgz
hadoop-3.2.4.tar.gz
jdk1.8.0_201.tar.gz
scala-2.12.15.tgz
hadoopfiles.tar.gz ## the Hadoop configuration files
On the server, disable SELinux and firewalld and flush the iptables rules, as sketched below.
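A minimal sketch of that preparation (assumes firewalld is the active firewall; the SELinux commands also appear in section 2.1):
systemctl disable --now firewalld
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
iptables -F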
2: Installing Docker
2.1 Install Docker
yum install -y wget jq psmisc vim net-tools nfs-utils telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl
# Disable the swap partition
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a && sysctl -w vm.swappiness=0
cat /etc/fstab
# /dev/mapper/centos-swap swap swap defaults 0 0
#
## Disable SELinux
# setenforce 0
# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Adjust kernel parameters
yum -y install bridge-utils
modprobe br_netfilter
vim /etc/sysctl.conf
-----
net.ipv4.ip_forward = 1
net.ipv6.conf.all.disable_ipv6 = 1
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
-----
sysctl -p
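Note that modprobe br_netfilter does not survive a reboot; a sketch for loading it at boot via systemd's modules-load mechanism:
cat << EOF > /etc/modules-load.d/br_netfilter.conf
br_netfilter
EOF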
cat <<EOF >> /etc/security/limits.conf
* hard nofile 655360
* soft nofile 655360
* hard nproc 655360
* soft nproc 655360
* soft core 655360
* hard core 655360
root hard nofile 655360
root soft nofile 655360
EOF
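These limits apply to new sessions only; after logging in again, a quick check:
ulimit -n   ## expect 655360
ulimit -u   ## expect 655360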
### System dependency packages
yum install -y conntrack socat ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
### Enable IPVS forwarding
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack
1) Installing from RPM packages:
Official documentation: https://docs.docker.com
# Install dependency packages
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the Docker package repository
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
## List all available versions
yum list docker-ce --showduplicates | sort -r
# Install Docker CE
yum install -y docker-ce   ## installs the latest version directly
## Or install a specific required version:
#yum install docker-ce-19.03.15-3.el7 docker-ce-cli-19.03.15-3.el7
yum install docker-ce-20.10.24-3.el7 docker-ce-cli-20.10.24-3.el7 docker-ce-rootless-extras-20.10.24-3.el7
## Aliyun registry mirror (accelerator)
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://dfmo7maf.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "2048m"
  },
  "storage-driver": "overlay2"
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo systemctl enable docker
## Optionally add a registry mirror via DaoCloud's script (note: this rewrites the registry-mirrors entry configured above)
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
# Start the Docker service and enable it at boot
systemctl start docker
systemctl enable docker
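A quick sanity check that the daemon is running and picked up the daemon.json settings:
systemctl is-active docker
docker info | grep -iE 'storage driver|cgroup driver'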
2.2 Building the base image
docker pull centos:7
docker network create --driver bridge flink-br
docker run -tid --name centos-software --privileged=true centos:7 /usr/sbin/init
Copy all the installation packages into the container:
docker cp flink-1.13.6-bin-scala_2.12.tgz centos-software:/root/
docker cp hadoop-3.2.4.tar.gz centos-software:/root/
docker cp jdk1.8.0_201.tar.gz centos-software:/root/
docker cp scala-2.12.15.tgz centos-software:/root/
docker cp hadoopfiles.tar.gz centos-software:/root/
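Equivalently, a small loop over the five archives (assumes they sit in the current directory):
for f in flink-1.13.6-bin-scala_2.12.tgz hadoop-3.2.4.tar.gz jdk1.8.0_201.tar.gz scala-2.12.15.tgz hadoopfiles.tar.gz; do
    docker cp "$f" centos-software:/root/
done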
Enter the container:
docker exec -it centos-software /bin/bash
Install packages:
yum install -y lrzsz git psmisc telnet subversion gpm vim net-tools curl htop openssh-server openssh-clients
---
mkdir -p /opt/bigdata
cd /root
tar -zxvf flink-1.13.6-bin-scala_2.12.tgz && mv flink-1.13.6-bin-scala_2.12 /opt/bigdata/flink
tar -zxvf hadoop-3.2.4.tar.gz && mv hadoop-3.2.4 /opt/bigdata/hadoop
tar -zxvf jdk1.8.0_201.tar.gz && mv jdk1.8.0_201 /opt/bigdata/jdk
tar -zxvf scala-2.12.15.tgz && mv scala-2.12.15 /opt/bigdata/scala
tar -zxvf hadoopfiles.tar.gz
cd /opt/bigdata/hadoop/etc/hadoop/
rm -rf *.cmd
mkdir xmlbak
mv core-site.xml xmlbak
mv hdfs-site.xml xmlbak
mv mapred-site.xml xmlbak
mv yarn-site.xml xmlbak
cd /root/
cd hadoopfiles/
cp -p core-site.xml hdfs-site.xml mapred-site.xml yarn-site.xml /opt/bigdata/hadoop/etc/hadoop/
vim /opt/bigdata/hadoop/etc/hadoop/workers
## List every container node (Hadoop 3.x reads the workers file)
---
flink01
flink02
flink03
---
cd /opt/bigdata/flink/conf
vim +195 flink-conf.yaml
## Add the following below line 195
---
classloader.check-leaked-classloader: false
---
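If you script the image build, the same line can be appended non-interactively (assumes the key is not already present in the file):
echo "classloader.check-leaked-classloader: false" >> /opt/bigdata/flink/conf/flink-conf.yaml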
Configure the environment variables:
vim /etc/profile
-----
### jdk
export JAVA_HOME=/opt/bigdata/jdk
export CLASSPATH=.:$JAVA_HOME/jre/lib:$JAVA_HOME/lib:$JAVA_HOME/lib/tools.jar
PATH=$PATH:$HOME/bin:$JAVA_HOME/bin
#### hadoop
export HADOOP_HOME=/opt/bigdata/hadoop
PATH=$PATH:$HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export HADOOP_CONF_DIR=/opt/bigdata/hadoop/etc/hadoop
export HADOOP_CLASSPATH=`hadoop classpath`
#### scala
export SCALA_HOME=/opt/bigdata/scala
PATH=$PATH:$HOME/bin:$SCALA_HOME/bin:$SCALA_HOME/sbin
## flink
export FLINK_HOME=/opt/bigdata/flink
PATH=$PATH:$HOME/bin:$FLINK_HOME/bin:$FLINK_HOME/sbin
-----
source /etc/profile
java -version
hadoop version
---
Commit the image:
docker commit centos-software centos7-software:v1
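Verify the committed image exists:
docker images centos7-software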
2.3 Start three containers from the image
## flink01 container
docker run -itd --network flink-br --hostname flink01 --name flink01 -p 50070:50070 -p 8088:8088 -p 9000:9000 -p 16010:16010 -p 2181:2181 -p 8020:8020 -p 8080:8080 -p 9020:9020 -p 1022:22 -p 10000:10000 -p 10002:10002 -p 8381:8381 --privileged=true centos7-software:v1 /usr/sbin/init
## flink02 container
docker run -itd --network flink-br --hostname flink02 --name flink02 -p 10020:10020 -p 19888:19888 -p 2022:22 --privileged=true centos7-software:v1 /usr/sbin/init
## flink03 container
docker run -itd --network flink-br --hostname flink03 --name flink03 -p 3022:22 --privileged=true centos7-software:v1 /usr/sbin/init
Enter the flink01 container:
docker exec -it flink01 /bin/bash
Set up passwordless root SSH trust among the three containers (generate a key pair first, then distribute it):
ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa
ssh-copy-id flink01
ssh-copy-id flink02
ssh-copy-id flink03
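A quick check that the trust works; each command should print the hostname without prompting for a password:
for h in flink01 flink02 flink03; do ssh "$h" hostname; done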
----
Then format the Hadoop filesystem (run this only once; re-running it destroys the existing NameNode metadata):
cd /opt/bigdata/hadoop/
hdfs namenode -format
In the flink01 container:
Start Hadoop's HDFS and YARN services.
Start the HDFS namenode, secondarynamenode, and datanode:
cd /opt/bigdata/hadoop/sbin
./hadoop-daemon.sh start namenode
./hadoop-daemon.sh start secondarynamenode
./hadoop-daemon.sh start datanode
Enter the flink02 container:
ssh flink02
cd /opt/bigdata/hadoop/sbin
./hadoop-daemon.sh start datanode
Enter the flink03 container:
ssh flink03
cd /opt/bigdata/hadoop/sbin
./hadoop-daemon.sh start datanode
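With all three datanodes started, confirm HDFS health from flink01 (expect three live datanodes):
hdfs dfsadmin -report | grep -E 'Live datanodes|^Name:'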
Open the HDFS web UI:
http://172.16.10.26:50070
Back in flink01, start YARN:
cd /opt/bigdata/hadoop/sbin
./yarn-daemon.sh start resourcemanager
./yarn-daemon.sh start nodemanager
Enter the flink02 container and start its daemons:
ssh flink02
cd /opt/bigdata/hadoop/sbin
./yarn-daemon.sh start nodemanager
./mr-jobhistory-daemon.sh start historyserver
Enter the flink03 container:
ssh flink03
cd /opt/bigdata/hadoop/sbin
./yarn-daemon.sh start nodemanager
Open the YARN web UI:
http://172.16.10.26:8088
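And confirm that all three NodeManagers registered with the ResourceManager:
yarn node -list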
2.4 Testing Flink on YARN
Enter the flink01 container:
docker exec -it flink01 /bin/bash
cd /opt/bigdata/flink/
1. Session-mode test:
Open a YARN session; a yarn-session must be running before any job can be submitted.
bin/yarn-session.sh -s 1 -jm 1024 -tm 1024 -d
## Older guides pass -n <count> to request a fixed number of TaskManagers; that option
## was removed before Flink 1.13, where TaskManagers are allocated on demand.
-s  number of slots per TaskManager
-jm memory size of the JobManager, in MB
-tm memory size of each TaskManager, in MB
-d  run detached, as a background process
Submit a job:
bin/flink run /opt/bigdata/flink/examples/batch/WordCount.jar
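From YARN's side, the detached session shows up as a running application:
yarn application -list -appStates RUNNING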
2. Per-Job mode test (submitting the Flink job directly to YARN):
Submit a job in per-job mode. The memory settings can be omitted, in which case defaults apply; note that with the -t yarn-per-job target, memory is set via -D properties rather than the legacy -yjm/-ytm flags.
Enter the flink01 container:
# cd /opt/bigdata/flink
# bin/flink run -t yarn-per-job -Djobmanager.memory.process.size=1024m -Dtaskmanager.memory.process.size=1024m /opt/bigdata/flink/examples/batch/WordCount.jar
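Once the job completes, it appears among YARN's finished applications:
yarn application -list -appStates FINISHED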