1、系统准备
1.1、系统版本
[root@hisdb1 ~]# cat /etc/centos-release
CentOS Linux release 7.9.2009 (Core)
#准备两台虚拟机
1.2、移除虚拟嵌套
[root@hisdb1 ~]# yum remove libvirt-libs
#移除两节点虚拟嵌套功能
2、网络配置
备注:2.1-2.2 双节点执行
2.1、添加网卡
分别给两台主机添加1块网卡
启动系统
[root@hisdb1 ~]# nmcli con show
NAME UUID TYPE DEVICE
ens33 b08da71d-7032-400a-943c-27c60dc879f3 ethernet ens33
Wired connection 1 2235a629-6bb6-3a33-8d64-f01f113a2a84 ethernet ens37
[root@hisdb2 ~]# nmcli con show
NAME UUID TYPE DEVICE
ens33 77451799-e66d-4d4b-869c-ccb54abcd8f6 ethernet ens33
Wired connection 1 1f744403-9ba8-3b58-9112-c4bdcb9528a4 ethernet ens37
ens37缺少配置文件,拷贝ens33的
然后按如下修改:
[root@hisdb1 network-scripts]# cat ifcfg-ens37
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens37
UUID=2235a629-6bb6-3a33-8d64-f01f113a2a84
DEVICE=ens37
ONBOOT=yes
IPADDR=192.168.11.132
HWADDR=00:0c:29:75:f5:9b
PREFIX=24
#GATEWAY=192.168.133.2
#DNS1=192.168.133.2
IPV6_PRIVACY=no
[root@hisdb1 network-scripts]# service network restart
注意:ens33与ens37中BOOTPROTO=none均需修改为static
2.2、修改hosts文件
# cat <<EOF>>/etc/hosts
#Public IP (ens33)
192.168.133.112 hisdb1
192.168.133.113 hisdb2
#Private IP (ens37)
192.168.11.132 hisdb1-priv
192.168.11.133 hisdb2-priv
#Virtual IP
192.168.133.114 hisdb1-vip
192.168.133.115 hisdb2-vip
#Scan IP
192.168.133.116 hisdb-scan
EOF
#此处公网和私网能ping通,其它三个不能ping通才正常
3、安装准备
备注:3.1-3.13两个节点执行
3.1、配置Yum源
[root@hisdb1 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 3.8G 0 3.8G 0% /dev
tmpfs 3.9G 0 3.9G 0% /dev/shm
tmpfs 3.9G 21M 3.8G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/mapper/centos-root 90G 4.6G 86G 6% /
/dev/sda1 2.0G 185M 1.9G 10% /boot
tmpfs 781M 0 781M 0% /run/user/0
tmpfs 781M 48K 781M 1% /run/user/1000
/dev/sr0 4.4G 4.4G 0 100% /run/media/liujun/CentOS 7 x86_64
[root@hisdb1 ~]# mount /dev/sr0 /mnt
[root@hisdb1 yum.repos.d]# cat <<EOF>>/etc/yum.repos.d/local.repo
[local]
name=local
baseurl=file:///mnt
gpgcheck=0
enabled=1
EOF
[root@hisdb1 yum.repos.d]# yum makecache
3.2、安装依赖包
[root@hisdb1 yum.repos.d]# yum groupinstall -y "Server with GUI"
yum install -y bc \
binutils \
compat-libcap1 \
compat-libstdc++-33 \
gcc \
gcc-c++ \
elfutils-libelf \
elfutils-libelf-devel \
glibc \
glibc-devel \
ksh \
libaio \
libaio-devel \
libgcc \
libstdc++ \
libstdc++-devel \
libxcb \
libX11 \
libXau \
libXi \
libXtst \
libXrender \
libXrender-devel \
make \
net-tools \
nfs-utils \
smartmontools \
sysstat \
e2fsprogs \
e2fsprogs-libs \
fontconfig-devel \
expect \
unzip \
openssh-clients \
readline* \
tigervnc* \
psmisc --skip-broken
#手动上传并安装依赖包:pdksh-5.2.14-37.el5.x86_64.rpm和compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm
[root@hisdb1 yum.repos.d]# rpm -ivh compat-libstdc++-33-3.2.3-72.el7.x86_64.rpm
[root@hisdb1 yum.repos.d]# rpm -e ksh-20120801-142.el7.x86_64
[root@hisdb1 yum.repos.d]# rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm
检查依赖包安装情况
# rpm -q bc binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++
elfutils-libelf elfutils-libelf-devel glibc glibc-devel ksh libaio libaio-devel
libgcc libstdc++ libstdc++-devel libxcb libX11 libXau libXi libXtst libXrender
libXrender-devel make net-tools nfs-utils smartmontools sysstat e2fsprogs
e2fsprogs-libs fontconfig-devel expect unzip openssh-clients readline | grep
"not installed"
3.3、关闭防火墙
# systemctl status firewalld.service
# systemctl stop firewalld.service
# systemctl disable firewalld.service
3.4、禁用selinux
将SELINUX修改为disabled
# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
注意:需重启主机,才能生效
3.5、时间同步配置
禁用chronyd
# yum -y install chrony
# timedatectl set-timezone Asia/Shanghai
# systemctl stop chronyd.service
# systemctl disable chronyd.service
禁用ntpd
# yum -y install ntpdate
# cat <<EOF>>/var/spool/cron/root
00 12 * * * /usr/sbin/ntpdate -u 182.92.12.11 && /usr/sbin/hwclock -w
EOF
注意:182.92.12.11为时间服务器,每天12点同步系统时间
查看任务
# crontab -l
00 12 * * * /usr/sbin/ntpdate -u 182.92.12.11 && /usr/sbin/hwclock -w
手动执行
# /usr/sbin/ntpdate -u 182.92.12.11 && /usr/sbin/hwclock -w
8 Feb 09:55:44 ntpdate[58415]:step time server 182.92.12.11 offset 2.185336 sec
3.6、关闭透明大页和numa
关闭透明大页和numa,重启生效
[root@hisdb1 ~]# cat /etc/default/grub
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet"
GRUB_DISABLE_RECOVERY="true"
[root@hisdb1 ~]# sed -i 's/quiet/quiet transparent_hugepage=never numa=off/' /etc/default/grub
[root@hisdb1 ~]# cat /etc/default/grub
GRUB_TIMEOUT=5
GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
GRUB_DEFAULT=saved
GRUB_DISABLE_SUBMENU=true
GRUB_TERMINAL_OUTPUT="console"
GRUB_CMDLINE_LINUX="crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root rd.lvm.lv=centos/swap rhgb quiet transparent_hugepage=never numa=off"
GRUB_DISABLE_RECOVERY="true"
[root@hisdb1 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
Generating grub configuration file ...
Found linux image: /boot/vmlinuz-3.10.0-1160.el7.x86_64
Found initrd image: /boot/initramfs-3.10.0-1160.el7.x86_64.img
Found linux image: /boot/vmlinuz-0-rescue-954f0e9a62d9487e8bcb8e8598b71fab
Found initrd image: /boot/initramfs-0-rescue-954f0e9a62d9487e8bcb8e8598b71fab.img
done
重启后检查是否生效
[root@hisdb1 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled
[always] madvise never
[root@hisdb1 ~]# cat /proc/cmdline
BOOT_IMAGE=/vmlinuz-3.10.0-1160.el7.x86_64 root=/dev/mapper/centos-root
ro crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root
rd.lvm.lv=centos/swap rhgb quiet LANG=en_US.UTF-8
重启后结果
[root@hisdb1 ~]# cat /sys/kernel/mm/transparent_hugepage/enabled
always madvise [never]
[root@hisdb1 ~]# cat /proc/cmdline
BOOT_IMAGE=/vmlinuz-3.10.0-1160.el7.x86_64 root=/dev/mapper/centos-root
ro crashkernel=auto spectre_v2=retpoline rd.lvm.lv=centos/root
rd.lvm.lv=centos/swap rhgb quiet transparent_hugepage=never numa=off
3.7、avahi-daemon配置
[root@hisdb1 ~]# yum install -y avahi*
[root@hisdb1 ~]# systemctl stop avahi-daemon.socket
[root@hisdb1 ~]# systemctl stop avahi-daemon.service
[root@hisdb1 ~]# pgrep -f avahi-daemon | awk '{print "kill -9 "$1}' | sh
配置NOZEROCONF=yes
[root@hisdb1 ~]# cat <<EOF>>/etc/sysconfig/network
NOZEROCONF=yes
EOF
查看状态
[root@hisdb1 ~]# systemctl status avahi-daemon.socket
● avahi-daemon.socket - Avahi mDNS/DNS-SD Stack
Activation SocketLoaded: loaded
(/usr/lib/systemd/system/avahi-daemon.socket; enabled; vendor preset: enabled)Active: inactive (dead) since
Tue 2022-02-08 10:15:49 CST; 7min agoListen:
/var/run/avahi-daemon/socket (Stream)Feb 08 10:10:20 hisdb1 systemd[1]: Listening on Avahi mDNS/DNS-SD Stack
Activation Socket.Feb 08 10:15:49 hisdb1 systemd[1]: Closed Avahi mDNS/DNS-SD Stack
Activation Socket.[root@hisdb1 ~]# systemctl status avahi-daemon.service
● avahi-daemon.service - Avahi mDNS/DNS-SD Stack
Loaded: loaded
(/usr/lib/systemd/system/avahi-daemon.service; enabled; vendor preset: enabled)Active: inactive (dead) since
Tue 2022-02-08 10:15:49 CST; 8min agoProcess: 763
ExecStart=/usr/sbin/avahi-daemon -s (code=exited, status=0/SUCCESS)Main PID: 763 (code=exited,
status=0/SUCCESS)Status: "avahi-daemon
0.6.31 starting up."Feb 08 10:15:20 hisdb1 avahi-daemon[763]: Files changed, reloading.
Feb 08 10:15:20 hisdb1 avahi-daemon[763]: No service file found in
/etc/avahi/services.Feb 08 10:15:20 hisdb1 avahi-daemon[763]: Files changed, reloading.
Feb 08 10:15:20 hisdb1 avahi-daemon[763]: No service file found in
/etc/avahi/services.Feb 08 10:15:49 hisdb1 avahi-daemon[763]: Got SIGTERM, quitting.
Feb 08 10:15:49 hisdb1 systemd[1]: Stopping Avahi mDNS/DNS-SD Stack...
Feb 08 10:15:49 hisdb1 avahi-daemon[763]: Leaving mDNS multicast group on
interface ens37.IPv4 with address 192.168.11.132.Feb 08 10:15:49 hisdb1 avahi-daemon[763]: Leaving mDNS multicast group on
interface ens33.IPv4 with address 192.168.133.112.Feb 08 10:15:49 hisdb1 avahi-daemon[763]: avahi-daemon 0.6.31 exiting.
Feb 08 10:15:49 hisdb1 systemd[1]: Stopped Avahi mDNS/DNS-SD Stack.
3.8、修改/etc/sysctl.conf文件
# cat <<EOF>>/etc/sysctl.conf
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 8181829631
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.conf.ens33.rp_filter = 1
net.ipv4.conf.ens37.rp_filter = 2
EOF
# /sbin/sysctl -p
3.9、修改用户限制
[root@hisdb1 ~]# cat <<EOF>>/etc/security/limits.conf
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
oracle soft nproc 2047
oracle hard nproc 16384
oracle hard memlock 134217728
oracle soft memlock 134217728
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
grid soft nproc 2047
grid hard nproc 16384
EOF
3.10、修改/etc/pam.d/login文件
[root@hisdb1 ~]# cat <<EOF>>/etc/pam.d/login
session required pam_limits.so
session required /lib64/security/pam_limits.so
EOF
3.11、创建用户、用户组、目录
创建组
groupadd oinstall
groupadd dba
groupadd oper
groupadd backupdba
groupadd dgdba
groupadd kmdba
groupadd asmdba
groupadd asmoper
groupadd asmadmin
groupadd racdba
创建用户
useradd -g oinstall -G asmadmin,asmdba,asmoper,dba,racdba,oper grid
useradd -g oinstall -G asmdba,dba,backupdba,dgdba,kmdba,racdba,oper oracle
修改密码
echo "oracle" |passwd oracle --stdin
echo "grid" |passwd grid --stdin
创建目录
mkdir -p /u01/app/11.2.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle/product/11.2.0/db
mkdir -p /u01/app/oraInventory
mkdir -p /backup
mkdir -p /home/oracle/scripts
chown -R oracle:oinstall /backup
chown -R oracle:oinstall /home/oracle/scripts
chown -R grid:oinstall /u01
chown -R grid:oinstall /u01/app/grid
chown -R grid:oinstall /u01/app/11.2.0/grid
chown -R grid:oinstall /u01/app/oraInventory
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01
3.12、设置grid和oracle用户的环境变量
grid用户:
$ cat <<EOF>>/home/grid/.bash_profile
umask 022
export TMP=/tmp
export TMPDIR=\$TMP
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_TERM=xterm
export TNS_ADMIN=\$ORACLE_HOME/network/admin
export LD_LIBRARY_PATH=\$ORACLE_HOME/lib:/lib:/usr/lib
export ORACLE_SID=+ASM1
export PATH=/usr/sbin:\$PATH
export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$PATH
alias sas='sqlplus / as sysasm'
export PS1="[\`whoami\`@\`hostname\`:"'\$PWD]\$ '
EOF
$ source .bash_profile
注意:节点2为+ASM2
oracle用户:
$ cat <<EOF>>/home/oracle/.bash_profile
umask 022
export TMP=/tmp
export TMPDIR=\$TMP
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=\$ORACLE_BASE/product/11.2.0/db
export ORACLE_TERM=xterm
export TNS_ADMIN=\$ORACLE_HOME/network/admin
export LD_LIBRARY_PATH=\$ORACLE_HOME/lib:/lib:/usr/lib
export ORACLE_SID=ORCL1
export PATH=/usr/sbin:\$PATH
export PATH=\$ORACLE_HOME/bin:\$ORACLE_HOME/OPatch:\$PATH
EOF
$ source .bash_profile
注意:节点2 为ORCL2
3.13、修改/etc/profile文件
# vim /etc/profile
末尾添加:
if [ $USER =
"oracle" ] || [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
umask 022
fi
#source /etc/profile
备注:此为设置系统限制
4、存储配置
备注:4.1-4.2在windows执行,4.3双节点执行
共享存储可以使用第三方软件提供的方式来共享,也可以使用 WMware Workstation软件进行存储共享,或者使用ISCSI网络存储服务来配置共享存储.
本次使用ISCSI网络存储服务来模拟共享存储,常用ISCSI配置共享存储的软件:Openfiler 和 StarWind,本次讲解StarWind 软件.
4.1、安装StarWind
说明:StarWind软件安装在windows 8.
解压starwind6_jb51安装包
双击starwind.6.0.5713
starwind安装完毕,注意安装时需将杀毒软件关闭.
4.2、配置starwind iscsi
通过软件配置共享存储,打开starwind软件
新建starwind server
cmd命令查看本机ip
填写本机ip,点击ok
新建成功后,选择server,双击connect连接:
填写target别名
添加device存储盘
选择虚拟硬盘
选择镜像文件
创建新的虚拟盘
选择镜像文件路径和大小
选择已有target
starwind共享存储配置完成,按此方法再添加2块5g ocr磁盘,5块data磁盘.
备注:第一次只配置了3块data磁盘,后面安装gi时发现磁盘组+DATA空间不够后额外添加2块磁盘给磁盘组扩容.
4.3、连接共享存储
linux通过iscsi连接共享存储
4.3.1、linux客户端安装iscsi
# yum install -y iscsi-initiator-utils*
4.3.2、搜索服务端iscsi target
# iscsiadm -m discovery -t st -p 192.168.133.159
192.168.133.159:3260,-1 iqn.2008-08.com.starwindsoftware:192.168.133.159-liujun
4.3.3、连接服务端iscsi共享存储
# iscsiadm -m node -T iqn.2008-08.com.starwindsoftware:192.168.133.159-liujun -p 192.168.133.159 -l
Logging in to [iface: default,
target: iqn.2008-08.com.starwindsoftware:192.168.133.159-liujun, portal:
192.168.133.159,3260] (multiple)Login to [iface: default, target:
iqn.2008-08.com.starwindsoftware:192.168.133.159-liujun, portal:
192.168.133.159,3260] successful.
4.3.4、linux客户端查看共享存储
[root@hisdb1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0
100G 0 disk├─sda1 8:1 0
2G 0 part /boot└─sda2 8:2 0
98G 0 part├─centos-root 253:0 0 90G 0
lvm /
└─centos-swap 253:1 0 8G 0
lvm [SWAP]sdb 8:16 0
5G 0 disk└─ocr_1 253:3 0
5G 0 mpathsdc 8:32 0
5G 0 disk└─ocr_2 253:2 0
5G 0 mpathsdd 8:48 0
5G 0 disk└─ocr_3 253:4 0
5G 0 mpathsde 8:64 0
10G 0 disk└─data_1 253:7 0
10G 0 mpathsdf 8:80 0
10G 0 disk└─data_2 253:6 0
10G 0 mpathsdg 8:96 0
10G 0 disk└─data_3 253:9 0
10G 0 mpathsdh 8:112 0
10G 0 disk└─data_4 253:5 0
10G 0 mpathsdi 8:128 0
10G 0 disk└─data_5 253:8 0
10G 0 mpathsr0 11:0
1 4.4G 0 rom
如上所示,sdb至sdi为挂载磁盘
5、multipath
备注:5.1-5.5两个节点执行
5.1、安装multipath
[root@hisdb1 ~]# yum -y install device-mapper*
[root@hisdb1 ~]# mpathconf --enable --with_multipathd y
5.2、查看共享盘的scsi_id
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sdb
2949b33226e13a67e
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sdc
2c8b83cb31aac6f39
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sdd
2db2a03bea26bf86f
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sde
28b5c47889192de60
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sdf
259760f4c88f48992
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sdg
2507b8b00f05efdda
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sdh
27328bdb3122301cd
[root@hisdb1 ~]#/usr/lib/udev/scsi_id -g -u /dev/sdi
23f9852c09f79cbd8
5.3、配置multipath
wwid值为上面获取的scsi_id,alias可自定义,这里配置3块OCR盘,5块DATA盘
[root@hisdb1 ~]# cat <<EOF>/etc/multipath.conf
defaults {
user_friendly_names yes
}
blacklist {
devnode "^sda"
}
multipaths {
multipath {
wwid "2949b33226e13a67e"
alias ocr_1
}
multipath {
wwid "2c8b83cb31aac6f39"
alias ocr_2
}
multipath {
wwid "2db2a03bea26bf86f"
alias ocr_3
}
multipath {
wwid "28b5c47889192de60"
alias data_1
}
multipath {
wwid "259760f4c88f48992"
alias data_2
}
multipath {
wwid "2507b8b00f05efdda"
alias data_3
}
multipath {
wwid "27328bdb3122301cd"
alias data_4
}
multipath {
wwid "23f9852c09f79cbd8"
alias data_5
}
}
EOF
5.4、激活multipath多路径
[root@hisdb1 ~]# multipath -F
[root@hisdb1 ~]# multipath -v2
create: ocr_1 (2949b33226e13a67e) undef ROCKET ,IMAGEFILE
size=5.0G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:0 sdb
8:16 undef ready runningcreate: ocr_2 (2c8b83cb31aac6f39) undef ROCKET ,IMAGEFILE
size=5.0G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:1 sdc
8:32 undef ready runningcreate: ocr_3 (2db2a03bea26bf86f) undef ROCKET ,IMAGEFILE
size=5.0G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:2 sdd
8:48 undef ready runningcreate: data_1 (28b5c47889192de60) undef ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:3 sde
8:64 undef ready runningcreate: data_2 (259760f4c88f48992) undef ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:4 sdf
8:80 undef ready runningcreate: data_3 (2507b8b00f05efdda) undef ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:5 sdg
8:96 undef ready runningcreate: data_4 (27328bdb3122301cd) undef ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:6 sdh
8:112 undef ready runningcreate: data_5 (23f9852c09f79cbd8) undef ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=undef
`-+- policy='service-time 0' prio=1 status=undef
`- 3:0:0:7 sdi
8:128 undef ready running[root@hisdb1 ~]# multipath -ll
data_5 (23f9852c09f79cbd8) dm-9 ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:7 sdi
8:128 active ready runningdata_4 (27328bdb3122301cd) dm-8 ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:6 sdh
8:112 active ready runningdata_3 (2507b8b00f05efdda) dm-7 ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:5 sdg
8:96 active ready runningdata_2 (259760f4c88f48992) dm-6 ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:4 sdf
8:80 active ready runningocr_3 (2db2a03bea26bf86f) dm-4 ROCKET ,IMAGEFILE
size=5.0G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:2 sdd
8:48 active ready runningdata_1 (28b5c47889192de60) dm-5 ROCKET ,IMAGEFILE
size=10G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:3 sde 8:64
active ready runningocr_2 (2c8b83cb31aac6f39) dm-3 ROCKET ,IMAGEFILE
size=5.0G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:1 sdc
8:32 active ready runningocr_1 (2949b33226e13a67e) dm-2 ROCKET ,IMAGEFILE
size=5.0G features='0' hwhandler='0' wp=rw
`-+- policy='service-time 0' prio=1 status=active
`- 3:0:0:0 sdb
8:16 active ready running
5.5、绑盘
[root@hisdb1 rules.d]# for i in b c d e f g h i; do
echo "KERNEL==\"sd*\",ENV{DEVTYPE}==\"disk\",SUBSYSTEM==\"block\",PROGRAM==\"/usr/lib/udev/scsi_id -g -u -d \$devnode\",RESULT==\"`/usr/lib/udev/scsi_id -g -u /dev/sd$i`\", RUN+=\"/bin/sh -c 'mknod /dev/asm-disk$i b \$major \$minor; chown grid:asmadmin /dev/asm-disk$i; chmod 0660 /dev/asm-disk$i'\"" >> /etc/udev/rules.d/99-oracle-asmdevices.rules
done
[root@hisdb1 rules.d]# cat 99-oracle-asmdevices.rules
KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="2949b33226e13a67e", RUN+="/bin/sh
-c 'mknod /dev/asm-diskb b $major
$minor; chown grid:asmadmin /dev/asm-diskb; chmod 0660 /dev/asm-diskb'"KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="2c8b83cb31aac6f39", RUN+="/bin/sh
-c 'mknod /dev/asm-diskc b $major
$minor; chown grid:asmadmin /dev/asm-diskc; chmod 0660 /dev/asm-diskc'"KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="2db2a03bea26bf86f",
RUN+="/bin/sh -c 'mknod /dev/asm-diskd b
$major $minor; chown grid:asmadmin /dev/asm-diskd; chmod 0660
/dev/asm-diskd'"KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="28b5c47889192de60",
RUN+="/bin/sh -c 'mknod /dev/asm-diske b
$major $minor; chown grid:asmadmin /dev/asm-diske; chmod 0660
/dev/asm-diske'"KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="259760f4c88f48992",
RUN+="/bin/sh -c 'mknod /dev/asm-diskf b
$major $minor; chown grid:asmadmin /dev/asm-diskf; chmod 0660
/dev/asm-diskf'"KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="2507b8b00f05efdda",
RUN+="/bin/sh -c 'mknod /dev/asm-diskg b
$major $minor; chown grid:asmadmin /dev/asm-diskg; chmod 0660
/dev/asm-diskg'"KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="27328bdb3122301cd",
RUN+="/bin/sh -c 'mknod /dev/asm-diskh b
$major $minor; chown grid:asmadmin /dev/asm-diskh; chmod 0660
/dev/asm-diskh'"KERNEL=="sd*",ENV{DEVTYPE}=="disk",SUBSYSTEM=="block",PROGRAM=="/usr/lib/udev/scsi_id
-g -u -d $devnode",RESULT=="23f9852c09f79cbd8",
RUN+="/bin/sh -c 'mknod /dev/asm-diski b
$major $minor; chown grid:asmadmin /dev/asm-diski; chmod 0660
/dev/asm-diski'"
6、软件安装包
节点1执行
# mkdir /soft
上传安装介质到/soft目录
p13390677_112040_Linux-x86-64_1of7.zip
p13390677_112040_Linux-x86-64_2of7.zip
p13390677_112040_Linux-x86-64_3of7.zip
解压安装介质
cd /soft
unzip -q p13390677_112040_Linux-x86-64_1of7.zip
unzip -q p13390677_112040_Linux-x86-64_2of7.zip
unzip -q p13390677_112040_Linux-x86-64_3of7.zip
授权目录
chown -R oracle:oinstall /soft/database
chown -R grid:oinstall /soft/grid
双节点执行
root用户下,cvuqdisk安装
cd /soft/grid/rpm
rpm -ivh cvuqdisk-1.0.9-1.rpm
传输到节点二安装
scp cvuqdisk-1.0.9-1.rpm hisdb2:/tmp
rpm -ivh /tmp/cvuqdisk-1.0.9-1.rpm
7、互信
# cd /soft/grid/sshsetup    # sshUserSetup.sh 用来建立互信
节点1执行
#./sshUserSetup.sh -user grid -hosts "hisdb1 hisdb2" -advanced -exverify -confirm
#./sshUserSetup.sh -user oracle -hosts "hisdb1 hisdb2" -advanced -exverify -confirm
测试互信
双节点执行
grid用户:
[grid@hisdb1:/home/grid]$ ssh hisdb1 date
Tue Feb 8 12:00:22 CST 2022
[grid@hisdb1:/home/grid]$ ssh hisdb2 date
Tue Feb 8 12:00:29 CST 2022
[grid@hisdb1:/home/grid]$ ssh hisdb1-priv date
The authenticity of host 'hisdb1-priv (192.168.11.132)' can't be
established.ECDSA key fingerprint is
SHA256:UD0pBe7zXgbfGVo1MQTuoNt9YtbIbc72mOPuwfyqbtI.ECDSA key fingerprint is MD5:9f:d5:0a:bd:ca:5d:22:35:aa:2f:82:9c:c3:84:76:88.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hisdb1-priv,192.168.11.132' (ECDSA) to the
list of known hosts.Tue Feb 8 12:01:05 CST 2022
[grid@hisdb1:/home/grid]$ ssh hisdb1-priv date
Tue Feb 8 12:01:11 CST 2022
[grid@hisdb1:/home/grid]$ ssh hisdb2-priv date
The authenticity of host 'hisdb2-priv (192.168.11.133)' can't be
established.ECDSA key fingerprint is
SHA256:JGSb/CtbyGV26THZOlQQwnONHH+HUXlVy4eWAzIyxlI.ECDSA key fingerprint is
MD5:54:f9:cd:9c:13:a7:79:e0:eb:04:c8:20:e9:ca:5c:d8.Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hisdb2-priv,192.168.11.133' (ECDSA) to the
list of known hosts.Tue Feb 8 12:01:24 CST 2022
[grid@hisdb1:/home/grid]$ ssh hisdb2-priv date
Tue Feb 8 12:01:27 CST 2022
oracle用户
[oracle@hisdb1:/home/oracle]$ ssh hisdb1 date
Tue Feb 8 12:02:20 CST 2022
[oracle@hisdb1:/home/oracle]$ ssh hisdb2 date
Tue Feb 8 12:02:27 CST 2022
[oracle@hisdb1:/home/oracle]$ ssh hisdb1-priv date
The authenticity of host 'hisdb1-priv (192.168.11.132)' can't be
established.ECDSA key fingerprint is
SHA256:UD0pBe7zXgbfGVo1MQTuoNt9YtbIbc72mOPuwfyqbtI.ECDSA key fingerprint is MD5:9f:d5:0a:bd:ca:5d:22:35:aa:2f:82:9c:c3:84:76:88.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hisdb1-priv,192.168.11.132' (ECDSA) to the
list of known hosts.Tue Feb 8 12:02:38 CST 2022
[oracle@hisdb1:/home/oracle]$ ssh hisdb1-priv date
Tue Feb 8 12:02:41 CST 2022
[oracle@hisdb1:/home/oracle]$ ssh hisdb2-priv date
The authenticity of host 'hisdb2-priv (192.168.11.133)' can't be
established.ECDSA key fingerprint is
SHA256:JGSb/CtbyGV26THZOlQQwnONHH+HUXlVy4eWAzIyxlI.ECDSA key fingerprint is MD5:54:f9:cd:9c:13:a7:79:e0:eb:04:c8:20:e9:ca:5c:d8.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'hisdb2-priv,192.168.11.133' (ECDSA) to the
list of known hosts.Tue Feb 8 12:02:56 CST 2022
[oracle@hisdb1:/home/oracle]$ ssh hisdb2-priv date
Tue Feb 8 12:02:58 CST 2022
#需要达到不输入yes
8、安装Grid
备注:节点1执行,安装前重启系统
[grid@hisdb1:/home/grid]$ export DISPLAY=192.168.133.1:0.0
[grid@hisdb1:/home/grid]$ cd /soft/grid
执行安装程序开始安装
[grid@hisdb1:/soft/grid]$ ./runInstaller
跳过版本更新
选择集群模式安装
注意:scan名称必须与/etc/hosts中配置的scan名称保持一致.
配置grid用户节点间互信
注意:点击Add添加节点二,pubile hostname为hisdb2, virtual hostname为hisdb2-vip
输入密码,点击setup开始互信
注意:OCR裁决盘这里冗余模式External,Normal,High对应磁盘数量为1,3,5.
不使用IPMI
此处若直接运行脚本,在执行第二个时会出现如下报错:
Adding
Clusterware entries to inittab
ohasd
failed to start
Failed
to start the Clusterware. Last 20 lines of the alert log follow:
2022-01-07
00:14:00.117:
[client(39969)]CRS-2101:The
OLR was formatted using version 3.
原因:RHEL7使用systemd而不是initd运行进程和重启进程,而root.sh是通过传统的initd运行ohasd进程,此为Linux7安装11204版本,执行root.sh时存在的BUG,需要在执行root.sh前安装补丁18370031修复
18370031补丁安装(双节点执行)
上传补丁包
p18370031_112040_Linux-x86-64.zip
解压补丁包
cd /soft
unzip -q
p18370031_112040_Linux-x86-64.zip
授权补丁包
chown -R grid:oinstall
/soft/18370031
在grid用户下安装补丁
opatch napply -oh $ORACLE_HOME -local /soft/18370031 -silent
开始执行root脚本(双节点执行)
root用户下执行
/u01/app/oraInventory/orainstRoot.sh
/u01/app/11.2.0/grid/root.sh
未配置DNS,忽略.
以上两个failed忽略,为软件bug.
grid安装完毕.
[grid@hisdb1:/home/grid]$ crs_stat -t
Name Type Target State
Host------------------------------------------------------------
ora....ER.lsnr ora....er.type
ONLINE ONLINE hisdb1ora....N1.lsnr ora....er.type
ONLINE ONLINE hisdb1ora.OCR.dg ora....up.type ONLINE ONLINE
hisdb1ora.asm ora.asm.type ONLINE
ONLINE hisdb1ora.cvu ora.cvu.type ONLINE
ONLINE hisdb1ora.gsd ora.gsd.type OFFLINE
OFFLINEora....SM1.asm application ONLINE
ONLINE hisdb1ora....B1.lsnr application ONLINE
ONLINE hisdb1ora.hisdb1.gsd application OFFLINE
OFFLINEora.hisdb1.ons application ONLINE
ONLINE hisdb1ora.hisdb1.vip ora....t1.type
ONLINE ONLINE hisdb1ora....SM2.asm application ONLINE
ONLINE hisdb2ora....B2.lsnr application ONLINE
ONLINE hisdb2ora.hisdb2.gsd application OFFLINE
OFFLINEora.hisdb2.ons application ONLINE
ONLINE hisdb2ora.hisdb2.vip ora....t1.type
ONLINE ONLINE hisdb2ora....network ora....rk.type
ONLINE ONLINE hisdb1ora.oc4j ora.oc4j.type ONLINE
ONLINE hisdb1ora.ons ora.ons.type ONLINE
ONLINE hisdb1ora.scan1.vip ora....ip.type ONLINE ONLINE
hisdb1
9、配置asm数据磁盘
备注:节点1执行
[grid@hisdb1:/home/grid]$ export DISPLAY=192.168.133.1:0.0
[grid@hisdb1:/home/grid]$ asmca
点击create创建data
点击OK创建DATA
10、安装DB
备注:节点1执行
[oracle@hisdb1 database]$ export DISPLAY=192.168.133.1:0.0
执行安装程序开始安装
[oracle@hisdb1 database]$./runInstaller
不接受oracle更新邮件
跳过软件更新
输入密码,点击setup开始,成功之后点击test.
Linux7安装11204版本,oracle软件安装过程中报错:ins_emagent.mk,需要修改文件/sysman/lib/ins_emagent.mk来修复
执行以下命令,然后点击继续,节点1执行:
$ vi $ORACLE_HOME/sysman/lib/ins_emagent.mk
将$(MK_EMAGENT_NMECTL)修改为:$(MK_EMAGENT_NMECTL) -lnnz11
执行root.sh脚本(hisdb1&hisdb2)
[root@hisdb1 ~]# /u01/app/oracle/product/11.2.0/db/root.sh
至此,DB成功安装
11、创建数据库实例
[oracle@hisdb1:/home/oracle]$export DISPLAY=192.168.133.1:0.0
[oracle@hisdb1:/home/oracle]$ dbca
选择rac模式
关闭闪回区,建库可随时开启,
此处注意开启归档.
选择自定义组件,默认即可
配置初始化参数
注意:如果使用自动管理内存,建议使用70%物理内存,前提是/dev/shm要和物理内存一样大.block默认8K,进程数增加到1500:
12、问题处理
问题1:节点二报错
SQL> startup
ORA-00205: error in identifying
control file, check alert log for more info
权限问题
[root@hisdb2 bin]# pwd
/u01/app/oracle/product/11.2.0/db/bin
[root@hisdb2 bin]# chown -R
oracle:asmadmin oracle
[root@hisdb2 bin]# chmod 6751
oracle
问题2:数据库已启动,但ora.orcl.db却为offline
SQL> shutdown immediate
[oracle@hisdb1:/home/oracle]$
srvctl enable database -d orcl
[oracle@hisdb1:/home/oracle]$
srvctl start database -d orcl
说明:本文参考网址https://cloud.tencent.com/developer/article/1863264.
如需转载请备注出处,此文仅作为技术分享,请勿用作商业用途.如有侵权,请联系博主删帖.