【Oracle RAC Database】Installing Grid Infrastructure on CentOS 7.5
[root@node01 ~]# cat >> /etc/hosts <<EOF
# Public Network - (eth0)
192.168.1.101 node01
192.168.1.102 node02
# Public Virtual IP (VIP) addresses - (eth0:1)
192.168.1.111 node01-vip
192.168.1.112 node02-vip
EOF
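Before going further, it is worth confirming that the names resolve and the public network is reachable; a minimal check, assuming node02 is already up at 192.168.1.102:
[root@node01 ~]# getent hosts node01-vip    # should resolve to 192.168.1.111 from /etc/hosts
[root@node01 ~]# ping -c 2 node02           # verifies name resolution and public-network connectivity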
[root@node01 ~]# yum -y install gcc gcc-c++ glibc glibc-common ksh make sysstat \
binutils elfutils-libelf elfutils-libelf-devel elfutils-libelf-devel-static \
glibc-devel glibc-headers libaio libaio-devel libstdc++ libstdc++-devel \
compat-libcap1 compat-libstdc++-33 libXext unixODBC unixODBC-devel
[root@node01 ~]# CVUQDISK_GRP=oinstall
[root@node01 ~]# export CVUQDISK_GRP
[root@node01 ~]# yum install -y smartmontools
[root@node01 ~]# rpm -ivh cvuqdisk-1.0.9-1.rpm
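cvuqdisk is what lets the Cluster Verification Utility discover shared disks; smartmontools is installed first because cvuqdisk depends on it, and CVUQDISK_GRP tells the package which group should own the binary. A quick confirmation that it installed:
[root@node01 ~]# rpm -q cvuqdisk    # should report cvuqdisk-1.0.9-1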
[root@node01 ~]# yum install -y ntp
[root@node01 ~]# vim /etc/ntp.conf
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
server ntp4.aliyun.com iburst
[root@node01 ~]# vim /etc/sysconfig/ntpd
# Command line options for ntpd
OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid -g"
[root@node01 ~]# systemctl start ntpd.service
[root@node01 ~]# systemctl enable ntpd.service
[root@node01 ~]# systemctl status ntpd.service
[root@node01 ~]# ntpstat
synchronised to NTP server (203.107.6.88) at stratum 3
time correct to within 961 ms
polling server every 64 s
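The -x option in /etc/sysconfig/ntpd is the important piece: it makes ntpd slew the clock instead of stepping it, which Oracle requires so that the Cluster Time Synchronization Service stays in observer mode. Peer selection can also be inspected directly:
[root@node01 ~]# ntpq -p    # a '*' in the first column marks the peer currently selected for synchronization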
[root@node01 ~]# cat >> /etc/sysctl.conf <<EOF
kernel.shmmax = 2141192192
kernel.shmmni = 4096
kernel.shmall = 2097152
kernel.sem = 250 32000 100 128
fs.file-max = 6815744
fs.aio-max-nr = 1048576
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.all.rp_filter = 0
EOF
[root@node01 ~]# sysctl -p
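sysctl -p loads the new values immediately; re-reading a couple of them confirms the file parsed cleanly:
[root@node01 ~]# sysctl kernel.sem fs.file-max    # should echo back the values written above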
[root@node01 ~]# cat >> /etc/security/limits.conf <<EOF
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
EOF
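These limits only take effect on the grid user's next login; a fresh session shows whether they applied:
[root@node01 ~]# su - grid -c 'ulimit -u -n'    # soft limits: max user processes (2047) and open files (1024)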
[root@node01 ~]# mkdir -p /u01/app/grid
[root@node01 ~]# mkdir -p /u01/app/11.2.0.4/grid
[root@node01 ~]# chown -R grid:oinstall /u01
[root@node01 ~]# su - grid
[grid@node01 ~]$ vi .bash_profile
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0.4/grid
export PATH=$ORACLE_HOME/bin:$PATH
export DISPLAY=192.168.1.100:0
stty erase ^H
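Reloading the profile and echoing the variables verifies the grid environment before moving on:
[grid@node01 ~]$ source ~/.bash_profile
[grid@node01 ~]$ echo $ORACLE_SID $ORACLE_HOME    # expect +ASM1 /u01/app/11.2.0.4/grid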
[root@node02 ~]# cat >> /etc/hosts <<EOF
# Public Network - (eth0)
192.168.1.101 node01
192.168.1.102 node02
# Public Virtual IP (VIP) addresses - (eth0:1)
192.168.1.111 node01-vip
192.168.1.112 node02-vip
EOF
[root@node02 ~]# yum -y install gcc gcc-c++ glibc glibc-common ksh make sysstat \
binutils elfutils-libelf elfutils-libelf-devel elfutils-libelf-devel-static \
glibc-devel glibc-headers libaio libaio-devel libstdc++ libstdc++-devel \
compat-libstdc++-33 compat-libcap1 libXext unixODBC unixODBC-devel
[root@node02 ~]# CVUQDISK_GRP=oinstall
[root@node02 ~]# export CVUQDISK_GRP
[root@node02 ~]# yum install -y smartmontools
[root@node02 ~]# rpm -ivh cvuqdisk-1.0.9-1.rpm
[root@node02 ~]# yum install -y ntp
[root@node02 ~]# vim /etc/ntp.conf
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
server ntp3.aliyun.com iburst
server ntp4.aliyun.com iburst
[root@node02 ~]# vim /etc/sysconfig/ntpd
# Command line options for ntpd
OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid -g"
[root@node02 ~]# systemctl start ntpd.service
[root@node02 ~]# systemctl enable ntpd.service
[root@node02 ~]# systemctl status ntpd.service
[root@node02 ~]# ntpstat
synchronised to NTP server (203.107.6.88) at stratum 3
time correct to within 21 ms
polling server every 64 s
[root@node02 ~]# cat >> /etc/sysctl.conf <<EOF
kernel.shmmax = 2141192192
kernel.shmmni = 4096
kernel.shmall = 2097152
kernel.sem = 250 32000 100 128
fs.file-max = 6815744
fs.aio-max-nr = 1048576
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.all.rp_filter = 0
EOF
[root@node02 ~]# sysctl -p
[root@node02 ~]# cat >> /etc/security/limits.conf <<EOF
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
EOF
[root@node02 ~]# mkdir -p /u01/app/grid
[root@node02 ~]# mkdir -p /u01/app/11.2.0.4/grid
[root@node02 ~]# chown -R grid:oinstall /u01
[root@node02 ~]# su - grid
[grid@node02 ~]$ vi .bash_profile
export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0.4/grid
export PATH=$ORACLE_HOME/bin:$PATH
export DISPLAY=192.168.1.100:0
stty erase ^H
[grid@node01 ~]$ ssh-keygen
[grid@node01 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub grid@node01
[grid@node01 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub grid@node02
[grid@node01 ~]$ (ssh node01 "date;hostname";ssh node02 "date;hostname")
Fri Aug 18 14:45:13 CST 2023
node01
Fri Aug 18 13:33:48 CST 2023
node02
[grid@node02 ~]$ ssh-keygen
[grid@node02 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub grid@node01
[grid@node02 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub grid@node02
[grid@node02 ~]$ (ssh node01 "date;hostname";ssh node02 "date;hostname")
Fri Aug 18 14:45:24 CST 2023
node01
Fri Aug 18 13:34:00 CST 2023
node02
[grid@node01 ~]$ unzip p13390677_112040_Linux-x86-64_3of7.zip
[grid@node01 ~]$ unzip p19404309_112040_Linux-x86-64.zip
[grid@node01 ~]$ cp b19404309/grid/cvu_prereq.xml /home/grid/grid/stage/cvu/
[grid@node01 ~]$ cd grid
[grid@node01 grid]$ ./runcluvfy.sh stage -pre crsinst -n node01,node02 -fixup -verbose >precheck.txt
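The report lands in precheck.txt; anything marked failed must be fixed (or consciously waived) before launching the installer. One quick way to scan it:
[grid@node01 grid]$ grep -iE 'failed|error' precheck.txt    # empty output, or only ignorable items, means the nodes are ready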
[grid@node01 grid]$ ./runInstaller
[root@node01 ~]# /u01/app/oraInventory/orainstRoot.sh
[root@node02 ~]# /u01/app/oraInventory/orainstRoot.sh
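On both nodes, orainstRoot.sh creates the central inventory pointer and opens the inventory to the oinstall group; the pointer file is easy to verify:
[root@node01 ~]# cat /etc/oraInst.loc    # should show inventory_loc=/u01/app/oraInventory and inst_group=oinstall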
[grid@node01 ~]$ unzip p18370031_112040_Linux-x86-64.zip
[grid@node01 ~]$ cd $ORACLE_HOME/OPatch
[grid@node01 OPatch]$ ./opatch napply -local /home/grid/18370031
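Patch 18370031 works around the known ohasd startup failure on RHEL/CentOS 7 (root.sh hangs because 11.2.0.4 predates systemd), which is why it is applied to the Grid home before root.sh runs. Confirming it registered in the inventory:
[grid@node01 OPatch]$ ./opatch lsinventory | grep 18370031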
[root@node01 ~]# /u01/app/11.2.0.4/grid/root.sh
[grid@node02 ~]$ unzip p18370031_112040_Linux-x86-64.zip
[grid@node02 ~]$ cd $ORACLE_HOME/OPatch
[grid@node02 OPatch]$ ./opatch napply -local /home/grid/18370031
[root@node02 ~]# /u01/app/11.2.0.4/grid/root.sh
[grid@node01 ~]$ crsctl check cluster -all
**************************************************************
node01:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
node02:
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
**************************************************************
[grid@node01 ~]$ crsctl status res -t
--------------------------------------------------------------------------------
NAME           TARGET  STATE        SERVER                   STATE_DETAILS
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       node01
               ONLINE  ONLINE       node02
ora.OCR_VF.dg
               ONLINE  ONLINE       node01
               ONLINE  ONLINE       node02
ora.asm
               ONLINE  ONLINE       node01                   Started
               ONLINE  ONLINE       node02                   Started
ora.gsd
               OFFLINE OFFLINE      node01
               OFFLINE OFFLINE      node02
ora.net1.network
               ONLINE  ONLINE       node01
               ONLINE  ONLINE       node02
ora.ons
               ONLINE  ONLINE       node01
               ONLINE  ONLINE       node02
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       node02
ora.LISTENER_SCAN2.lsnr
      1        ONLINE  ONLINE       node01
ora.LISTENER_SCAN3.lsnr
      1        ONLINE  ONLINE       node01
ora.cvu
      1        ONLINE  ONLINE       node01
ora.node01.vip
      1        ONLINE  ONLINE       node01
ora.node02.vip
      1        ONLINE  ONLINE       node02
ora.oc4j
      1        ONLINE  ONLINE       node01
ora.scan1.vip
      1        ONLINE  ONLINE       node02
ora.scan2.vip
      1        ONLINE  ONLINE       node01
ora.scan3.vip
      1        ONLINE  ONLINE       node01
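With every resource online, a couple of final checks confirm the cluster version and node membership (both tools ship in the Grid home just installed):
[grid@node01 ~]$ crsctl query crs activeversion    # should report 11.2.0.4.0
[grid@node01 ~]$ olsnodes -n -s                    # both nodes listed with status Active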