Building an Oracle 19c RAC

Built by tearing down an existing 18c RAC and reinstalling; reference:

http://blog.itpub.net/26736162/viewspace-2220931/

hostnamectl set-hostname raclhr-19c-n1
hostnamectl set-hostname raclhr-19c-n2


#Public IP
192.168.59.52            raclhr-19c-n1
192.168.59.53            raclhr-19c-n2

#Private IP
192.168.2.52             raclhr-19c-n1-priv
192.168.2.53             raclhr-19c-n2-priv

#Virtual IP
192.168.59.54            raclhr-19c-n1-vip
192.168.59.55            raclhr-19c-n2-vip

#Scan IP
192.168.59.56            raclhr-19c-scan
192.168.59.57            raclhr-19c-scan
192.168.59.58            raclhr-19c-scan



mkdir -p /u01/app/19.3.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle
mkdir -p /u01/app/oracle/product/19.3.0/dbhome_1
chown -R grid:oinstall /u01
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/

mkdir -p /u01/app/oraInventory
chown -R grid:oinstall /u01/app/oraInventory
chmod -R 775 /u01/app/oraInventory


export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/19.3.0/grid
export GRID_BASE=$ORACLE_BASE
export GRID_HOME=$ORACLE_HOME
export PATH=$PATH:$ORACLE_HOME/bin


yum install bc gcc gcc-c++  binutils  make gdb cmake  glibc ksh \
elfutils-libelf elfutils-libelf-devel fontconfig-devel glibc-devel  \
libaio libaio-devel libXrender libXrender-devel libX11 libXau sysstat \
libXi libXtst libgcc librdmacm-devel libstdc++ libstdc++-devel libxcb \
net-tools nfs-utils compat-libcap1 compat-libstdc++  smartmontools  targetcli \
python python-configshell python-rtslib python-six  unixODBC unixODBC-devel




As the grid user:
unzip /soft/LINUX.X64_193000_grid_home.zip  -d /u01/app/19.3.0/grid


As the oracle user:
unzip /soft/LINUX.X64_193000_db_home.zip -d /u01/app/oracle/product/19.3.0/dbhome_1



$ORACLE_HOME/runcluvfy.sh  stage -pre crsinst -n "raclhr-19c-n1,raclhr-19c-n2"  -verbose

 

/u01/app/19.3.0/grid/oui/prov/resources/scripts/sshUserSetup.sh -user grid  -hosts "raclhr-19c-n1 raclhr-19c-n2" -advanced exverify -confirm
/u01/app/19.3.0/grid/oui/prov/resources/scripts/sshUserSetup.sh -user oracle  -hosts "raclhr-19c-n1 raclhr-19c-n2" -advanced exverify -confirm
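
After the two scripts finish, it's worth confirming that passwordless ssh really works in every direction before running the installer; a quick check (run as grid and again as oracle, on both nodes; no password prompt should appear):

ssh raclhr-19c-n1 date
ssh raclhr-19c-n2 date
ssh raclhr-19c-n1-priv date
ssh raclhr-19c-n2-priv date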


--In 19c, the MGMTDB is optional and can be skipped
$GRID_HOME/gridSetup.sh -silent  -force -noconfig -waitforcompletion -ignorePrereq \
INVENTORY_LOCATION=/u01/app/oraInventory \
oracle.install.option=CRS_CONFIG \
ORACLE_BASE=/u01/app/grid \
oracle.install.asm.OSDBA=asmdba \
oracle.install.asm.OSOPER=asmoper \
oracle.install.asm.OSASM=asmadmin \
oracle.install.crs.config.scanType=LOCAL_SCAN \
oracle.install.crs.config.gpnp.scanName=raclhr-19c-scan \
oracle.install.crs.config.gpnp.scanPort=1521 \
oracle.install.crs.config.ClusterConfiguration=STANDALONE \
oracle.install.crs.config.configureAsExtendedCluster=false \
oracle.install.crs.config.clusterName=raclhr-cluster \
oracle.install.crs.config.gpnp.configureGNS=false \
oracle.install.crs.config.autoConfigureClusterNodeVIP=false \
oracle.install.crs.config.clusterNodes=raclhr-19c-n1:raclhr-19c-n1-vip:HUB,raclhr-19c-n2:raclhr-19c-n2-vip:HUB \
oracle.install.crs.config.networkInterfaceList=ens33:192.168.59.0:1,ens37:192.168.2.0:5,virbr0:192.168.122.0:3 \
oracle.install.asm.configureGIMRDataDG=true \
oracle.install.crs.config.useIPMI=false \
oracle.install.asm.storageOption=ASM \
oracle.install.asmOnNAS.configureGIMRDataDG=false \
oracle.install.asm.SYSASMPassword=lhr \
oracle.install.asm.diskGroup.name=OCR \
oracle.install.asm.diskGroup.redundancy=EXTERNAL \
oracle.install.asm.diskGroup.AUSize=4 \
oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asm-diskc,,/dev/asm-diskd,,/dev/asm-diske, \
oracle.install.asm.diskGroup.disks=/dev/asm-diskc,/dev/asm-diskd,/dev/asm-diske \
oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asm-* \
oracle.install.asm.configureAFD=false \
oracle.install.crs.configureRHPS=false \
oracle.install.crs.config.ignoreDownNodes=false \
oracle.install.config.managementOption=NONE \
oracle.install.crs.rootconfig.executeRootScript=false
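
Because executeRootScript=false, the root scripts printed by the installer still have to be run by hand on each node, node 1 first, before the -executeConfigTools step below; a minimal sketch using this layout's paths:

# As root, on raclhr-19c-n1 first, then on raclhr-19c-n2
/u01/app/oraInventory/orainstRoot.sh
/u01/app/19.3.0/grid/root.sh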


/u01/app/19.3.0/grid/gridSetup.sh -executeConfigTools -silent -responseFile /u01/app/19.3.0/grid/install/response/grid_2019-08-23_03-14-44PM.rsp


$GRID_HOME/bin/kfod disks=asm  st=true ds=true cluster=true





$ORACLE_HOME/runcluvfy.sh  stage -post  crsinst -n "raclhr-19c-n1,raclhr-19c-n2"  -verbose 


$ORACLE_HOME/runInstaller -silent  -force -noconfig  -ignorePrereq \
oracle.install.option=INSTALL_DB_SWONLY \
UNIX_GROUP_NAME=oinstall \
INVENTORY_LOCATION=/u01/app/oraInventory \
ORACLE_BASE=/u01/app/oracle \
ORACLE_HOME=/u01/app/oracle/product/19.3.0/dbhome_1 \
oracle.install.db.InstallEdition=EE \
oracle.install.db.OSDBA_GROUP=dba \
oracle.install.db.OSOPER_GROUP=oper \
oracle.install.db.OSBACKUPDBA_GROUP=backupdba \
oracle.install.db.OSDGDBA_GROUP=dgdba \
oracle.install.db.OSKMDBA_GROUP=kmdba \
oracle.install.db.OSRACDBA_GROUP=racdba \
oracle.install.db.CLUSTER_NODES=raclhr-19c-n1,raclhr-19c-n2 \
oracle.install.db.config.starterdb.type=GENERAL_PURPOSE




create diskgroup DATA external redundancy disk '/dev/asm-diski','/dev/asm-diskj','/dev/asm-diskk'  ATTRIBUTE 'compatible.rdbms' = '19.0', 'compatible.asm' = '19.0';

create diskgroup FRA external redundancy disk '/dev/asm-diskl','/dev/asm-diskm','/dev/asm-diskn'  ATTRIBUTE 'compatible.rdbms' = '19.0', 'compatible.asm' = '19.0';

srvctl start diskgroup -diskgroup data
srvctl start diskgroup -diskgroup fra
alter diskgroup all mount;
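
To confirm both disk groups are mounted everywhere, a quick check as the grid user (asmcmd and crsctl ship with GI; the resource names assume the disk group names above):

# Mounted disk groups and their free space on the local node
asmcmd lsdg
# Cluster-wide state of the disk group resources
crsctl status res ora.DATA.dg ora.FRA.dg -t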



dbca -silent -ignorePreReqs  -ignorePrereqFailure  -createDatabase -templateName General_Purpose.dbc -responseFile NO_VALUE \
-gdbname rac19c  -sid rac19c \
-createAsContainerDatabase TRUE \
-sysPassword lhr -systemPassword lhr -pdbAdminPassword lhr -dbsnmpPassword lhr \
-datafileDestination '+DATA' -recoveryAreaDestination '+FRA' \
-storageType ASM \
-characterset AL32UTF8 \
-sampleSchema true \
-totalMemory 1024 \
-databaseType MULTIPURPOSE \
-emConfiguration none \
-nodeinfo raclhr-19c-n1,raclhr-19c-n2
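
Once dbca completes, the cluster database can be sanity-checked with srvctl; a minimal check, assuming the gdbname rac19c from the command above:

# Both instances should report as running, one per node
srvctl status database -d rac19c
srvctl config database -d rac19c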


-------------------- The MGMTDB is optional in 19c
1. Create the CDB:
create diskgroup MGMT external redundancy disk '/dev/asm-diskf','/dev/asm-diskg','/dev/asm-diskh'  ATTRIBUTE 'compatible.rdbms' = '19.0', 'compatible.asm' = '19.0';
/u01/app/19.3.0/grid/bin/dbca -silent -createDatabase -createAsContainerDatabase true -templateName MGMTSeed_Database.dbc -sid -MGMTDB -gdbName _mgmtdb -storageType ASM -diskGroupName MGMT -datafileJarLocation /u01/app/19.3.0/grid/assistants/dbca/templates -characterset AL32UTF8 -autoGeneratePasswords -skipUserTemplateCheck
2. Create the PDB:
mgmtca -local


 

 

 

 

Oracle 19c RAC silent installation on RHEL 7.x

I. Installation Preparation

1.1. RHEL Version and IP Planning

1.1.1. OS Version

[root@localhost ~]# cat /etc/redhat-release 
Red Hat Enterprise Linux Server release 7.6 (Maipo)
[root@localhost ~]# uname -r
3.10.0-957.el7.x86_64

1.1.2. IP Plan

Host   Public         VIP            SCAN IP        Private
ydb01  192.168.10.91  192.168.10.93  192.168.10.95  172.16.16.91/92
ydb02  192.168.10.92  192.168.10.94  192.168.10.95  172.16.16.93/94

1.2. Hardware Checks

1.2.1. Disk Space Check

At least 1 GB in /tmp
Space required for Grid Infrastructure: 12 GB
Space required for Oracle Database: 7.3 GB
Roughly another 10 GB for analysis, collection, and trace files during installation
At least 100 GB total is recommended (excluding ASM or NFS space requirements)

# df -h

1.2.2. Memory Check

Memory: at least 8 GB
Swap:
With 4 GB to 16 GB of RAM, swap should be greater than or equal to RAM.
With more than 16 GB of RAM, 16 GB of swap is sufficient.

# grep MemTotal /proc/meminfo
# grep SwapTotal /proc/meminfo

1.3. Create Users and Groups

Create the users and groups; the UIDs and GIDs must be identical on all nodes.

1.3.1. Add Users and Groups

/usr/sbin/groupadd -g 50001 oinstall
/usr/sbin/groupadd -g 50002 dba
/usr/sbin/groupadd -g 50003 oper
/usr/sbin/groupadd -g 50004 backupdba
/usr/sbin/groupadd -g 50005 dgdba
/usr/sbin/groupadd -g 50006 kmdba
/usr/sbin/groupadd -g 50007 asmdba
/usr/sbin/groupadd -g 50008 asmoper
/usr/sbin/groupadd -g 50009 asmadmin
/usr/sbin/groupadd -g 50010 racdba
/usr/sbin/useradd -u 50011 -g oinstall -G dba,asmdba,asmoper,asmadmin,racdba grid
/usr/sbin/useradd -u 50012 -g oinstall -G dba,oper,backupdba,dgdba,kmdba,asmdba,racdba oracle
echo "oracle" | passwd --stdin oracle
echo "oracle" | passwd --stdin grid

Common group descriptions

Group      Privilege         Description
oinstall                     Install and upgrade Oracle software
dba        SYSDBA            Create, drop, and alter the database; start and stop it; switch archivelog mode; back up and recover the database
oper       SYSOPER           Start, stop, alter, back up, and recover the database; change archive mode
asmdba     SYSDBA for ASM    Manage ASM instances
asmoper    SYSOPER for ASM   Start and stop ASM instances
asmadmin   SYSASM            Mount and dismount disk groups; manage other storage devices
backupdba  SYSBACKUP         Startup/shutdown and backup/recovery operations (12c)
dgdba      SYSDG             Manage Data Guard (12c)
kmdba      SYSKM             Encryption key management operations
racdba                       RAC administration

1.3.2. Create Installation Directories

# mkdir /opt/oracle
# mkdir /opt/oracle/dbbase
# mkdir /opt/oracle/gbase
# mkdir /opt/oracle/ghome
# mkdir /opt/oracle/oraInventory
# chown -R grid:oinstall /opt/oracle
# chown -R oracle:oinstall /opt/oracle/dbbase
# chmod -R g+w /opt/oracle

1.3.3. Configure /dev/shm

vi /etc/fstab
tmpfs      /dev/shm      tmpfs   defaults,size=10g   0   0

mount -o remount /dev/shm

[root@ydb01 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2       268G   16G  253G   6% /
devtmpfs        7.9G     0  7.9G   0% /dev
tmpfs            10G     0   10G   0% /dev/shm
tmpfs           7.9G   13M  7.9G   1% /run
tmpfs           7.9G     0  7.9G   0% /sys/fs/cgroup
tmpfs           1.6G   12K  1.6G   1% /run/user/42
tmpfs           1.6G     0  1.6G   0% /run/user/0
[root@ydb01 ~]# 

1.3.4. Modify User Environment Variables

vi /home/oracle/.bash_profile
export ORACLE_BASE=/opt/oracle/dbbase
export ORACLE_HOME=$ORACLE_BASE/19c/db_1
export ORACLE_SID=emrep
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
umask 0022
# export DISPLAY=0.0.0.0:0.0
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export LC_ALL=en_US.UTF-8

vi /home/grid/.bash_profile
export ORACLE_BASE=/opt/oracle/gbase
export ORACLE_HOME=/opt/oracle/ghome
export GI_HOME=$ORACLE_HOME
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_SID=+ASM1
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
umask 0022
# export DISPLAY=0.0.0.0:0.0
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export LC_ALL=en_US.UTF-8

1.4. Hostname and hosts File

1.4.1. Set the Hostname

# hostname
# hostnamectl --static set-hostname ydb01

1.4.2. Add hosts Entries

# vi /etc/hosts
#Public
192.168.10.91  ydb01.localdomain  ydb01                   
192.168.10.92  ydb02.localdomain  ydb02
#Virtual             
192.168.10.93  ydb01-vip.localdomain  ydb01-vip                   
192.168.10.94  ydb02-vip.localdomain  ydb02-vip
#Private         
172.16.16.91   ydb01-priv1.localdomain  ydb01-priv1
172.16.16.92   ydb01-priv2.localdomain  ydb01-priv2
172.16.16.93   ydb02-priv1.localdomain  ydb02-priv1
172.16.16.94   ydb02-priv2.localdomain  ydb02-priv2
#Scanip
192.168.10.95  ydb-scan.localdomain  ydb-scan

1.4.3. Disable ZEROCONF

echo "NOZEROCONF=yes"  >>/etc/sysconfig/network

1.5. Configure User Limits

vi  /etc/security/limits.conf
#for oracle 19c rac @Yong @20190509
grid  soft  nproc   16384
grid  hard  nproc   65536
grid  soft  nofile  32768
grid  hard  nofile  65536
grid  soft  stack   32768
grid  hard  stack   65536
grid  soft  memlock  -1
grid  hard  memlock  -1
oracle  soft  nproc   16384
oracle  hard  nproc   65536
oracle  soft  nofile  32768
oracle  hard  nofile  65536
oracle  soft  stack   32768
oracle  hard  stack   65536
oracle  soft  memlock  -1
oracle  hard  memlock  -1

vi /etc/pam.d/login
#for oracle 19c rac @Yong @20190509
session required pam_limits.so 

1.6. Firewall and SELinux

1.6.1. Disable SELinux

sed -i  "s/SELINUX=enforcing/SELINUX=disabled/"  /etc/selinux/config

1.6.2. Disable the Firewall

systemctl  stop firewalld
systemctl disable firewalld

1.7. Set OS Kernel Parameters

vi /etc/sysctl.conf
#for oracle 19c rac @Yong @20190509
####fs setting
fs.aio-max-nr = 4194304
fs.file-max = 6815744
####kernel setting
kernel.shmall = 4194304
kernel.shmmax = 16106127360
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
kernel.panic_on_oops = 1
kernel.panic = 10
#### Net Setting
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 4194304
##TCP Cache Setting
net.ipv4.tcp_moderate_rcvbuf=1
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.conf.ens36.rp_filter = 2
net.ipv4.conf.ens35.rp_filter = 2
net.ipv4.conf.ens34.rp_filter = 1
####Memory Setting
vm.vfs_cache_pressure=200 
vm.swappiness=10
vm.min_free_kbytes=102400
#vm.nr_hugepages=10

1.8. Install Required RPM Packages

1.8.1. Configure a Local Yum Repository

[root@localhost ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda2       268G  4.3G  264G   2% /
devtmpfs        7.9G     0  7.9G   0% /dev
tmpfs           7.9G     0  7.9G   0% /dev/shm
tmpfs           7.9G   13M  7.9G   1% /run
tmpfs           7.9G     0  7.9G   0% /sys/fs/cgroup
tmpfs           1.6G   56K  1.6G   1% /run/user/0
/dev/sr0        4.2G  4.2G     0 100% /run/media/root/RHEL-7.6 Server.x86_64

vi /etc/yum.repos.d/rhel-iso.repo
[ISO-DVD]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=file:///run/media/root/RHEL-7.6\ Server.x86_64/
enabled=1
gpgcheck=0

If there is no optical drive, upload the RHEL installation ISO to the server and mount it with the following commands:

mkdir /mnt/rhel76iso
mount -o loop -t iso9660 /root/rhel-server-7.6-x86_64-dvd.iso /mnt/rhel76iso
vi /etc/yum.repos.d/rhel-iso.repo
[ISO-DVD]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=file:///mnt/rhel76iso
enabled=1
gpgcheck=0

1.8.2. Install the RPM Packages

yum install bc gcc gcc-c++  binutils  make gdb cmake  glibc ksh \
elfutils-libelf elfutils-libelf-devel fontconfig-devel glibc-devel  \
libaio libaio-devel libXrender libXrender-devel libX11 libXau sysstat \
libXi libXtst libgcc librdmacm-devel libstdc++ libstdc++-devel libxcb \
net-tools nfs-utils compat-libcap1 compat-libstdc++  smartmontools  targetcli \
python python-configshell python-rtslib python-six  unixODBC unixODBC-devel

Because RHEL 7 no longer ships the compat-libstdc++-33 package, it must be downloaded and installed separately:

wget  ftp://ftp.pbone.net/mirror/ftp5.gwdg.de/pub/opensuse/repositories/home:/matthewdva:/build:/RedHat:/RHEL-7/complete/x86_64/compat-libstdc++-33-3.2.3-71.el7.x86_64.rpm
yum  localinstall  compat-libstdc++-33-3.2.3-71.el7.x86_64.rpm
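
A quick way to confirm the package actually landed (run on both nodes):

rpm -q compat-libstdc++-33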

1.9. ASM Disk Initialization

Without the MGMTDB, three 5 GB disks for the OCR and voting disks are sufficient. If the MGMTDB is deployed
on its own disk group, NORMAL redundancy needs 53 GB+ of storage and EXTERNAL redundancy needs 27 GB+.
If the MGMTDB shares a disk group with the OCR and voting disks, a NORMAL-redundancy group needs 56 GB+.

This installation does not deploy the MGMTDB. Four LUNs are planned, three of 5 GB and one of 50 GB, mapped with udev.

1.9.1. Partition the Disks

[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sdb
[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sdc
[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sdd
[root@ydb01 ~]# echo -e "n\np\n1\n\n\nw" | fdisk /dev/sde

1.9.2. udev Mapping

vi  /etc/scsi_id.config
options=-g
[root@ydb01 ~]# /usr/lib/udev/scsi_id -g -u -d /dev/sdb1
36000c29a5fe67df9fac43441beb4280f
[root@ydb01 ~]# /usr/lib/udev/scsi_id -g -u -d /dev/sdc1
36000c29474a249ab2c6f9b2977d040b3
[root@ydb01 ~]# /usr/lib/udev/scsi_id -g -u -d /dev/sdd
36000c2925df7736e997e8e6a89865539

vi  /etc/udev/rules.d/99-oracle-asmdevices.rules

KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c29a5fe67df9fac43441beb4280f", SYMLINK+="asmdisks/asmdisk01", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c29474a249ab2c6f9b2977d040b3", SYMLINK+="asmdisks/asmdisk02", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c2925df7736e997e8e6a89865539", SYMLINK+="asmdisks/asmdisk03", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?1", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$parent", RESULT=="36000c2930898d85d2050c12f7eb96ef9", SYMLINK+="asmdisks/asmdisk04", OWNER="grid", GROUP="asmadmin", MODE="0660"

[root@ydb01 ~]# partprobe
[root@ydb01 ~]# /sbin/partprobe /dev/sdb1 
[root@ydb01 ~]# /sbin/partprobe /dev/sdc1
[root@ydb01 ~]# /sbin/partprobe /dev/sdd1
[root@ydb01 ~]# /sbin/partprobe /dev/sde1
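
If the /dev/asmdisks symlinks do not appear right after partprobe, reloading and re-triggering the udev rules usually helps (standard udevadm usage):

udevadm control --reload
udevadm trigger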

[root@ydb01 ~]# ls -alrth /dev/asmdisks/*
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk01 -> ../sdb1
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk02 -> ../sdc1
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk03 -> ../sdd1
lrwxrwxrwx 1 root root 7 May  9 16:10 /dev/asmdisks/asmdisk04 -> ../sde1

1.10. Other Optimizations

1.10.1. Disable THP and NUMA

vi  /etc/default/grub
Add the following to GRUB_CMDLINE_LINUX to disable THP and NUMA:
transparent_hugepage=never numa=off
Regenerate the grub configuration and reboot the host:
On BIOS: ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
On UEFI: ~]# grub2-mkconfig -o /boot/efi/EFI/redhat/grub.cfg
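
After the reboot, THP should report [never]; a quick verification against the standard sysfs path:

cat /sys/kernel/mm/transparent_hugepage/enabled
# expected output on RHEL 7: always madvise [never]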

1.10.2. NTP Configuration

If CTSS is used to synchronize time across the cluster, the OS time service can be disabled:

systemctl stop chronyd
systemctl disable chronyd

mv  /etc/chrony.conf   /etc/chrony.conf.bak
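
With NTP and chrony out of the way, the clusterware falls back to CTSS in active mode; once GI is up this can be confirmed as the grid user (standard crsctl command):

crsctl check ctss
# should report that the Cluster Time Synchronization Service is in Active mode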

1.10.3. Disable avahi

systemctl stop avahi-dnsconfd
systemctl stop avahi-daemon
systemctl disable avahi-dnsconfd
systemctl disable avahi-daemon
II. Install and Deploy Grid

2.1. Unpack the Software

[grid@ydb01 ~]$ cd /opt/software/
[grid@ydb01 software]$ ls -alrth
total 5.6G
-rwxrwxr-x  1 oracle oinstall 2.7G May  9 11:46 LINUX.X64_193000_grid_home.zip
-rwxrwxr-x  1 oracle oinstall 2.9G May  9 11:48 LINUX.X64_193000_db_home.zip
drwxr-xr-x. 5 root   root       46 May  9 16:33 ..
drwxrwxr-x  2 oracle oinstall   80 May  9 16:45 .
[grid@ydb01 software]$ unzip LINUX.X64_193000_grid_home.zip  -d $ORACLE_HOME

2.2. Configure SSH Equivalence

$ORACLE_HOME/oui/prov/resources/scripts/sshUserSetup.sh -user grid  -hosts "ydb01 ydb02"  -advanced -noPromptPassphrase

[grid@ydb01 ghome]$ ssh ydb02 date      
Thu May  9 05:26:35 EDT 2019
[grid@ydb01 ghome]$ ssh ydb02-priv1 date
Thu May  9 05:26:38 EDT 2019
[grid@ydb01 ghome]$ ssh ydb02-priv2 date
Thu May  9 05:26:41 EDT 2019
[grid@ydb01 ghome]$ ssh ydb01 date      
Thu May  9 17:25:59 CST 2019
[grid@ydb01 ghome]$ ssh ydb01-priv1 date
Thu May  9 17:26:01 CST 2019
[grid@ydb01 ghome]$ ssh ydb01-priv2 date
Thu May  9 17:26:04 CST 2019

2.3. Pre-installation Checks

$ORACLE_HOME/runcluvfy.sh  stage -pre crsinst -n "ydb01,ydb02"  -verbose
..............
..............
..............
Failures were encountered during execution of CVU verification request "stage -pre crsinst".

Verifying Package: cvuqdisk-1.0.10-1 ...FAILED
ydb02: PRVG-11550 : Package "cvuqdisk" is missing on node "ydb02"

ydb01: PRVG-11550 : Package "cvuqdisk" is missing on node "ydb01"

Verifying Time zone consistency ...FAILED
PRVF-5479 : Time zone is not the same on all cluster nodes.
Found time zone "CST-8CDT" on nodes "ydb01".
Found time zone "EST5EDT" on nodes "ydb02".

Verifying resolv.conf Integrity ...FAILED
ydb02: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02
ydb02: PRVG-10048 : Name "ydb02" was not resolved to an address of the
       specified type by name servers "192.168.194.2".

ydb01: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02
ydb01: PRVG-10048 : Name "ydb01" was not resolved to an address of the
       specified type by name servers "192.168.194.2".

Verifying RPM Package Manager database ...INFORMATION
PRVG-11250 : The check "RPM Package Manager database" was not performed because
it needs 'root' user privileges.

Verifying /dev/shm mounted as temporary file system ...FAILED
ydb02: PRVE-0421 : No entry exists in /etc/fstab for mounting /dev/shm

ydb01: PRVE-0421 : No entry exists in /etc/fstab for mounting /dev/shm


CVU operation performed:      stage -pre crsinst
Date:                         May 9, 2019 5:29:53 PM
CVU home:                     /opt/oracle/ghome/
User:                         grid

Fix the operating system based on the check results.

2.4. Install the Grid Software

Before installation, install cvuqdisk-1.0.10-1.x86_64 on both nodes; the package is under $ORACLE_HOME/cv/rpm/.
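
A minimal sketch of the cvuqdisk installation, assuming the package is copied to node 2 over ssh (CVUQDISK_GRP is Oracle's documented way to set the owning group):

# On node 1, as root
export CVUQDISK_GRP=oinstall
rpm -ivh /opt/oracle/ghome/cv/rpm/cvuqdisk-1.0.10-1.rpm
# Copy to node 2 and install it there as well
scp /opt/oracle/ghome/cv/rpm/cvuqdisk-1.0.10-1.rpm ydb02:/tmp/
ssh ydb02 "export CVUQDISK_GRP=oinstall; rpm -ivh /tmp/cvuqdisk-1.0.10-1.rpm"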

${ORACLE_HOME}/gridSetup.sh -ignorePrereq -waitforcompletion -silent \
 -responseFile ${ORACLE_HOME}/install/response/gridsetup.rsp \
 INVENTORY_LOCATION=/opt/oracle/oraInventory \
 SELECTED_LANGUAGES=en,en_GB \
 oracle.install.option=CRS_CONFIG \
 ORACLE_BASE=/opt/oracle/gbase \
 oracle.install.asm.OSDBA=asmdba \
 oracle.install.asm.OSASM=asmadmin \
 oracle.install.asm.OSOPER=asmoper  \
 oracle.install.crs.config.scanType=LOCAL_SCAN \
 oracle.install.crs.config.gpnp.scanName=ydb-scan \
 oracle.install.crs.config.gpnp.scanPort=1521 \
 oracle.install.crs.config.ClusterConfiguration=STANDALONE \
 oracle.install.crs.config.configureAsExtendedCluster=false \
 oracle.install.crs.config.clusterName=ora19c-cluster \
 oracle.install.crs.config.gpnp.configureGNS=false \
 oracle.install.crs.config.autoConfigureClusterNodeVIP=false \
 oracle.install.crs.config.clusterNodes=ydb01:ydb01-vip:HUB,ydb02:ydb02-vip:HUB \
 oracle.install.crs.config.networkInterfaceList=ens34:192.168.10.0:1,ens35:172.16.16.0:5,ens36:172.16.16.0:5 \
 oracle.install.asm.configureGIMRDataDG=false \
 oracle.install.crs.config.useIPMI=false \
 oracle.install.asm.storageOption=ASM \
 oracle.install.asmOnNAS.configureGIMRDataDG=false \
 oracle.install.asm.SYSASMPassword=Oracle_2019 \
 oracle.install.asm.diskGroup.name=OCRDG \
 oracle.install.asm.diskGroup.redundancy=NORMAL \
 oracle.install.asm.diskGroup.AUSize=4 \
 oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asmdisks/asmdisk01,,/dev/asmdisks/asmdisk02,,/dev/asmdisks/asmdisk03,  \
 oracle.install.asm.diskGroup.disks=/dev/asmdisks/asmdisk01,/dev/asmdisks/asmdisk02,/dev/asmdisks/asmdisk03  \
 oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asmdisks/*  \
 oracle.install.asm.configureAFD=false \
 oracle.install.asm.monitorPassword=Oracle_2019 \
 oracle.install.crs.configureRHPS=false \
 oracle.install.crs.config.ignoreDownNodes=false \
 oracle.install.config.managementOption=NONE \
 oracle.install.config.omsPort=0 \
 oracle.install.crs.rootconfig.executeRootScript=false

The installer output follows; the listed scripts must be run as root.

Launching Oracle Grid Infrastructure Setup Wizard...

[WARNING] [INS-32047] The location (/opt/oracle/oraInventory) specified for the central inventory is not empty.
   ACTION: It is recommended to provide an empty location for the inventory.
[WARNING] [INS-13013] Target environment does not meet some mandatory requirements.
   CAUSE: Some of the mandatory prerequisites are not met. See logs for details. /tmp/GridSetupActions2019-05-09_05-57-03PM/gridSetupActions2019-05-09_05-57-03PM.log
   ACTION: Identify the list of failed prerequisite checks from the log: /tmp/GridSetupActions2019-05-09_05-57-03PM/gridSetupActions2019-05-09_05-57-03PM.log. Then either from the log file or from installation manual find the appropriate configuration to meet the prerequisites and fix it manually.
The response file for this session can be found at:
 /opt/oracle/ghome/install/response/grid_2019-05-09_05-57-03PM.rsp

You can find the log of this install session at:
 /tmp/GridSetupActions2019-05-09_05-57-03PM/gridSetupActions2019-05-09_05-57-03PM.log

As a root user, execute the following script(s):
        1. /opt/oracle/oraInventory/orainstRoot.sh
        2. /opt/oracle/ghome/root.sh

Execute /opt/oracle/oraInventory/orainstRoot.sh on the following nodes: 
[ydb01, ydb02]
Execute /opt/oracle/ghome/root.sh on the following nodes: 
[ydb01, ydb02]

Run the script on the local node first. After successful completion, you can start the script in parallel on all other nodes.

Successfully Setup Software with warning(s).
As install user, execute the following command to complete the configuration.
        /opt/oracle/ghome/gridSetup.sh -executeConfigTools -responseFile /opt/oracle/ghome/install/response/gridsetup.rsp [-silent]


Moved the install session logs to:
 /opt/oracle/oraInventory/logs/GridSetupActions2019-05-09_05-57-03PM

2.5. Run the root.sh Scripts

Node 1

[root@ydb01 rpm]# /opt/oracle/oraInventory/orainstRoot.sh
Changing permissions of /opt/oracle/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /opt/oracle/oraInventory to oinstall.
The execution of the script is complete.
[root@ydb01 rpm]#  /opt/oracle/ghome/root.sh
Check /opt/oracle/ghome/install/root_ydb01_2019-05-09_18-08-42-494250060.log for the output of root script
[root@ydb01 rpm]# 

The log:

[root@ydb01 ~]# tail -1000f /opt/oracle/ghome/install/root_ydb01_2019-05-09_18-08-42-494250060.log
Performing root user operation.

The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /opt/oracle/ghome
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...


Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /opt/oracle/ghome/crs/install/crsconfig_params
The log of current session can be found at:
  /opt/oracle/gbase/crsdata/ydb01/crsconfig/rootcrs_ydb01_2019-05-09_06-08-59PM.log
2019/05/09 18:09:11 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2019/05/09 18:09:11 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2019/05/09 18:09:11 CLSRSC-363: User ignored prerequisites during installation
2019/05/09 18:09:11 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2019/05/09 18:09:14 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2019/05/09 18:09:15 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2019/05/09 18:09:15 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2019/05/09 18:09:15 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2019/05/09 18:09:40 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2019/05/09 18:09:43 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2019/05/09 18:09:45 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2019/05/09 18:09:57 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2019/05/09 18:09:57 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2019/05/09 18:10:04 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2019/05/09 18:10:04 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2019/05/09 18:10:55 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2019/05/09 18:11:02 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2019/05/09 18:12:07 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2019/05/09 18:12:13 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.

ASM has been created and started successfully.

[DBT-30001] Disk groups created successfully. Check /opt/oracle/gbase/cfgtoollogs/asmca/asmca-190509PM061247.log for details.

2019/05/09 18:13:46 CLSRSC-482: Running command: '/opt/oracle/ghome/bin/ocrconfig -upgrade grid oinstall'
CRS-4256: Updating the profile
Successful addition of voting disk 017fc3cc7d164fb5bf872733c61934dd.
Successful addition of voting disk 6a40f886828b4f36bfedfadafd0274a1.
Successful addition of voting disk 9f457e5961804fbabf20c7a7a2cc3304.
Successfully replaced voting disk group with +OCRDG.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
##  STATE    File Universal Id                File Name Disk group
--  -----    -----------------                --------- ---------
 1. ONLINE   017fc3cc7d164fb5bf872733c61934dd (/dev/asmdisks/asmdisk03) [OCRDG]
 2. ONLINE   6a40f886828b4f36bfedfadafd0274a1 (/dev/asmdisks/asmdisk01) [OCRDG]
 3. ONLINE   9f457e5961804fbabf20c7a7a2cc3304 (/dev/asmdisks/asmdisk02) [OCRDG]
Located 3 voting disk(s).
2019/05/09 18:15:27 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/05/09 18:16:37 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/05/09 18:16:37 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/05/09 18:18:35 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/05/09 18:19:13 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

Node 2

[root@ydb02 ~]# /opt/oracle/oraInventory/orainstRoot.sh
Changing permissions of /opt/oracle/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.

Changing groupname of /opt/oracle/oraInventory to oinstall.
The execution of the script is complete.
[root@ydb02 ~]#
[root@ydb02 ~]# /opt/oracle/ghome/root.sh
Check /opt/oracle/ghome/install/root_ydb02_2019-05-09_18-21-00-812198655.log for the output of root script

The log:

[root@ydb02 ~]# tail -1000f /opt/oracle/ghome/install/root_ydb02_2019-05-09_18-21-00-812198655.log
Performing root user operation.

The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /opt/oracle/ghome
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...

Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /opt/oracle/ghome/crs/install/crsconfig_params
The log of current session can be found at:
  /opt/oracle/gbase/crsdata/ydb02/crsconfig/rootcrs_ydb02_2019-05-09_06-21-38PM.log
2019/05/09 18:21:44 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2019/05/09 18:21:45 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2019/05/09 18:21:45 CLSRSC-363: User ignored prerequisites during installation
2019/05/09 18:21:45 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2019/05/09 18:21:46 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2019/05/09 18:21:46 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2019/05/09 18:21:47 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2019/05/09 18:21:47 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2019/05/09 18:21:51 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2019/05/09 18:21:51 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2019/05/09 18:22:01 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2019/05/09 18:22:01 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2019/05/09 18:22:04 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2019/05/09 18:22:05 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2019/05/09 18:22:19 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2019/05/09 18:22:54 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2019/05/09 18:22:56 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2019/05/09 18:23:57 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2019/05/09 18:23:59 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
2019/05/09 18:24:11 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/05/09 18:25:51 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/05/09 18:25:51 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/05/09 18:26:30 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/05/09 18:26:41 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded

2.6. Cluster ConfigTools

Run on node 1:

[grid@ydb01 ~]$ ${ORACLE_HOME}/gridSetup.sh -silent -executeConfigTools  -waitforcompletion \
>  -responseFile ${ORACLE_HOME}/install/response/gridsetup.rsp \
>  INVENTORY_LOCATION=/opt/oracle/oraInventory \
>  SELECTED_LANGUAGES=en,en_GB \
>  oracle.install.option=CRS_CONFIG \
>  ORACLE_BASE=/opt/oracle/gbase \
>  oracle.install.asm.OSDBA=asmdba \
>  oracle.install.asm.OSASM=asmadmin \
>  oracle.install.asm.OSOPER=asmoper  \
>  oracle.install.crs.config.scanType=LOCAL_SCAN \
>  oracle.install.crs.config.gpnp.scanName=ydb-scan \
>  oracle.install.crs.config.gpnp.scanPort=1521 \
>  oracle.install.crs.config.ClusterConfiguration=STANDALONE \
>  oracle.install.crs.config.configureAsExtendedCluster=false \
>  oracle.install.crs.config.clusterName=ora19c-cluster \
>  oracle.install.crs.config.gpnp.configureGNS=false \
>  oracle.install.crs.config.autoConfigureClusterNodeVIP=false \
>  oracle.install.crs.config.clusterNodes=ydb01:ydb01-vip:HUB,ydb02:ydb02-vip:HUB \
>  oracle.install.crs.config.networkInterfaceList=ens34:192.168.10.0:1,ens35:172.16.16.0:5,ens36:172.16.16.0:5 \
>  oracle.install.asm.configureGIMRDataDG=false \
>  oracle.install.crs.config.useIPMI=false \
>  oracle.install.asm.storageOption=ASM \
>  oracle.install.asmOnNAS.configureGIMRDataDG=false \
>  oracle.install.asm.SYSASMPassword=Oracle_2019 \
>  oracle.install.asm.diskGroup.name=OCRDG \
>  oracle.install.asm.diskGroup.redundancy=NORMAL \
>  oracle.install.asm.diskGroup.AUSize=4 \
>  oracle.install.asm.diskGroup.disksWithFailureGroupNames=/dev/asmdisks/asmdisk01,,/dev/asmdisks/asmdisk02,,/dev/asmdisks/asmdisk03,  \
>  oracle.install.asm.diskGroup.disks=/dev/asmdisks/asmdisk01,/dev/asmdisks/asmdisk02,/dev/asmdisks/asmdisk03  \
>  oracle.install.asm.diskGroup.diskDiscoveryString=/dev/asmdisks/*  \
>  oracle.install.asm.configureAFD=false \
>  oracle.install.asm.monitorPassword=Oracle_2019 \
>  oracle.install.crs.configureRHPS=false \
>  oracle.install.crs.config.ignoreDownNodes=false \
>  oracle.install.config.managementOption=NONE \
>  oracle.install.config.omsPort=0 \
>  oracle.install.crs.rootconfig.executeRootScript=false \
> 
Launching Oracle Grid Infrastructure Setup Wizard...

You can find the logs of this session at:
/opt/oracle/oraInventory/logs/GridSetupActions2019-05-09_06-31-24PM

You can find the log of this install session at:
 /opt/oracle/oraInventory/logs/UpdateNodeList2019-05-09_06-31-24PM.log
Configuration failed.
[WARNING] [INS-43080] Some of the configuration assistants failed, were cancelled or skipped.
   ACTION: Refer to the logs or contact Oracle Support Services.
[grid@ydb01 ~]$ 

The cluster status:

[grid@ydb01 ~]$ crsctl query crs activeversion
Oracle Clusterware active version on the cluster is [19.0.0.0.0]
[grid@ydb01 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[grid@ydb01 ~]$ crsctl status res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details       
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.chad
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.net1.network
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.ons
               ONLINE  ONLINE       ydb01                    STABLE
               ONLINE  ONLINE       ydb02                    STABLE
ora.proxy_advm
               OFFLINE OFFLINE      ydb01                    STABLE
               OFFLINE OFFLINE      ydb02                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    STABLE
      2        ONLINE  ONLINE       ydb02                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.OCRDG.dg(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    STABLE
      2        ONLINE  ONLINE       ydb02                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    Started,STABLE
      2        ONLINE  ONLINE       ydb02                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       ydb01                    STABLE
      2        ONLINE  ONLINE       ydb02                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.qosmserver
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.ydb01.vip
      1        ONLINE  ONLINE       ydb01                    STABLE
ora.ydb02.vip
      1        ONLINE  ONLINE       ydb02                    STABLE
--------------------------------------------------------------------------------
[grid@ydb01 ~]$ 

2.7. Post-installation Checks

[grid@ydb01 ~]$ $ORACLE_HOME/runcluvfy.sh  stage -post  crsinst -n "ydb01,ydb02"  -verbose 
..............
..............
..............
Post-check for cluster services setup was unsuccessful. 
Checks did not pass for the following nodes:
        ydb02,ydb01
        
Failures were encountered during execution of CVU verification request "stage -post crsinst".

Verifying Single Client Access Name (SCAN) ...FAILED
PRVG-11372 : Number of SCAN IP addresses that SCAN "ydb-scan" resolved to did
not match the number of SCAN VIP resources
  Verifying DNS/NIS name service 'ydb-scan' ...FAILED
  PRVG-1101 : SCAN name "ydb-scan" failed to resolve

CVU operation performed:      stage -post crsinst
Date:                         May 9, 2019 6:37:30 PM
CVU home:                     /opt/oracle/ghome/
User:                         grid
[grid@ydb01 ~]$ 
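
The SCAN failures above are expected when the SCAN name resolves through /etc/hosts to a single IP instead of three round-robin DNS records. A quick way to see where the name resolves (nslookup, from bind-utils, queries DNS directly and ignores /etc/hosts, so it fails here even though the cluster works):

nslookup ydb-scan
# With a hosts-file-only SCAN this fails; Oracle recommends three SCAN IPs in DNS for production.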
III. Install and Deploy the Database

3.1. Unpack the Software

mkdir  -p $ORACLE_HOME
unzip LINUX.X64_193000_db_home.zip -d $ORACLE_HOME

3.2. Configure SSH Equivalence

$ORACLE_HOME/oui/prov/resources/scripts/sshUserSetup.sh -user oracle  -hosts "ydb01 ydb02"  -advanced -noPromptPassphrase

[oracle@ydb01 scripts]$ ssh ydb02 date      
Thu May  9 20:34:44 CST 2019
[oracle@ydb01 scripts]$ ssh ydb02-priv1 date
Thu May  9 20:34:51 CST 2019
[oracle@ydb01 scripts]$ ssh ydb02-priv2 date
Thu May  9 20:34:58 CST 2019
[oracle@ydb01 scripts]$ ssh ydb01 date      
Thu May  9 20:35:02 CST 2019
[oracle@ydb01 scripts]$ ssh ydb01-priv1 date
Thu May  9 20:35:11 CST 2019
[oracle@ydb01 scripts]$ ssh ydb01-priv2 date
Thu May  9 20:35:17 CST 2019

3.3. Pre-installation Checks

[oracle@ydb01 ~]$  /opt/oracle/ghome/runcluvfy.sh stage -pre dbinst -n "ydb01,ydb02"  -verbose
......................
......................
......................
Failures were encountered during execution of CVU verification request "stage -pre dbinst".

Verifying resolv.conf Integrity ...FAILED
ydb02: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02

ydb01: PRVF-5636 : The DNS response time for an unreachable node exceeded
       "15000" ms on following nodes: ydb01,ydb02

Verifying Single Client Access Name (SCAN) ...FAILED
PRVG-11372 : Number of SCAN IP addresses that SCAN "ydb-scan" resolved to did
not match the number of SCAN VIP resources

  Verifying DNS/NIS name service 'ydb-scan' ...FAILED
  PRVG-1101 : SCAN name "ydb-scan" failed to resolve

Verifying Maximum locked memory check ...FAILED
ydb02: PRVE-0042 : Maximum locked memory "HARD" limit when automatic memory
       management is enabled is less than the recommended value in the file
       "/etc/security/limits.conf" [Expected = "3145728", Retrieved="-1"]  on
       node "ydb02"

ydb01: PRVE-0042 : Maximum locked memory "HARD" limit when automatic memory
       management is enabled is less than the recommended value in the file
       "/etc/security/limits.conf" [Expected = "3145728", Retrieved="-1"]  on
       node "ydb01"


CVU operation performed:      stage -pre dbinst
Date:                         May 9, 2019 8:41:25 PM
CVU home:                     /opt/oracle/ghome/
User:                         oracle

3.4. Install the Database Software

[oracle@ydb01 ~]$ ${ORACLE_HOME}/runInstaller -ignorePrereq -waitforcompletion -silent \
>  -responseFile ${ORACLE_HOME}/install/response/db_install.rsp \
>  oracle.install.option=INSTALL_DB_SWONLY \
>  ORACLE_HOSTNAME=ydb01 \
>  UNIX_GROUP_NAME=oinstall \
>  INVENTORY_LOCATION=/opt/oracle/oraInventory \
>  SELECTED_LANGUAGES=en,en_GB \
>  ORACLE_HOME=/opt/oracle/dbbase/19c/db_1 \
>  ORACLE_BASE=/opt/oracle/dbbase \
>  oracle.install.db.InstallEdition=EE \
>  oracle.install.db.OSDBA_GROUP=dba \
>  oracle.install.db.OSOPER_GROUP=oper \
>  oracle.install.db.OSBACKUPDBA_GROUP=backupdba \
>  oracle.install.db.OSDGDBA_GROUP=dgdba \
>  oracle.install.db.OSKMDBA_GROUP=kmdba \
>  oracle.install.db.OSRACDBA_GROUP=racdba \
>  oracle.install.db.CLUSTER_NODES=ydb01,ydb02 \
>  oracle.install.db.isRACOneInstall=false \
>  oracle.install.db.rac.serverpoolCardinality=0 \
>  oracle.install.db.config.starterdb.type=GENERAL_PURPOSE \
>  oracle.install.db.ConfigureAsContainerDB=false \
>  SECURITY_UPDATES_VIA_MYORACLESUPPORT=false \
>  DECLINE_SECURITY_UPDATES=true \
>  
Launching Oracle Database Setup Wizard...

[WARNING] [INS-13013] Target environment does not meet some mandatory requirements.
   CAUSE: Some of the mandatory prerequisites are not met. See logs for details. /opt/oracle/oraInventory/logs/InstallActions2019-05-09_09-00-49PM/installActions2019-05-09_09-00-49PM.log
   ACTION: Identify the list of failed prerequisite checks from the log: /opt/oracle/oraInventory/logs/InstallActions2019-05-09_09-00-49PM/installActions2019-05-09_09-00-49PM.log. Then either from the log file or from installation manual find the appropriate configuration to meet the prerequisites and fix it manually.
The response file for this session can be found at:
 /opt/oracle/dbbase/19c/db_1/install/response/db_2019-05-09_09-00-49PM.rsp

You can find the log of this install session at:
 /opt/oracle/oraInventory/logs/InstallActions2019-05-09_09-00-49PM/installActions2019-05-09_09-00-49PM.log

As a root user, execute the following script(s):
        1. /opt/oracle/dbbase/19c/db_1/root.sh

Execute /opt/oracle/dbbase/19c/db_1/root.sh on the following nodes: 
[ydb01, ydb02]


Successfully Setup Software with warning(s).
[oracle@ydb01 ~]$ 
IV. Create Databases

4.1. Non-container Database

dbca -silent -createDatabase \
 -templateName General_Purpose.dbc \
 -gdbname emrep -responseFile NO_VALUE \
 -characterSet AL32UTF8 \
 -sysPassword Oracle_2019 \
 -systemPassword Oracle_2019 \
 -createAsContainerDatabase false \
 -databaseType MULTIPURPOSE \
 -automaticMemoryManagement false \
 -totalMemory 1024 \
 -redoLogFileSize 50 \
 -emConfiguration NONE \
 -ignorePreReqs \
 -nodelist ydb01,ydb02 \
 -storageType ASM \
 -diskGroupName +DATADG \
 -asmsnmpPassword Oracle_2019 \
 -recoveryAreaDestination  NONE
 

4.2. Container Database with a PDB

dbca -silent -createDatabase \
 -templateName General_Purpose.dbc \
 -gdbname emrep -responseFile NO_VALUE \
 -characterSet AL32UTF8 \
 -sysPassword Oracle_2019 \
 -systemPassword Oracle_2019 \
 -createAsContainerDatabase true \
 -numberOfPDBs 1 \
 -pdbName yong \
 -pdbAdminPassword Oracle_2019 \
 -databaseType MULTIPURPOSE \
 -automaticMemoryManagement false \
 -totalMemory 1024 \
 -redoLogFileSize 50 \
 -emConfiguration NONE \
 -ignorePreReqs \
 -nodelist ydb01,ydb02 \
 -storageType ASM \
 -diskGroupName +DATADG \
 -asmsnmpPassword Oracle_2019 \
 -recoveryAreaDestination  NONE

 

 

 

 

Oracle 19c RAC Installation on Linux

I. Pre-implementation Preparation

II. Pre-installation Preparation

Oracle 19c RAC on Linux installation guide:
Part 1: Preparation
Part 2: GI Configuration
Part 3: DB Configuration

Installation environment for this guide: OEL 7.6 + Oracle 19.3 GI & RAC

I. Pre-implementation Preparation

1.1 Install the Operating System on the Servers

Configure two identical servers and install the same Linux version on both. Keep the installation media or ISO image.
Here both run OEL 7.6 with identically sized system directories. The OEL 7.6 ISO is stored on the servers for the local yum setup later.

1.2 Oracle Installation Media

Oracle 19.3 ships as two zip files (6 GB+ in total, mind the free space):
LINUX.X64_193000_grid_home.zip MD5:
LINUX.X64_193000_db_home.zip MD5:
Download them from the Oracle website; they only need to be uploaded to node 1.

1.3 Shared Storage Planning

Carve shared LUNs visible to both hosts out of the storage: three 1 GB disks for the OCR and voting disks, and three more 12 GB disks planned for data and the FRA.
Note: when installing 19c GI you can choose whether to configure the GIMR, and the default is not to. I chose not to, so no extra space is allocated for it.

 

--On OEL7, udev requires the disks to be partitioned; here fdisk is used to create one primary partition per disk, numbered 2 (purely to tell them apart):
sdb   sdc   sdd   sde   sdf   sdg
sdb2  sdc2  sdd2  sde2  sdf2  sdg2
1G    1G    1G    12G   12G   12G

--On OEL7, udev must bind the corresponding disk partitions:
for i in b c d e f g;
do
echo "KERNEL==\"sd?2\", SUBSYSTEM==\"block\", PROGRAM==\"/usr/lib/udev/scsi_id -g -u -d /dev/\$parent\", RESULT==\"`/usr/lib/udev/scsi_id -g -u -d /dev/sd\$i`\", SYMLINK+=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""
done

--vi /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB208b8d32-df9af9d6", SYMLINK+="asm-ocr1", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBe51f4d0a-1b73c589", SYMLINK+="asm-ocr2", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBc63b1aa3-1e290288", SYMLINK+="asm-ocr3", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB53ee20b6-40c4b9a3", SYMLINK+="asm-data01", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VB3822afbf-62d8a84d", SYMLINK+="asm-data02", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd?2", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", RESULT=="1ATA_VBOX_HARDDISK_VBfbb7943a-5ec216e5", SYMLINK+="asm-data03", OWNER="grid", GROUP="asmadmin", MODE="0660"

--Reload the udev configuration:
[root@db193 rules.d]# udevadm control --reload
[root@db193 rules.d]# udevadm trigger
--Confirm the udev bindings succeeded:
[root@db193 rules.d]# ls -l /dev/asm*
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-data01 -> sde2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-data02 -> sdf2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-data03 -> sdg2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-ocr1 -> sdb2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-ocr2 -> sdc2
lrwxrwxrwx. 1 root root 4 Jul 30 23:45 /dev/asm-ocr3 -> sdd2

--On the second node, db195, running udevadm directly did not work at first; run partprobe first, then trigger udevadm, and it succeeds
--partprobe notifies the kernel of partition table changes, asking the OS to reload the partition table
[root@db195 ~]# partprobe /dev/sdb
[root@db195 ~]# partprobe /dev/sdc
[root@db195 ~]# partprobe /dev/sdd
[root@db195 ~]# partprobe /dev/sde
[root@db195 ~]# partprobe /dev/sdf
[root@db195 ~]# partprobe /dev/sdg
--Reload the udev configuration:
[root@db195 ~]# udevadm control --reload
[root@db195 ~]# udevadm trigger
--Confirm the udev bindings succeeded:
[root@db195 ~]# ls -l /dev/asm*
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-data01 -> sde2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-data02 -> sdf2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-data03 -> sdg2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-ocr1 -> sdb2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-ocr2 -> sdc2
lrwxrwxrwx. 1 root root 4 Jul 30 23:49 /dev/asm-ocr3 -> sdd2

To keep this lab environment lean I did not simulate multipathing; to mimic production more closely, refer to the earlier 18c configuration:

1.4 Network Planning

Public network and private network.
Public network: in this lab enp0s3 carries the public IP and enp0s8 the ASM & private IP. In production, plan according to the actual environment; the public network is usually bonded at the OS level and the private network uses HAIP.

II. Pre-installation Preparation

2.1 Verify System Time on All Nodes

Check the system time on each node:

--Verify the date and time zone are correct
date
--Stop the chrony service and remove its configuration file (CTSS will be used instead)
systemctl list-unit-files|grep chronyd
systemctl status chronyd
systemctl disable chronyd
systemctl stop chronyd
mv /etc/chrony.conf /etc/chrony.conf_bak

In this lab, NTP and chrony are not used, so Oracle automatically falls back to its own CTSS service.

2.2 Disable the Firewall and SELinux on All Nodes

Disable the firewall on each node:

 

systemctl list-unit-files|grep firewalld
systemctl status firewalld
systemctl disable firewalld
systemctl stop firewalld

Disable SELinux on each node:

getenforce
cat /etc/selinux/config
--Edit /etc/selinux/config manually to set SELINUX=disabled, or use the commands below:
sed -i '/^SELINUX=.*/ s//SELINUX=disabled/' /etc/selinux/config
setenforce 0

Finally, verify that SELinux is disabled on all nodes.

2.3 Check Dependency Packages on All Nodes

 

yum install -y oracle-database-preinstall-18c-1.0-1.el7.x86_64

OEL 7.6 still names the package oracle-database-preinstall-18c, with no 19c counterpart, but in practice the dependency list is essentially identical.
On other Linux distributions, such as the commonly used RHEL, install the dependency packages required by the official documentation via yum.

2.4 Configure /etc/hosts on All Nodes

Edit the /etc/hosts file:

 

#public ip
192.168.1.193  db193
192.168.1.195  db195
#virtual ip
192.168.1.194  db193-vip
192.168.1.196  db195-vip
#scan ip
192.168.1.197  db19c-scan
#private ip
10.10.1.193    db193-priv
10.10.1.195    db195-priv

Set the hostname (preferably handled by the SA):

--For example, set the hostname to db193:
hostnamectl status
hostnamectl set-hostname db193
hostnamectl status

2.5 Create Required Users and Groups on All Nodes

创建group & user,给oracle、grid设置密码:

groupadd -g 54321 oinstall  
groupadd -g 54322 dba  
groupadd -g 54323 oper  
groupadd -g 54324 backupdba  
groupadd -g 54325 dgdba  
groupadd -g 54326 kmdba  
groupadd -g 54327 asmdba  
groupadd -g 54328 asmoper  
groupadd -g 54329 asmadmin  
groupadd -g 54330 racdba  
  
useradd -u 54321 -g oinstall -G dba,asmdba,backupdba,dgdba,kmdba,racdba,oper oracle  
useradd -u 54322 -g oinstall -G asmadmin,asmdba,asmoper,dba grid  
echo oracle | passwd --stdin oracle
echo oracle | passwd --stdin grid

In this test environment both passwords are oracle; in production, use complex passwords that meet your standards.

2.6 Create Installation Directories on All Nodes

Create the installation directories on each node (as root):

 

mkdir -p /u01/app/19.3.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle
chown -R grid:oinstall /u01
chown oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/

2.7 Modify System Configuration Files on All Nodes

Kernel parameter changes: vi /etc/sysctl.conf
OEL actually sets these values when the preinstall package is installed; the list below is mainly for verification, or as a reference for RHEL:

# vi /etc/sysctl.conf, and append the following:
vm.swappiness = 1
vm.dirty_background_ratio = 3
vm.dirty_ratio = 80
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
kernel.shmmni = 4096
kernel.shmall = 1073741824
kernel.shmmax = 4398046511104
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.panic_on_oops = 1
net.ipv4.conf.enp0s8.rp_filter = 2
net.ipv4.conf.enp0s9.rp_filter = 2
net.ipv4.conf.enp0s10.rp_filter = 2

Apply the changes:

#sysctl -p /etc/sysctl.conf

Note: enp0s9 and enp0s10 are NICs dedicated to IP SAN and get loose mode like the private network. (I am not using IP SAN here, so these two NICs are absent.)

#sysctl -p /etc/sysctl.d/98-oracle.conf
net.ipv4.conf.enp0s8.rp_filter = 2
net.ipv4.conf.enp0s9.rp_filter = 2
net.ipv4.conf.enp0s10.rp_filter = 2

User shell limits: vi /etc/security/limits.d/99-grid-oracle-limits.conf

oracle soft nproc 16384
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
grid soft nproc 16384
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768

Note that the OEL-generated /etc/security/limits.d/oracle-database-server-12cR2-preinstall.conf does not cover the grid user; add it manually.

vi /etc/profile.d/oracle-grid.sh

#Setting the appropriate ulimits for oracle and grid user
if [ $USER = "oracle" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -u 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
fi
if [ $USER = "grid" ]; then
  if [ $SHELL = "/bin/ksh" ]; then
    ulimit -u 16384
    ulimit -n 65536
  else
    ulimit -u 16384 -n 65536
  fi
fi

OEL does not configure this automatically either; it must be added by hand.

2.8 Set User Environment Variables on All Nodes

Node 1, grid user:

export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/19.3.0/grid
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

Node 2, grid user:

export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/19.3.0/grid
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

Node 1, oracle user:

export ORACLE_SID=jydb1
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/19.3.0/db_1
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

Node 2, oracle user:

export ORACLE_SID=jydb2
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/19.3.0/db_1
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

 

III. GI (Grid Infrastructure) Installation


3.1 Unpack the GI Installation Package

su - grid
Unzip the GRID package into the grid user's $ORACLE_HOME:

[grid@db193 grid]$ pwd
/u01/app/19.3.0/grid
[grid@db193 grid]$ unzip /u01/media/LINUX.X64_193000_grid_home.zip

3.2 Install and Configure Xmanager

After installing Xmanager Enterprise on your Windows machine, run the Xstart.exe executable
and configure it as follows:

Session: db193
Host: 192.168.1.193
Protocol: SSH
User Name: grid
Execution Command: /usr/bin/xterm -ls -display $DISPLAY
Click RUN; if a terminal window opens after entering the grid user's password, the setup works.
Alternatively, start Xmanager - Passive and set the DISPLAY variable in the SecureCRT session to bring up the GUI directly:
export DISPLAY=192.168.1.31:0.0

3.3 Permissions on the Shared Storage LUNs

Binding and permissions were already completed in "Part 1: Preparation -> 1.3 Shared Storage Planning"; nothing needs to be repeated here.

--Just confirm on both nodes that the sd[b-g]2 devices behind the /dev/asm* symlinks have the correct permissions:
# ls -l /dev/sd?2
brw-rw---- 1 root disk     8,  2 Jul 31 09:13 /dev/sda2
brw-rw---- 1 grid asmadmin 8, 18 Jul 31 18:06 /dev/sdb2
brw-rw---- 1 grid asmadmin 8, 34 Jul 31 18:06 /dev/sdc2
brw-rw---- 1 grid asmadmin 8, 50 Jul 31 18:06 /dev/sdd2
brw-rw---- 1 grid asmadmin 8, 66 Jul 31 17:10 /dev/sde2
brw-rw---- 1 grid asmadmin 8, 82 Jul 31 17:10 /dev/sdf2
brw-rw---- 1 grid asmadmin 8, 98 Jul 31 17:10 /dev/sdg2

3.4 Configure GI via the Xmanager GUI

Log in as grid through Xmanager, go to the $ORACLE_HOME directory, and run gridSetup to configure GI:

$ cd $ORACLE_HOME
$ ./gridSetup.sh

The GI configuration flow has changed somewhat since 12cR2, and 19c follows suit. (Screenshots of the graphical installation steps are omitted here.)
Note: the Public NIC here is enp0s3, and ASM & Private is enp0s8.
Note: there is a screen for GIMR storage. In 12c and 18c this meant an external-redundancy 40 GB disk (which caught out many DBAs new to 12c RAC installs), while in 19c it is unchecked by default, restoring the 11g-era simplicity, which I think is a great change.
Note: this is much like before; I again chose the three 1 GB disks with Normal redundancy for the OCR and voting disks.
Note: every issue reported by the prerequisite check must be reviewed carefully; click "Ignore All" only after confirming it is truly ignorable, and if any RPM packages are reported missing, install them with yum first. This is my low-spec test environment, so the memory and swap checks fail; that should not happen in production.
Note: run the root scripts to completion on the first node before running them on the other nodes.

Run the scripts as root on the first node:

[root@db193 ~]# /u01/app/oraInventory/orainstRoot.shChanging permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@db193 ~]# /u01/app/19.3.0/grid/root.shPerforming root user operation.
The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /u01/app/19.3.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]: 
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed byDatabase Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on optionUsing configuration parameter file: /u01/app/19.3.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
  /u01/app/grid/crsdata/db193/crsconfig/rootcrs_db193_2019-07-31_07-27-23AM.log2019/07/31 07:28:07 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.2019/07/31 07:28:08 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.2019/07/31 07:28:08 CLSRSC-363: User ignored prerequisites during installation2019/07/31 07:28:08 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.2019/07/31 07:28:17 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.2019/07/31 07:28:21 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.2019/07/31 07:28:21 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.2019/07/31 07:28:21 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.2019/07/31 07:31:54 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.2019/07/31 07:32:24 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.2019/07/31 07:32:37 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.2019/07/31 07:33:17 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.2019/07/31 07:33:18 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.2019/07/31 07:33:42 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.2019/07/31 07:33:43 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'2019/07/31 07:34:48 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.2019/07/31 07:35:21 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.2019/07/31 07:35:50 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.2019/07/31 07:36:14 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.ASM has been created and started successfully.
[DBT-30001] Disk groups created successfully. Check /u01/app/grid/cfgtoollogs/asmca/asmca-190731AM073727.log for details.
2019/07/31 07:40:00 CLSRSC-482: Running command: '/u01/app/19.3.0/grid/bin/ocrconfig -upgrade grid oinstall'
CRS-4256: Updating the profile
Successful addition of voting disk b789e47e76d74f06bf5f8b5cb4d62b88.
Successful addition of voting disk 3bc8119dfafe4fbebf7e1bf11aec8b9a.
Successful addition of voting disk bccdf28694a54ffcbf41354c7e4f133d.
Successfully replaced voting disk group with +CRS.
CRS-4256: Updating the profile
CRS-4266: Voting file(s) successfully replaced
##  STATE    File Universal Id                File Name       Disk group
--  -----    -----------------                ---------       ----------
 1. ONLINE   b789e47e76d74f06bf5f8b5cb4d62b88 (/dev/asm-ocr1) [CRS]
 2. ONLINE   3bc8119dfafe4fbebf7e1bf11aec8b9a (/dev/asm-ocr2) [CRS]
 3. ONLINE   bccdf28694a54ffcbf41354c7e4f133d (/dev/asm-ocr3) [CRS]
Located 3 voting disk(s).
2019/07/31 07:48:20 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/07/31 07:50:48 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/07/31 07:50:49 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/07/31 08:04:16 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/07/31 08:08:26 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@db193 ~]#

After they complete successfully, run the root scripts on the second node:

[root@db195 ~]# /u01/app/oraInventory/orainstRoot.sh
Changing permissions of /u01/app/oraInventory.
Adding read,write permissions for group.
Removing read,write,execute permissions for world.
Changing groupname of /u01/app/oraInventory to oinstall.
The execution of the script is complete.
[root@db195 ~]# /u01/app/19.3.0/grid/root.sh
Performing root user operation.
The following environment variables are set as:
    ORACLE_OWNER= grid
    ORACLE_HOME=  /u01/app/19.3.0/grid
Enter the full pathname of the local bin directory: [/usr/local/bin]: 
   Copying dbhome to /usr/local/bin ...
   Copying oraenv to /usr/local/bin ...
   Copying coraenv to /usr/local/bin ...
Creating /etc/oratab file...
Entries will be added to the /etc/oratab file as needed by
Database Configuration Assistant when a database is created
Finished running generic part of root script.
Now product-specific root actions will be performed.
Relinking oracle with rac_on option
Using configuration parameter file: /u01/app/19.3.0/grid/crs/install/crsconfig_params
The log of current session can be found at:
  /u01/app/grid/crsdata/db195/crsconfig/rootcrs_db195_2019-07-31_08-10-55AM.log
2019/07/31 08:11:34 CLSRSC-594: Executing installation step 1 of 19: 'SetupTFA'.
2019/07/31 08:11:34 CLSRSC-594: Executing installation step 2 of 19: 'ValidateEnv'.
2019/07/31 08:11:34 CLSRSC-363: User ignored prerequisites during installation
2019/07/31 08:11:34 CLSRSC-594: Executing installation step 3 of 19: 'CheckFirstNode'.
2019/07/31 08:11:44 CLSRSC-594: Executing installation step 4 of 19: 'GenSiteGUIDs'.
2019/07/31 08:11:44 CLSRSC-594: Executing installation step 5 of 19: 'SetupOSD'.
2019/07/31 08:11:45 CLSRSC-594: Executing installation step 6 of 19: 'CheckCRSConfig'.
2019/07/31 08:11:51 CLSRSC-594: Executing installation step 7 of 19: 'SetupLocalGPNP'.
2019/07/31 08:12:03 CLSRSC-594: Executing installation step 8 of 19: 'CreateRootCert'.
2019/07/31 08:12:04 CLSRSC-594: Executing installation step 9 of 19: 'ConfigOLR'.
2019/07/31 08:12:32 CLSRSC-594: Executing installation step 10 of 19: 'ConfigCHMOS'.
2019/07/31 08:12:33 CLSRSC-594: Executing installation step 11 of 19: 'CreateOHASD'.
2019/07/31 08:12:56 CLSRSC-594: Executing installation step 12 of 19: 'ConfigOHASD'.
2019/07/31 08:12:59 CLSRSC-330: Adding Clusterware entries to file 'oracle-ohasd.service'
2019/07/31 08:13:59 CLSRSC-4002: Successfully installed Oracle Trace File Analyzer (TFA) Collector.
2019/07/31 08:14:31 CLSRSC-594: Executing installation step 13 of 19: 'InstallAFD'.
2019/07/31 08:14:42 CLSRSC-594: Executing installation step 14 of 19: 'InstallACFS'.
2019/07/31 08:14:51 CLSRSC-594: Executing installation step 15 of 19: 'InstallKA'.
2019/07/31 08:14:58 CLSRSC-594: Executing installation step 16 of 19: 'InitConfig'.
2019/07/31 08:15:32 CLSRSC-594: Executing installation step 17 of 19: 'StartCluster'.
2019/07/31 08:17:41 CLSRSC-343: Successfully started Oracle Clusterware stack
2019/07/31 08:17:42 CLSRSC-594: Executing installation step 18 of 19: 'ConfigNode'.
2019/07/31 08:22:03 CLSRSC-594: Executing installation step 19 of 19: 'PostConfig'.
2019/07/31 08:23:05 CLSRSC-325: Configure Oracle Grid Infrastructure for a Cluster ... succeeded
[root@db195 ~]#

Judging from the timestamps in the log, this step still takes longer than it did back in 11g, but it is noticeably shorter than in 18c, the previous release in the 12c line.
After the root scripts complete successfully, continue the installation:
[Screenshot 19]
Note: for this final error, the log shows it is only a warning about using a single SCAN IP, so it can be ignored.
[Screenshot 20]
The GI configuration is now complete.
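
Optionally, before moving on, the cluvfy utility shipped in the grid home can run a post-install verification; a minimal sketch with the node names used in this environment:

[grid@db193 ~]$ $ORACLE_HOME/bin/cluvfy stage -post crsinst -n db193,db195 -verbose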

3.5 Verify the cluster state with crsctl

Run crsctl stat res -t to view the cluster resource states. You can see that 19c has been trimmed down quite a bit relative to 18c, presumably leaning toward stability.

[grid@db193 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.chad
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.net1.network
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.ons
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.CRS.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       db193                    STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    Started,STABLE
      2        ONLINE  ONLINE       db195                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       db193                    STABLE
ora.db193.vip
      1        ONLINE  ONLINE       db193                    STABLE
ora.db195.vip
      1        ONLINE  ONLINE       db195                    STABLE
ora.qosmserver
      1        ONLINE  ONLINE       db193                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

Also check the core clusterware daemons with crsctl stat res -t -init:

[grid@db193 ~]$ crsctl stat res -t -init
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.asm
      1        ONLINE  ONLINE       db193                    Started,STABLE
ora.cluster_interconnect.haip
      1        ONLINE  ONLINE       db193                    STABLE
ora.crf
      1        ONLINE  ONLINE       db193                    STABLE
ora.crsd
      1        ONLINE  ONLINE       db193                    STABLE
ora.cssd
      1        ONLINE  ONLINE       db193                    STABLE
ora.cssdmonitor
      1        ONLINE  ONLINE       db193                    STABLE
ora.ctssd
      1        ONLINE  ONLINE       db193                    ACTIVE:0,STABLE
ora.diskmon
      1        OFFLINE OFFLINE                               STABLE
ora.evmd
      1        ONLINE  ONLINE       db193                    STABLE
ora.gipcd
      1        ONLINE  ONLINE       db193                    STABLE
ora.gpnpd
      1        ONLINE  ONLINE       db193                    STABLE
ora.mdnsd
      1        ONLINE  ONLINE       db193                    STABLE
ora.storage
      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

3.6 Test the cluster failover (FAILED OVER) behavior

With node 2 rebooted, check the status from node 1:

[grid@db193 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       db193                    STABLE
ora.chad
               ONLINE  ONLINE       db193                    STABLE
ora.net1.network
               ONLINE  ONLINE       db193                    STABLE
ora.ons
               ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  OFFLINE                               STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.CRS.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  OFFLINE                               STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       db193                    STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    Started,STABLE
      2        ONLINE  OFFLINE                               STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  OFFLINE                               STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       db193                    STABLE
ora.db193.vip
      1        ONLINE  ONLINE       db193                    STABLE
ora.db195.vip
      1        ONLINE  INTERMEDIATE db193                    FAILED OVER,STABLE
ora.qosmserver
      1        ONLINE  ONLINE       db193                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

With node 1 rebooted, check the status from node 2:

[grid@db195 ~]$ crsctl stat res -t 
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       db195                    STABLE
ora.chad
               ONLINE  ONLINE       db195                    STABLE
ora.net1.network
               ONLINE  ONLINE       db195                    STABLE
ora.ons
               ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.CRS.dg(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       db195                    STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  OFFLINE                               STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       db195                    STABLE
ora.db193.vip
      1        ONLINE  INTERMEDIATE db195                    FAILED OVER,STABLE
ora.db195.vip
      1        ONLINE  ONLINE       db195                    STABLE
ora.qosmserver
      1        ONLINE  ONLINE       db195                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------
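
Two more quick checks are worth running after a failover test, as a sketch (ocrcheck performs its full logical-corruption check only when run as root):

[grid@db193 ~]$ crsctl query css votedisk
[root@db193 ~]# ocrcheck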

Appendix: location of the cluster logs:

-- If you forget the location, adrci can look it up
[grid@db195 trace]$ pwd
/u01/app/grid/diag/crs/db195/crs/trace
[grid@db195 trace]$ tail -20f alert.log 
2019-07-31 09:13:53.813 [CRSD(9156)]CRS-8500: Oracle Clusterware CRSD process is starting with operating system process ID 9156
2019-07-31 09:14:05.943 [CRSD(9156)]CRS-1012: The OCR service started on node db195.
2019-07-31 09:14:06.585 [CRSD(9156)]CRS-1201: CRSD started on node db195.
2019-07-31 09:14:15.787 [ORAAGENT(9824)]CRS-8500: Oracle Clusterware ORAAGENT process is starting with operating system process ID 9824
2019-07-31 09:14:16.576 [ORAROOTAGENT(9856)]CRS-8500: Oracle Clusterware ORAROOTAGENT process is starting with operating system process ID 9856
2019-07-31 09:14:28.516 [ORAAGENT(10272)]CRS-8500: Oracle Clusterware ORAAGENT process is starting with operating system process ID 10272
2019-07-31 09:21:07.409 [OCTSSD(8378)]CRS-2407: The new Cluster Time Synchronization Service reference node is host db195.
2019-07-31 09:21:10.569 [OCSSD(7062)]CRS-1625: Node db193, number 1, was shut down
2019-07-31 09:21:10.948 [OCSSD(7062)]CRS-1601: CSSD Reconfiguration complete. Active nodes are db195 .
2019-07-31 09:21:11.055 [CRSD(9156)]CRS-5504: Node down event reported for node 'db193'.
2019-07-31 09:21:11.292 [CRSD(9156)]CRS-2773: Server 'db193' has been removed from pool 'Free'.
2019-07-31 09:22:25.944 [OLOGGERD(21377)]CRS-8500: Oracle Clusterware OLOGGERD process is starting with operating system process ID 21377
2019-07-31 09:23:41.207 [OCSSD(7062)]CRS-1601: CSSD Reconfiguration complete. Active nodes are db193 db195 .
[grid@db195 trace]$ tail -5f ocssd.trc 
2019-07-31 09:35:40.732 :    CSSD:527664896: [     INFO] clssgmDiscEndpcl: initiating gipcDestroy 0x27553
2019-07-31 09:35:40.732 :    CSSD:527664896: [     INFO] clssgmDiscEndpcl: completed gipcDestroy 0x27553
2019-07-31 09:35:42.136 :    CSSD:527664896: [     INFO]   : Processing member data change type 1, size 4 for group HB+ASM, memberID 17:2:2
2019-07-31 09:35:42.136 :    CSSD:527664896: [     INFO]   : Sending member data change to GMP for group HB+ASM, memberID 17:2:2
2019-07-31 09:35:42.138 :    CSSD:1010091776: [     INFO] clssgmpcMemberDataUpdt: grockName HB+ASM memberID 17:2:2, datatype 1 datasize 4
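
A minimal adrci sketch for locating and tailing the CRS logs (commands only; the homes listed depend on your host):

[grid@db195 ~]$ adrci
adrci> show homes
adrci> set homepath diag/crs/db195/crs
adrci> show alert -tail 20
adrci> exit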

At this point the 19c GI configuration is fully complete.

Oracle 19c RAC Installation on Linux, Part 3: DB Configuration


Oracle 19c RAC installation guide for the Linux platform:
Part 1: Oracle 19c RAC Installation on Linux, Part 1: Preparation
Part 2: Oracle 19c RAC Installation on Linux, Part 2: GI Configuration
Part 3: Oracle 19c RAC Installation on Linux, Part 3: DB Configuration

Environment used in this article: OEL 7.6 + Oracle 19.3 GI & RAC

4. DB (Database) Installation

4.1 Unpack the DB installation package

Log in as the oracle user and unzip the db package under $ORACLE_HOME (like 18c, the 19c db software is extracted straight into $ORACLE_HOME as an image-based, installation-free deployment):

[oracle@db193 ~]$ mkdir -p /u01/app/oracle/product/19.3.0/db_1
[oracle@db193 ~]$ cd $ORACLE_HOME
[oracle@db193 db_1]$ pwd
/u01/app/oracle/product/19.3.0/db_1
[oracle@db193 db_1]$ ls
[oracle@db193 db_1]$ unzip /u01/media/LINUX.X64_193000_db_home.zip

4.2 Configure the DB software

Open Xmanager, log in as the oracle user, and configure the database software.

[oracle@db193 db_1]$ pwd
/u01/app/oracle/product/19.3.0/db_1
[oracle@db193 db_1]$ export DISPLAY=192.168.1.31:0.0
[oracle@db193 db_1]$ ./runInstaller
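
For unattended builds, the same software-only installation can be scripted in silent mode, mirroring the silent gridSetup.sh call used for GI. A minimal sketch; the parameter names follow the db_install.rsp template shipped with 19c, and the OS group assignments are assumptions to adapt to your environment:

[oracle@db193 db_1]$ ./runInstaller -silent -ignorePrereqFailure \
    oracle.install.option=INSTALL_DB_SWONLY \
    UNIX_GROUP_NAME=oinstall \
    INVENTORY_LOCATION=/u01/app/oraInventory \
    ORACLE_BASE=/u01/app/oracle \
    oracle.install.db.InstallEdition=EE \
    oracle.install.db.OSDBA_GROUP=dba \
    oracle.install.db.OSBACKUPDBA_GROUP=dba \
    oracle.install.db.OSDGDBA_GROUP=dba \
    oracle.install.db.OSKMDBA_GROUP=dba \
    oracle.install.db.OSRACDBA_GROUP=dba \
    oracle.install.db.CLUSTER_NODES=db193,db195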

Selected screenshots of the DB software configuration follow:
[Screenshot 21]
Note: choose a software-only installation here; the database itself will be created with dbca after the ASM disk groups are in place.
[Screenshots 22-23]
Note: configure SSH user equivalency here.
[Screenshots 24-28]
Note: items marked as fixable can be repaired by running the fixup script as prompted.
I also have a swap warning because this test environment has limited resources; it can be ignored here, but in production I strongly recommend resizing swap to meet the requirement.
If any other check fails, do not ignore it in either production or test; keep fixing the environment until it passes.
[Screenshots 29-30]
Note: at the end, run the single root script as prompted, on each node in turn.
[Screenshot 31]
This completes the DB software configuration.

4.3 Create disk groups with ASMCA

Open Xmanager, log in as the grid user, and create the ASM disk groups with asmca:

[grid@db193 ~]$ export DISPLAY=192.168.1.31:0.0
[grid@db193 ~]$ asmca

asmca launches the graphical disk group creation interface; the first thing you see is the colorful 19c splash screen:
[Screenshot 32]
Then the main asmca window appears:
[Screenshot 33]
Here I create a DATA disk group and an FRA disk group, both with external redundancy (in production, choose external only if the underlying storage already provides RAID protection).
[Screenshots 34-36]
The newly created DATA and FRA disk groups are now created and successfully mounted.
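
If you prefer the command line to the GUI, equivalent disk groups can be created from SQL*Plus as SYSASM; a sketch in which the /dev/asm-disk* paths are placeholders for your own candidate disks:

[grid@db193 ~]$ sqlplus / as sysasm
SQL> create diskgroup DATA external redundancy disk '/dev/asm-diskf';
SQL> create diskgroup FRA external redundancy disk '/dev/asm-diskg';

Note that disk groups created this way are mounted only on the local ASM instance; run alter diskgroup DATA mount; (and likewise for FRA) on the other node, whereas asmca mounts them on all nodes automatically.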

4.4 Create the database with DBCA

Open Xmanager, log in as the oracle user, and create the database with the dbca GUI.
Screenshots of the DBCA database creation process follow:
[Screenshots 37-41]
Note: here you choose whether to use a CDB and define the CDB and PDB names. I enable the CDB, name it jydb, and have dbca create 2 PDBs automatically with the name prefix pdb.
[Screenshot 42]
Note: I use OMF here, which is also the default.
[Screenshot 43]
Note: I leave the FRA unconfigured for now; when archiving is enabled later, the archived logs will go to the +FRA disk group.
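
For reference, one common sequence for turning on archiving into +FRA later looks like this; a sketch in which the destination size is a placeholder and jydb is the CDB name chosen above:

-- as SYSDBA while the database is still open:
SQL> alter system set db_recovery_file_dest_size=10G scope=both sid='*';
SQL> alter system set db_recovery_file_dest='+FRA' scope=both sid='*';
-- then enable archivelog mode with a single instance mounted:
[oracle@db193 ~]$ srvctl stop database -db jydb
[oracle@db193 ~]$ sqlplus / as sysdba
SQL> startup mount
SQL> alter database archivelog;
SQL> shutdown immediate
[oracle@db193 ~]$ srvctl start database -db jydb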
[Screenshots 44-45]
Note: here you set the specific memory allocation and the database character set. I leave the character set at the default AL32UTF8; adjust these to suit your actual requirements.
[Screenshot 46]
Note: here you can choose whether to configure EM. I configure it; skip it if you don't need it. CVU is usually left unconfigured as well, but I enable it here for learning purposes.
[Screenshot 47]
Note: set the passwords here. In my lab I simply use oracle, which does not meet the complexity rules; use properly complex passwords in production.
[Screenshot 48]
Note: you can optionally save the database creation scripts; take it or leave it as you see fit.
[Screenshot 49]
Note: if any other check fails here, it must not be ignored. In my case the error comes from using a single SCAN, so it can be ignored.
[Screenshot 50]
Note: this is the summary of the installation settings. Review it carefully; anything wrong can still be corrected by going back. Once confirmed, database creation begins.
[Screenshot 51]
Note: creating the database in 19c takes just as maddeningly long as in 18c; a DBA might as well queue up a couple of movies to watch while waiting.

INFO: Aug 01, 2019 4:42:58 AM oracle.assistants.common.base.driver.AssistantConfigDriver updateExitStatus 
INFO: Total time taken for DBCA operation in mm:ss is 286:16

[Screenshot 52]
At this point the Oracle 19.3 RAC database has been created successfully. If your organization is planning to move to the 12c family, I recommend going straight to 19c (the final release of the 12c line, effectively 12.2.0.3): compared with 18c it is more stable, and Oracle's support window is longer.
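
For repeatable environments, the same database can also be created non-interactively with dbca in silent mode. A minimal sketch; the flags follow dbca's -createDatabase syntax (verify them against dbca -help on your installation) and the passwords are placeholders:

[oracle@db193 ~]$ dbca -silent -createDatabase \
    -templateName General_Purpose.dbc \
    -gdbName jydb -sid jydb \
    -createAsContainerDatabase true -numberOfPDBs 2 -pdbName pdb \
    -databaseConfigType RAC -nodelist db193,db195 \
    -storageType ASM -datafileDestination +DATA \
    -characterSet AL32UTF8 \
    -sysPassword oracle -systemPassword oracle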

4.5 Verify the cluster state with crsctl

Log in as the grid user and run crsctl stat res -t to check the cluster resource states; the DB resources on every node are now open normally.

[grid@db193 ~]$ crsctl stat res -t
--------------------------------------------------------------------------------
Name           Target  State        Server                   State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.chad
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.net1.network
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
ora.ons
               ONLINE  ONLINE       db193                    STABLE
               ONLINE  ONLINE       db195                    STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.CRS.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.DATA.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        ONLINE  OFFLINE                               STABLE
ora.FRA.dg(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        ONLINE  OFFLINE                               STABLE
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       db193                    STABLE
ora.asm(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    Started,STABLE
      2        ONLINE  ONLINE       db195                    Started,STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.asmnet1.asmnetwork(ora.asmgroup)
      1        ONLINE  ONLINE       db193                    STABLE
      2        ONLINE  ONLINE       db195                    STABLE
      3        OFFLINE OFFLINE                               STABLE
ora.cvu
      1        ONLINE  ONLINE       db193                    STABLE
ora.db193.vip
      1        ONLINE  ONLINE       db193                    STABLE
ora.db195.vip
      1        ONLINE  ONLINE       db195                    STABLE
ora.jydb.db
      1        ONLINE  ONLINE       db193                    Open,HOME=/u01/app/o
                                                             racle/product/19.3.0
                                                             /db_1,STABLE
      2        ONLINE  ONLINE       db195                    Open,HOME=/u01/app/o
                                                             racle/product/19.3.0
                                                             /db_1,STABLE
ora.qosmserver
      1        ONLINE  ONLINE       db193                    STABLE
ora.scan1.vip
      1        ONLINE  ONLINE       db193                    STABLE
--------------------------------------------------------------------------------

Log in as the oracle user and connect with sqlplus / as sysdba:

[oracle@db193 ~]$ sqlplus / as sysdba
SQL*Plus: Release 19.0.0.0.0 - Production on Thu Aug 1 06:49:29 2019
Version 19.3.0.0.0
Copyright (c) 1982, 2019, Oracle.  All rights reserved.
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.3.0.0.0

SQL> select inst_id, name, open_mode from gv$database;

   INST_ID NAME      OPEN_MODE
---------- --------- --------------------
         1 JYDB      READ WRITE
         2 JYDB      READ WRITE

SQL> show con_id

CON_ID
------------------------------
1

SQL> show con_name

CON_NAME
------------------------------
CDB$ROOT

SQL> show pdbs

    CON_ID CON_NAME                       OPEN MODE  RESTRICTED
---------- ------------------------------ ---------- ----------
         2 PDB$SEED                       READ ONLY  NO
         3 PDB1                           READ WRITE NO
         4 PDB2                           READ WRITE NO

SQL> alter session set container = pdb2;

Session altered.

SQL> show pdbs

    CON_ID CON_NAME                       OPEN MODE  RESTRICTED
---------- ------------------------------ ---------- ----------
         4 PDB2                           READ WRITE NO

SQL> select name from v$datafile;

NAME
--------------------------------------------------------------------------------
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/system.280.1015129671
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/sysaux.281.1015129671
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/undotbs1.279.1015129669
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/undo_2.283.1015129973
+DATA/JYDB/8F00F4E12A9F708DE053C101A8C0395D/DATAFILE/users.284.1015130015

SQL>

All resources are healthy. This completes the entire installation of Oracle 19.3 GI & RAC on OEL 7.6.
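
As a final cross-check, srvctl can confirm the database resource and its registered configuration; a short sketch:

[oracle@db193 ~]$ srvctl status database -db jydb
[oracle@db193 ~]$ srvctl config database -db jydb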