Oracle 11g RAC Deployment

  • Environment: CentOS 7.4 + Oracle 11.2.0.4 + UDEV

I. Host Network Planning

Network item   Node 1        Node 2
Hostname       rac1          rac2
Public IP      10.0.0.21     10.0.0.22
Private IP     172.16.1.21   172.16.1.22
VIP            10.0.0.23     10.0.0.24
SCAN IP        10.0.0.25     (cluster-wide)
  • Public IP: the address on the server's physical NIC; the IP that serves client traffic

  • Private IP: the internal/private address, used mainly for inter-instance synchronization and the cluster heartbeat

  • VIP: a virtual IP in the same subnet as the Public IP

  • SCAN IP: introduced with 11g clusters; a cluster-wide virtual IP that provides a single access point to the cluster

The disks attached to both hosts must be shared disks.
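
To make the SCAN concrete: a client reaches the whole cluster through the single name rac-scan rather than a per-node address. A minimal tnsnames.ora entry might look like the sketch below (the service name oracle_rac matches the database created later in this guide; port 1521 is the assumed default):

RAC_SCAN =
  (DESCRIPTION =
    (ADDRESS = (PROTOCOL = TCP)(HOST = rac-scan)(PORT = 1521))
    (CONNECT_DATA =
      (SERVER = DEDICATED)
      (SERVICE_NAME = oracle_rac)
    )
  )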

II. Operating System Configuration

1. Set the hostnames (both nodes)

hostnamectl set-hostname rac1    # on node 1
hostnamectl set-hostname rac2    # on node 2

2. Disable the firewall (both nodes)

systemctl stop firewalld
systemctl disable firewalld

3. Disable SELinux (both nodes)

sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
setenforce 0
getenforce 

4. Configure /etc/hosts (both nodes)

vim /etc/hosts
#public ip ent0
10.0.0.21  rac1
10.0.0.22  rac2
#priv ip ent1
172.16.1.21 rac1-prv
172.16.1.22  rac2-prv
#vip ip
10.0.0.23  rac1-vip
10.0.0.24  rac2-vip
#scan ip
10.0.0.25  rac-scan
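
A quick sanity check, run on both nodes, that name resolution and both networks work (the VIP and SCAN addresses only answer once Grid Infrastructure is up, so only the host names are tested here):

for h in rac1 rac2 rac1-prv rac2-prv; do
    ping -c 1 "$h" >/dev/null && echo "$h ok" || echo "$h FAILED"
done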

5. Install required packages (both nodes)

yum install binutils  -y
yum install compat-libcap1 -y
yum install compat-libstdc++-33 -y
yum install gcc -y
yum install gcc-c++ -y
yum install glibc -y
yum install glibc-devel -y
yum install ksh -y
yum install libaio -y
yum install libaio-devel -y
yum install libgcc -y
yum install libstdc++ -y
yum install libstdc++-devel -y
yum install libXi -y
yum install libXtst -y
yum install make -y
yum install sysstat -y
yum install zlib-devel -y
yum install elfutils-libelf-devel -y
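
The same package set can be installed in a single command, with a spot check afterwards (purely a convenience; identical packages to the list above):

yum install -y binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++ glibc glibc-devel \
    ksh libaio libaio-devel libgcc libstdc++ libstdc++-devel libXi libXtst make sysstat \
    zlib-devel elfutils-libelf-devel
# spot-check a few critical ones
rpm -q gcc ksh libaio libaio-devel sysstat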

6. Disable the ntpd time service (both nodes)

# Check that both nodes have the same time and time zone, then disable ntp

systemctl disable ntpd.service
systemctl stop ntpd.service
mv /etc/ntp.conf /etc/ntp.conf.orig
systemctl status ntpd

/sbin/ntpdate ntp1.aliyun.com    # one-off manual time sync
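
With ntpd removed, Oracle's Cluster Time Synchronization Service (CTSS) takes over cluster time keeping in active mode. Once Grid Infrastructure is installed (section VI), a minimal check as the grid user:

crsctl check ctss
# with no ntp configuration present, this should report CTSS in Active mode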

7. Create users and groups (both nodes)

groupadd -g 501 oinstall
groupadd -g 502 dba
groupadd -g 503 oper
groupadd -g 504 asmadmin
groupadd -g 505 asmoper
groupadd -g 506 asmdba
useradd -g oinstall -G dba,asmdba,oper oracle
useradd -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid

# Set the passwords (the example password used here is 12366)
echo "12366" | passwd --stdin grid
echo "12366" | passwd --stdin oracle
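
A quick check that the group memberships match what the grid and database installers expect:

id grid     # expect groups oinstall, asmadmin, asmdba, asmoper, oper, dba
id oracle   # expect groups oinstall, dba, asmdba, oper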


8. Create the directory tree (both nodes)

mkdir -p /u01/app/grid
mkdir -p /u01/app/11.2.0/grid
chown -R grid:oinstall /u01
mkdir -p /u01/app/oraInventory
chown -R grid:oinstall /u01/app/oraInventory
mkdir -p /u01/app/oracle
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01

9. Edit 20-nproc.conf (both nodes)

[root@rac1 ~]# vim /etc/security/limits.d/20-nproc.conf
# Comment out this line:
#*          soft    nproc    1024
# and add the following line:
* - nproc 16384

10. Edit limits.conf (both nodes)

# Append the following to /etc/security/limits.conf
vim /etc/security/limits.conf
#ORACLE SETTING
grid                 soft    nproc   2047
grid                 hard    nproc   16384
grid                 soft    nofile  1024
grid                 hard    nofile  65536
grid                 soft    stack   10240
grid                 hard    stack   32768
oracle               soft    nproc   2047
oracle               hard    nproc   16384
oracle               soft    nofile  1024
oracle               hard    nofile  65536
oracle               soft    stack   10240
oracle               hard    stack   32768
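
The limits take effect only for new sessions; after logging in again as grid or oracle, they can be verified from that user's shell:

ulimit -u -n -s      # soft limits for max processes / open files / stack size
ulimit -H -u -n -s   # hard limits; compare against the values configured above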

11. Edit /etc/pam.d/login (both nodes)

# Append the following to /etc/pam.d/login
vim /etc/pam.d/login
#ORACLE SETTING
session    required     pam_limits.so

12. Edit /etc/systemd/logind.conf (both nodes)

vim /etc/systemd/logind.conf 
RemoveIPC=no 

systemctl daemon-reload 
systemctl restart systemd-logind

13. Edit sysctl.conf (both nodes)

vim /etc/sysctl.conf
#ORACLE SETTING
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmmax = 3865470566   
kernel.shmall = 2097152 
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576

# Apply the changes
sysctl -p
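
For context: kernel.shmmax is usually sized at roughly half of physical RAM in bytes, and kernel.shmall in pages (the values above fit a host with about 8 GB of RAM). A quick way to inspect the live values:

sysctl kernel.shmmax kernel.shmall
getconf PAGE_SIZE    # shmall is counted in pages of this size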

14. Stop the avahi-daemon service (both nodes)

systemctl disable avahi-daemon.socket
systemctl disable avahi-daemon.service
ps -ef | grep avahi-daemon
kill -9 $(pgrep avahi-daemon)    # kill any avahi-daemon processes still running

15. Add the NOZEROCONF=yes parameter (both nodes)

vim /etc/sysconfig/network
NOZEROCONF=yes

16. Set the grid user's environment variables

# Node 1
[root@rac1 ~]# su - grid

vim /home/grid/.bash_profile

umask 022
export ORACLE_SID=+ASM1
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_UNQNAME=oracle_rac    # must be identical on node 1 and node 2
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_TERM=xterm
export NLS_LANG=AMERICAN_AMERICA.UTF8
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
    if [ $SHELL = "/bin/ksh" ]; then
        ulimit -p 16384
        ulimit -n 65536
    else
        ulimit -u 16384 -n 65536
    fi
    umask 022
fi
cd /u01/app/11.2.0/grid/bin
# Node 2
[root@rac2 ~]# su - grid

vim /home/grid/.bash_profile

umask 022
export ORACLE_SID=+ASM2
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export ORACLE_UNQNAME=oracle_rac    # must be identical on node 1 and node 2
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_TERM=xterm
export NLS_LANG=AMERICAN_AMERICA.UTF8
if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
    if [ $SHELL = "/bin/ksh" ]; then
        ulimit -p 16384
        ulimit -n 65536
    else
        ulimit -u 16384 -n 65536
    fi
    umask 022
fi

17. Set the oracle user's environment variables

# Node 1
[root@rac1 ~]# su - oracle
[oracle@rac1:/home/oracle]$ vim ~/.bash_profile
export LANG=en_US
export ORACLE_SID=rac1
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_TERM=xterm
export NLS_LANG=AMERICAN_AMERICA.UTF8
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
    if [ $SHELL = "/bin/ksh" ]; then
        ulimit -p 16384
        ulimit -n 65536
    else
        ulimit -u 16384 -n 65536
    fi
    umask 022
fi

cd /u01/app/oracle/product/11.2.0/db_1/bin
# Node 2
[root@rac2 ~]# su - oracle
[oracle@rac2:/home/oracle]$ vim ~/.bash_profile
export LANG=en_US
export ORACLE_SID=rac2
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export PATH=$ORACLE_HOME/bin:$PATH
export ORACLE_TERM=xterm
export NLS_LANG=AMERICAN_AMERICA.UTF8
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
    if [ $SHELL = "/bin/ksh" ]; then
        ulimit -p 16384
        ulimit -n 65536
    else
        ulimit -u 16384 -n 65536
    fi
    umask 022
fi

Reference: https://blog.csdn.net/ldjjbzh626/article/details/103174891

III. Creating Shared Disks in VMware

1. Add a hard disk


2. Select the disk type


3. Create a new virtual disk


4. Specify the disk capacity


5. Choose where to store the disk file


6. Advanced disk settings


Configure the remaining disks the same way.

  • In total this creates three 2 GB registry/voting disks (ocr), one 10 GB data disk (data), and one 5 GB backup disk (backup)
  • On rac2, add the same shared disks that were just created on rac1

7. Edit the virtual machine's vmx file

  • Add the following settings to the vmx file of both virtual machines:
scsi1.sharedBus = "virtual"
disk.locking = "false"
diskLib.dataCacheMaxSize = "0"
diskLib.dataCacheMaxReadAheadSize = "0"
diskLib.DataCacheMinReadAheadSize = "0"
diskLib.dataCachePageSize = "4096"
diskLib.maxUnsyncedWrites = "0"

IV. Installing and Configuring ASM

Binding the storage with udev is generally recommended; this guide uses the ASMLib packages below, but a udev sketch follows for reference.
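
A minimal udev-based binding for CentOS 7, as a sketch (it assumes the shared disks expose stable SCSI serials; replace the placeholder WWID with the output of /usr/lib/udev/scsi_id -g -u /dev/sdb, and add one rule per disk):

# /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="sd?1", SUBSYSTEM=="block", \
  PROGRAM=="/usr/lib/udev/scsi_id -g -u -d /dev/$parent", \
  RESULT=="<wwid-of-your-disk>", \
  SYMLINK+="asm-data1", OWNER="grid", GROUP="asmadmin", MODE="0660"

# reload the rules and re-trigger device events
udevadm control --reload-rules
udevadm trigger --type=devices --action=change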

1. Install the ASM packages (both nodes)

rpm -ivh kmod-oracleasm-2.0.8-28.el7.x86_64.rpm
rpm -ivh oracleasmlib-2.0.12-1.el7.x86_64.rpm
rpm -ivh oracleasm-support-2.1.11-2.el7.x86_64.rpm

2. Configure ASM and confirm it is enabled (both nodes)

# Configure ASM
[root@rac1 rpm]# oracleasm configure -i
Configuring the Oracle ASM library driver.

This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have.  The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.

Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done

# Confirm it is enabled
[root@rac1 rpm]# oracleasm configure
ORACLEASM_ENABLED=true
ORACLEASM_UID=grid
ORACLEASM_GID=asmadmin
ORACLEASM_SCANBOOT=true
ORACLEASM_SCANORDER=""
ORACLEASM_SCANEXCLUDE=""
ORACLEASM_USE_LOGICAL_BLOCK_SIZE="false"

# Restart the driver
/etc/init.d/oracleasm restart

3. Partition the disks (node 1, rac1)

[root@rac1 ~]# fdisk /dev/sdb
Welcome to fdisk (util-linux 2.23.2).

Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0xb5807fbc.

Command (m for help): n
Partition type:
   p   primary (0 primary, 0 extended, 4 free)
   e   extended
Select (default p): p
Partition number (1-4, default 1): 1
First sector (2048-10485759, default 2048): 
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-10485759, default 10485759): 
Using default value 10485759
Partition 1 of type Linux and of size 5 GiB is set

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.

# Repeat for the remaining shared disks (a scripted alternative is sketched after the listing below)
# Verify
[root@rac1 ~]# lsblk
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sda      8:0    0   20G  0 disk 
├─sda1   8:1    0    1G  0 part /boot
├─sda2   8:2    0    1G  0 part [SWAP]
└─sda3   8:3    0   18G  0 part /
sdb      8:16   0    5G  0 disk 
└─sdb1   8:17   0    5G  0 part 
sdc      8:32   0   10G  0 disk 
└─sdc1   8:33   0   10G  0 part 
sdd      8:48   0    2G  0 disk 
└─sdd1   8:49   0    2G  0 part 
sde      8:64   0    2G  0 disk 
└─sde1   8:65   0    2G  0 part 
sdf      8:80   0    2G  0 disk 
└─sdf1   8:81   0    2G  0 part 
sr0     11:0    1  4.2G  0 rom  /mnt/cdrom
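
Rather than repeating the interactive dialog for every disk, the same single-partition layout can be scripted by piping fdisk its answers, a sketch (n, p, 1, two defaults, then w; it assumes the remaining blank disks are /dev/sdc through /dev/sdf):

for d in /dev/sdc /dev/sdd /dev/sde /dev/sdf; do
    printf 'n\np\n1\n\n\nw\n' | fdisk "$d"
done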

4. Create the ASM disks (node 1)

[root@rac1 ~]# oracleasm createdisk backup /dev/sdb1
Writing disk header: done
Instantiating disk: done

oracleasm createdisk data /dev/sdc1
oracleasm createdisk ocr1 /dev/sdd1
oracleasm createdisk ocr2 /dev/sde1
oracleasm createdisk ocr3 /dev/sdf1

# If disk creation fails, reboot the system
reboot
# or re-initialize and check the driver:
oracleasm init
oracleasm status

5. Scan for the new disks on node 2

oracleasm scandisks
oracleasm listdisks

# If the scan fails as shown below, reboot the system and rescan
reboot
# or:
oracleasm init
oracleasm status
[root@rac2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
Instantiating disk "BACKUP"
Unable to instantiate disk "BACKUP"
Instantiating disk "DATA"
Unable to instantiate disk "DATA"
Instantiating disk "OCR3"

[root@rac2 ~]# oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...
[root@rac2 ~]# oracleasm listdisks
BACKUP
DATA
OCR1
OCR2
OCR3

# Show which block device backs an ASM disk
oracleasm querydisk -p DATA

V. Configuring SSH User Equivalence

1. Generate key pairs (both nodes)

su - oracle

rm -rf ~/.ssh && mkdir ~/.ssh && chmod 700 ~/.ssh && ssh-keygen -t rsa && ssh-keygen -t dsa

su - grid

rm -rf ~/.ssh && mkdir ~/.ssh && chmod 700 ~/.ssh && ssh-keygen -t rsa && ssh-keygen -t dsa

# Press Enter at every prompt (no passphrase)

2. Distribute the keys
# Node 1
su - oracle
# Append each node's public keys to authorized_keys
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
ssh rac2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
ssh rac2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
# Copy the key file to node 2
scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys
# Set permissions
chmod 600 ~/.ssh/authorized_keys

su - grid
# Append each node's public keys to authorized_keys
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
ssh rac2 cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
ssh rac2 cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
# Copy the key file to node 2
scp ~/.ssh/authorized_keys rac2:~/.ssh/authorized_keys
# Set permissions
chmod 600 ~/.ssh/authorized_keys


# Node 2
su - oracle
chmod 600 ~/.ssh/authorized_keys

su - grid
chmod 600 ~/.ssh/authorized_keys

3. Test (both nodes)

Run these once interactively and answer "yes" to each host-key prompt; the installer requires completely prompt-free ssh.

su - oracle
ssh rac1 date && ssh rac2 date && ssh rac1-prv date && ssh rac2-prv date

su - grid
ssh rac1 date && ssh rac2 date && ssh rac1-prv date && ssh rac2-prv date

VI. Installing the Grid Infrastructure

1. Upload the installation package to /app/tools (node 1)

unzip p13390677_112040_Linux-x86-64_3of7.zip 

2. Install the cvuqdisk package (both nodes)

[root@rac1 u01]#  cd  grid/rpm
[root@rac1 rpm]#  export CVUQDISK_GRP=oinstall
[root@rac1 rpm]#  rpm -ivh cvuqdisk-1.0.9-1.rpm 

[root@rac1 rpm]# scp cvuqdisk-1.0.9-1.rpm rac2:/u01
# then install it the same way on rac2

3. Run the pre-installation check from the grid software directory (node 1)

su - grid
cd /u01/grid/
# Pre-installation check
./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -fixup -verbose

# Fix whatever the check reports; two common findings:

  • Missing package (pdksh):
rpm -ivh pdksh-5.2.14-37.el5.x86_64.rpm --nodeps --force
  • Insufficient swap space:
# Create a temporary 1 GB swap file with dd
dd if=/dev/zero of=/home/oracle/swap.file bs=1024k count=1024
# Format it as swap
mkswap /home/oracle/swap.file
# Enable it (to remove it later, run: swapoff /home/oracle/swap.file)
swapon /home/oracle/swap.file
# Make it persistent across reboots
echo "/home/oracle/swap.file swap                    swap    defaults        0 0" >> /etc/fstab

The message "Pre-check for cluster services setup was successful." indicates that the check passed.

4. Pre-installation workaround: CentOS 7 manages services with systemd instead of init, so create a systemd unit for ohasd (both nodes)

# Create the unit file
touch /usr/lib/systemd/system/ohas.service
chmod 777 /usr/lib/systemd/system/ohas.service
vim /usr/lib/systemd/system/ohas.service
# Add the following content
[Unit]
Description=Oracle High Availability Services
After=syslog.target

[Service]
ExecStart=/etc/init.d/init.ohasd run >/dev/null 2>&1
Type=simple
Restart=always

[Install]
WantedBy=multi-user.target

# Enable and start the service
systemctl daemon-reload
systemctl enable ohas.service
systemctl start ohas.service
systemctl status ohas.service

# While root.sh runs later, watch for the init.ohasd file to appear under /etc/init.d
watch -n 1 -d 'ls -l /etc/init.d'

# As soon as the file appears, run:
systemctl enable ohas.service && systemctl start ohas.service && systemctl status ohas.service

5. Run the installer (node 1)

# Go to the grid software directory
su  - grid

cd /u01/grid/
export DISPLAY=10.0.0.1:0.0
export LANG=en_US
xhost +
# xhost + requires an X server on the client, e.g. Xmanager

# Launch the installer
./runInstaller

# If the installer aborts with "Could not execute auto check for display colors
# using command /usr/bin/xdpyinfo", install the missing utility:
yum -y install xdpyinfo

Accept the defaults for the next two screens, then click Install.

  • When the installer prompts you to run root.sh (both nodes)
# These scripts must be run as root, in this order:
# first on rac1:
/u01/app/oraInventory/orainstRoot.sh
# then on rac2:
/u01/app/oraInventory/orainstRoot.sh
# next on rac1:
/u01/app/11.2.0/grid/root.sh
# and finally on rac2:
/u01/app/11.2.0/grid/root.sh

# While root.sh runs, watch for the init.ohasd file to appear under /etc/init.d

watch -n 1 -d 'ls -l /etc/init.d'

# As soon as it appears, run the following; this works around the known
# root.sh/ohasd startup failure on CentOS 7:
systemctl enable ohas.service && systemctl start ohas.service && systemctl status ohas.service

"Configure Oracle Grid Infrastructure for a Cluster ... succeeded" means the script completed successfully; continue with the next step.

6. Verify the grid installation

su - grid
# Check CRS status
crsctl check crs

# Check the Clusterware resources
crs_stat -t -v

# List the cluster nodes
olsnodes -n

# Check the Oracle TNS listener process on both nodes
ps -ef|grep lsnr|grep -v 'grep'|grep -v 'ocfs'|awk '{print$9}'

# Check ASM status
srvctl status asm -a

7. Create the ASM disk groups for data and the fast recovery area (node 1)

# Create the DATA and BACKUP disk groups
su - grid
export DISPLAY=10.0.0.1:0.0
export LANG=en_US
# Launch the ASM Configuration Assistant GUI
asmca
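
After creating the disk groups in asmca, a quick non-GUI check that they are all mounted (run as the grid user; the OCR group was created during the grid installation):

asmcmd lsdg
# expect the OCR, DATA and BACKUP disk groups listed with State = MOUNTED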

VII. Installing the Oracle Database Software

# Node 1
unzip p13390677_112040_Linux-x86-64_1of7.zip
unzip p13390677_112040_Linux-x86-64_2of7.zip

# After unzipping, fix the ownership of the directory
chown -R oracle:oinstall /u01/database/
su - oracle
cd /u01/database

[oracle@rac1 database]$ export DISPLAY=10.0.0.1:0.0
[oracle@rac1 database]$ export LANG=en_US
[oracle@rac1 database]$ ./runInstaller 
  • Install the database software with the GUI:
    • Skip software updates
    • Choose "Install database software only"
    • Choose the cluster (RAC) installation and select both nodes
    • Accept the defaults for the rest

VIII. Creating the Database

su - oracle
dbca
# If dbca complains that the listener is not running, start it:
srvctl start listener
srvctl status listener

If dbca reports the error: Could not connect to ASM due to following error: ORA-12547: TNS:lost contact

Fix:

Grant 6751 permissions to the oracle binary under $ORACLE_HOME/bin, as both the oracle user and the grid user:

chmod 6751 $ORACLE_HOME/bin/oracle

For example, in the grid home:

[grid@rac1 ~]$ cd /u01/app/11.2.0/grid/bin
[grid@rac1 bin]$  ll oracle
-rwxr-x--x 1 grid oinstall 209840344 Apr 23 15:21 oracle
[grid@rac1 bin]$ chmod 6751 oracle
[grid@rac1 bin]$  ll oracle
-rwsr-s--x 1 grid oinstall 209840344 Apr 23 15:21 oracle

After the installation completes, check that the database is running:

[oracle@rac1 bin]$ srvctl status database -d oracle_rac 
Instance rac1 is running on node rac1
Instance rac2 is running on node rac2
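
As a final end-to-end check, connect through the SCAN with EZConnect and query all instances (this assumes the default port 1521 and the service name oracle_rac; substitute your own system password for <password>):

sqlplus system/<password>@rac-scan:1521/oracle_rac <<'EOF'
select instance_name, host_name, status from gv$instance;
EOF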

Reference: "Oracle 11G 11.2.0.4 RAC Deployment Reference Guide (CentOS 7.4 + Oracle 11.2.0.4 RAC)", iverycd's blog on CSDN

IX. Common Commands

#1. Check that the time is consistent across all cluster nodes
cluvfy comp clocksync -n all -verbose

#2. Check the status of all nodes
crsctl check cluster -all

#3. Check the OCR disks
ocrcheck

#4. Check the voting disks
crsctl query css votedisk

#5. Node application configuration (VIP, GSD, ONS, listener)
srvctl config nodeapps -a -g -s -l

#6. Check all Oracle instances (database status)
srvctl status database -d oracle_rac

#7. Check a single instance
srvctl status instance -d oracle_rac -i rac1

#8. Node application status
srvctl status nodeapps

#9. List all configured databases
srvctl config database

#10. Database configuration
srvctl config database -d oracle_rac -a

#11. ASM status and configuration
srvctl status asm
srvctl status asm -a

#12. TNS listener status and configuration
srvctl status listener
srvctl config listener -a

#13. SCAN status and configuration
srvctl status scan
srvctl config scan

#14. Starting and stopping the database and instances
srvctl status database -d oracle_rac   # check status
srvctl start database -d oracle_rac    # start the database
srvctl stop database -d oracle_rac     # stop the database

srvctl status instance -d oracle_rac -i rac1   # check a specific instance
srvctl start instance -d oracle_rac -i rac1    # start a specific instance
srvctl stop instance -d oracle_rac -i rac2     # stop a specific instance

#15. Network-related status
srvctl status listener   # TNS listener status
srvctl config scan       # SCAN configuration
srvctl status scan       # SCAN listener status, including the node it currently runs on

#16. VIP status and configuration
srvctl status vip -n rac1
srvctl config vip -n rac1


#17. Check the cluster status on all nodes
crsctl status cluster -all

X. Troubleshooting Common Installation Problems

1. During the Oracle software installation: Error in invoking target 'agent nmhs' of makefile '/u01/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk'

vim  /u01/app/oracle/product/11.2.0/db_1/sysman/lib/ins_emagent.mk
# Find the line
$(MK_EMAGENT_NMECTL)

# and change it to (append -lnnz11), then click Retry in the installer
$(MK_EMAGENT_NMECTL) -lnnz11

XI. Stopping and Starting the Cluster

1. Stopping the cluster

#1. Stop the database (run srvctl as the oracle user)
srvctl stop database -d oracle_rac
# Check status
srvctl status database -d oracle_rac

#2. Stop HAS (High Availability Services); must be done as root
cd /u01/app/11.2.0/grid/bin
./crsctl stop has 
./crsctl check has 

#3. Stop the cluster services on the nodes; must be done as root
cd /u01/app/11.2.0/grid/bin
./crsctl stop cluster -all
./crsctl check cluster -all
#4. Check the cluster process status
./crsctl check cluster
./crsctl check crs
./crs_stat -t -v

2. Starting the cluster

#1. Start HAS (High Availability Services); must be run as root, separately on each node
cd /u01/app/11.2.0/grid/bin
./crsctl start has
./crsctl check has
#2. Start the cluster
cd /u01/app/11.2.0/grid/bin
./crsctl start cluster -all     # start all nodes at once
./crsctl check cluster -all     # check all nodes
./crsctl start cluster -n rac1  # start a specific node

#3. Start the database (run srvctl as the oracle user)
srvctl start database -d oracle_rac 

#4. List all resources by full name and check their status
./crsctl status resource -t
./crsctl status resource
./crsctl check cluster -all
./crsctl check crs
./crs_stat -t -v