Oracle 11g Cluster (Part 1)


Cluster installation preparation


https://www.cnblogs.com/xshrim/p/5952211.html #reference article

RAC environment

            node 1          node 2
ip          10.56.85.235    10.56.85.236
hostname    rac1            rac2

/etc/hosts (identical on both nodes):

10.56.85.235 rac1
10.56.85.236 rac2
10.56.85.237 rac1-vip
10.56.85.238 rac2-vip
10.56.85.239 rac-scan
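Before going further it is worth confirming that every cluster name resolves the same way on both nodes. A minimal check (not part of the original steps; getent reads /etc/hosts as well as DNS):

for h in rac1 rac2 rac1-vip rac2-vip rac-scan; do getent hosts $h; done  #run on each node; output should match the hosts table above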

free -m

# node 1 (rac1)
             total       used       free     shared    buffers     cached
Mem:          7985        931       7054          9          1        296
-/+ buffers/cache:         634       7351
Swap:         8095          0       8095

# node 2 (rac2)
             total       used       free     shared    buffers     cached
Mem:          7985       1076       6909          9          1        346
-/+ buffers/cache:         728       7257
Swap:         8095          0       8095

/etc/fstab (identical on both nodes)

/dev/mapper/vg00-lvroot                    /         xfs   defaults          1 1
UUID=fc84949b-89ed-4b98-948e-fc0cfe258681  /boot     xfs   defaults          1 2
/dev/mapper/vg00-lvswap                    swap      swap  defaults          0 0
tmpfs                                      /dev/shm  tmpfs defaults,size=7G  0 0

df -h (identical on both nodes)

Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/vg00-lvroot   16G  2.9G   14G  19% /
devtmpfs                 3.9G     0  3.9G   0% /dev
tmpfs                    7.0G  140K  7.0G   1% /dev/shm
tmpfs                    3.9G  8.8M  3.9G   1% /run
tmpfs                    3.9G     0  3.9G   0% /sys/fs/cgroup
/dev/sda1                197M  104M   94M  53% /boot

ip a

# node 1 (rac1)
[root@localhost ~]# ip a

1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

valid_lft forever preferred_lft forever

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000

link/ether 00:50:56:99:14:47 brd ff:ff:ff:ff:ff:ff

inet 10.56.85.235/24 brd 10.56.85.255 scope global ens192

valid_lft forever preferred_lft forever

inet6 fe80::250:56ff:fe99:1447/64 scope link

valid_lft forever preferred_lft forever

3: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:50:56:99:42:ba brd ff:ff:ff:ff:ff:ff

inet 172.25.1.10/24 brd 172.25.1.255 scope global ens33

valid_lft forever preferred_lft forever

inet6 fe80::250:56ff:fe99:42ba/64 scope link

valid_lft forever preferred_lft forever

# node 2 (rac2)
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN

link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00

inet 127.0.0.1/8 scope host lo

valid_lft forever preferred_lft forever

inet6 ::1/128 scope host

valid_lft forever preferred_lft forever

2: ens192: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000

link/ether 00:50:56:99:26:4b brd ff:ff:ff:ff:ff:ff

inet 10.56.85.236/24 brd 10.56.85.255 scope global ens192

valid_lft forever preferred_lft forever

inet6 fe80::250:56ff:fe99:264b/64 scope link

valid_lft forever preferred_lft forever

3: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000

link/ether 00:50:56:99:6d:0e brd ff:ff:ff:ff:ff:ff

inet 172.25.1.11/24 brd 172.25.1.255 scope global ens33

valid_lft forever preferred_lft forever

inet6 fe80::250:56ff:fe99:6d0e/64 scope link

valid_lft forever preferred_lft forever

iostat -c

# node 1 (rac1)
Linux 3.10.0-123.el7.x86_64 (localhost.localdomain) 07/18/2019 _x86_64_ (2 CPU)

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           6.62    0.02    0.71    0.20    0.00   92.44

# node 2 (rac2)
Linux 3.10.0-123.el7.x86_64 (localhost.localdomain) 07/18/2019 _x86_64_ (2 CPU)

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           9.11    0.03    1.05    0.17    0.00   89.64

Disable the firewall, SELinux, iptables and firewalld on all nodes.
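On RHEL/CentOS 7 this is typically done as follows (a sketch; adjust to the services actually present on your systems):

#run on all nodes
systemctl stop firewalld && systemctl disable firewalld             #do the same for the legacy iptables service if it is installed
setenforce 0                                                        #disable SELinux for the running session
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config #make it permanent across reboots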

 

Resource limits configuration

Configure kernel parameters and resource limits for the grid and oracle users.

 

#run on all nodes

[root@rac1 ~]# vim /etc/sysctl.conf

fs.aio-max-nr = 1048576

fs.file-max = 6815744

kernel.shmall = 4294967296

kernel.shmmax = 6871947673  #= physical memory (GB) * 1024 * 1024 * 1024 * 0.8 (use only 80% of physical memory)

kernel.shmmni = 4096

kernel.sem = 250 32000 100 128

net.ipv4.ip_local_port_range = 9000 65500

net.core.rmem_default = 262144

net.core.rmem_max = 4194304

net.core.wmem_default = 262144

net.core.wmem_max = 1048576

[root@rac1 ~]# sysctl -p #reload the configuration so it takes effect
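As a cross-check of the kernel.shmmax figure above (a sketch assuming roughly 8 GB of RAM): 8 * 1024 * 1024 * 1024 * 0.8 = 6871947673.6, rounded down to 6871947673. The same 80% figure can be derived from the running system:

free -b | awk '/^Mem:/{printf "suggested kernel.shmmax = %.0f\n", $2*0.8}'  #80% of physical memory, in bytes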

[root@rac1 ~]# vim /etc/security/limits.conf

grid soft nofile 1024

grid hard nofile 65536

grid soft nproc 2047

grid hard nproc 16384

grid soft stack 10240

grid hard stack 32768

oracle soft nofile 1024

oracle hard nofile 65536

oracle soft nproc 2047

oracle hard nproc 16384

oracle soft stack 10240

oracle hard stack 32768

[root@rac1 ~]# vim /etc/pam.d/login

session required pam_limits.so
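After a fresh login the limits can be spot-checked for each user (an illustrative check, not from the original notes):

su - grid -c 'ulimit -n -u -s'    #open files / max processes / stack size should reflect limits.conf
su - oracle -c 'ulimit -n -u -s'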

Adjust the tmpfs size

If the server has a lot of memory, the SGA target cannot exceed the tmpfs size, so tmpfs must be enlarged to a value greater than sga_target. Edit /etc/fstab and modify the tmpfs line.

Note: when sga_target is first set to 0 (switching to manual memory management), sga_max_size changes along with it.

[root@rac1 ~]# cat /etc/fstab | grep '/dev/shm'

tmpfs /dev/shm tmpfs defaults,size=7G 0 0

[root@rac1 ~]# mount -o remount /dev/shm/

[root@rac1 ~]# df -h | grep '/dev/shm' #the available space should be close to physical memory

tmpfs 7.0G 80K 7.0G 1% /dev/shm

 

User and group configuration

Create the users and groups for grid and oracle.

Run on all nodes; the group and user IDs must be identical on every node.

groupadd -g 5001 oinstall

groupadd -g 5002 dba

groupadd -g 5003 oper

groupadd -g 5004 asmdba

groupadd -g 5005 asmadmin

groupadd -g 5006 asmoper

useradd -u 5001 -g oinstall -G asmadmin,asmdba,asmoper,dba -d /home/grid -s /bin/bash -c "Grid Infrastructure Owner" grid

useradd -u 5002 -g oinstall -G asmadmin,asmdba,dba,oper -d /home/oracle -s /bin/bash -c "Oracle Software Owner" oracle

echo "grid" | passwd --stdin grid

echo "oracle" | passwd --stdin oracle
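Since the installers require matching IDs on every node, verify them before continuing (illustrative):

id grid; id oracle   #run on each node; the uid=/gid= values must be identical everywhere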

Create the grid and oracle installation directories

mkdir -p /u01/grid/product/grid11g

mkdir -p /u01/11.2.4/grid

mkdir -p /u01/oracle/product/db11g

chown -R grid:oinstall /u01

chown -R oracle:oinstall /u01/oracle

chmod -R 775 /u01

Configure environment variables for the grid and oracle users

[root@rac1 ~]# vim /home/grid/.bash_profile

export TMP=/tmp

export TMPDIR=$TMP

export ORACLE_BASE=/u01/grid

export ORACLE_HOME=/u01/11.2.4/grid

#export ORACLE_HOME=$ORACLE_BASE/product/grid11g

export ORACLE_SID=+ASM1 #+ASM1 on node 1, +ASM2 on node 2, and so on

export PATH=$HOME/bin:$ORACLE_HOME/bin:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin:$ORACLE_BASE/common/oracle/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:$ORACLE_HOME/oracm/lib:/usr/lib:/usr/local/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib:$ORACLE_HOME/network/jlib:$ORACLE_HOME/oracm/lib

[root@rac1 ~]# vim /home/oracle/.bash_profile

export TMP=/tmp

export TMPDIR=$TMP

export ORACLE_BASE=/u01/oracle

export ORACLE_HOME=$ORACLE_BASE/product/db11g

export ORACLE_SID=nodedba1 #the Oracle instance name. With a single instance it can be set here; with multiple instances it is better left unset (in cluster mode the SID is generated automatically as the database name plus an incrementing node number)

export PATH=/usr/sbin:$PATH

export PATH=$ORACLE_HOME/bin:$PATH

export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib

export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
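A quick way to confirm both profiles are picked up on login (illustrative; values shown are for node 1):

su - grid -c 'echo $ORACLE_SID $ORACLE_HOME'     #expect: +ASM1 /u01/11.2.4/grid
su - oracle -c 'echo $ORACLE_SID $ORACLE_HOME'   #expect: nodedba1 /u01/oracle/product/db11g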

 

SSH mutual trust configuration

Configure passwordless SSH between all nodes so files can be copied between them. Trust must be set up separately for the grid and oracle users; configuring it for root is optional. This setup has only two nodes, rac1 and rac2. For example, for the oracle and grid users:

[oracle@rac1 ~]$ ssh-keygen #press Enter at every prompt

[oracle@rac1 ~]$ ssh-copy-id -i .ssh/id_rsa.pub oracle@rac2

[oracle@rac1 ~]$ ssh rac2 hostname #verify that the trust works

rac2

[grid@rac1 ~]$ ssh-keygen #press Enter at every prompt

[grid@rac1 ~]$ ssh-copy-id -i .ssh/id_rsa.pub grid@rac2

[grid@rac1 ~]$ ssh rac2 hostname #verify that the trust works

rac2

The above only sets up trust from rac1; the oracle and grid users on rac2 must be given the same trust towards rac1. Alternatively, the sshUserSetup.sh script shipped with the installation media can configure trust for both nodes in one pass:

[grid@orcl1 sshsetup]$ pwd

/u01/database/sshsetup

[grid@orcl1 sshsetup]$ ./sshUserSetup.sh -user grid -hosts "orcl1 orcl2" -advanced -exverify -confirm

[grid@orcl1 sshsetup]$ ./sshUserSetup.sh -user oracle -hosts "orcl1 orcl2" -advanced -exverify -confirm
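Whichever method is used, the trust can be verified in both directions for both users with something like (a sketch using the rac1/rac2 hostnames of this environment):

#run once as grid and once as oracle, on each node
for n in rac1 rac2; do ssh $n hostname; done   #must print the hostname without asking for a password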

 

Dependency package installation

Oracle RAC on Linux depends on a large number of packages, which are installed from a yum repository.

x86_64 packages:

#run on all nodes
[root@rac1 ~]#

yum install -y binutils-* compat-db* compat-db-* control-center* gcc-* gcc-c++-* glibc-* glibc-common-* gnome-libs-* make-* pdksh* sysstat-* xscreensaver-* make-* gdb-6.1post-* glibc-* glibc-common-* glibc-devel-* glibc-devel-* compat-gcc-* compat-gcc-c++-* compat-libstdc++-* compat-libstdc++-devel-* gnome-libs-* libstdc++-* libstdc++-devel-* openmotif-* sysstat-* setarch-* libaio-* libaio-devel-* libXp* libXp-devel* elfutils-libelf-devel* unixODBC-* unixODBC-devel-* compat-libcap1.x86_64 libcap.so.1 sg3_utils

x86 (32-bit) packages:

[root@rac1 ~]# yum install -y compat-libstdc++-33-3.2.3-69.el6.i686 glibc-devel-2.12-1.132.el6.i686 libaio-devel-0.3.107-10.el6.i686 libstdc++-4.4.7-4.el6.i686 unixODBC-2.2.14-12.el6_3.i686 unixODBC-devel-2.2.14-12.el6_3.i686 libaio-0.3.107-10.el6.i686

[root@rac1 yum.repos.d]# rpm -ivh compat-libstdc++-33-3.2.3-61.x86_64.rpm

[root@rac1 yum.repos.d]# rpm -ivh pdksh-5.2.14-37.el5_8.1.x86_64.rpm #the pdksh package from the RHEL 5.9 ISO can be used

Installing just the key packages is sufficient:

yum -y install bc binutils compat-libcap1 compat-libstdc++-33 glibc glibc-devel ksh \
    libaio libaio-devel libX11 libXau libXi libXtst libXrender libXrender-devel \
    libgcc libstdc++ libstdc++-devel libxcb make nfs-utils net-tools \
    python python-configshell python-rtslib python-six pyparsing kernel-headers \
    smartmontools sysstat targetcli gcc gcc-c++ tigervnc-server libmpc cpp
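A simple way to spot anything that failed to install (illustrative; the Grid installer's runcluvfy.sh performs a more thorough prerequisite check later):

rpm -q binutils compat-libcap1 compat-libstdc++-33 gcc gcc-c++ glibc glibc-devel ksh libaio libaio-devel libstdc++ libstdc++-devel make sysstat | grep 'not installed'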

 

Shared storage configuration

Multipath configuration

Once the storage is attached to the servers over Fibre Channel, the system detects it automatically, but because of multipathing a single LUN presented by the storage shows up as several disk devices. Multipath software therefore needs to be installed and configured so that each LUN maps to exactly one system disk device. Every LUN has a unique scsi_id, so the scsi_id can be used to recognise that multiple paths belong to the same device and to establish the one-to-one mapping between LUNs and devices.

Install and configure the multipath software:

#run on all nodes

[root@rac1 ~]# rpm -qa | grep multipath

device-mapper-multipath-0.4.9-66.el7.x86_64

device-mapper-multipath-libs-0.4.9-66.el7.x86_64

[root@rac1 ~]# systemctl enable multipathd.service

[root@rac1 ~]# systemctl restart multipathd.service

Since this is a test environment with a locally built iSCSI share, the iSCSI setup itself is skipped here.

[root@rac1 ~]# iscsiadm -m session

tcp: [1] 10.56.85.242:3260,1 iqn.2019-07.com.example.store4 (non-flash)

tcp: [2] 10.56.85.242:3260,1 iqn.2019-07.com.example.store5 (non-flash)

[root@rac1 ~]# lsblk

[root@rac1 ~]# cat /etc/multipath.conf | grep -v '^$\|^#'

defaults {

polling_interval 30

failback immediate

no_path_retry 5

rr_min_io 100

path_checker tur

user_friendly_names yes #enable friendly device names

}

multipaths {

multipath {

wwid 3600140512ad42606d4c4892bc3b673e9 #the device WWID (scsi_id)

alias data1 #map the device with this WWID to the alias data1

}

multipath {

wwid 360014057ed3488c5883437c91af27037

alias data2

}

}
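After editing multipath.conf, reload the maps and confirm that the aliases appear (illustrative; the WWIDs are the ones used above):

multipath -r                                   #re-read multipath.conf and rebuild the maps
multipath -ll                                  #data1 and data2 should each list their underlying sd* paths
ls -l /dev/mapper/data1 /dev/mapper/data2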

UDEV configuration

Mark the multipath-mapped disks as Oracle ASM raw/block devices.

# /lib/udev/scsi_id -g -u /dev/sdb shows the WWID (scsi_id) of a raw device

[root@rac1 ~]# cat /etc/udev/rules.d/99-oracle-asmdevices.rules

KERNEL=="sd*", ENV{DEVTYPE}=="disk", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode", RESULT=="3600140512ad42606d4c4892bc3b673e9", RUN+="/bin/sh -c 'mknod /dev/asmdata1 b $major $minor; chown grid:asmadmin /dev/asmdata1; chmod 0660 /dev/asmdata1'"

KERNEL=="sd*", ENV{DEVTYPE}=="disk", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id -g -u -d $devnode", RESULT=="360014057ed3488c5883437c91af27037", RUN+="/bin/sh -c 'mknod /dev/asmdata2 b $major $minor; chown grid:asmadmin /dev/asmdata2; chmod 0660 /dev/asmdata2'"

[root@rac1 ~]# udevadm control --reload-rules

[root@rac1 ~]# udevadm trigger

[root@rac1 ~]# ll /dev/ | grep asm  #if the devices are not detected, use KERNEL=="dm-*" instead of KERNEL=="sd*" (match the multipath dm devices)
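Once the rules have been triggered, the ASM devices should exist with the expected ownership; if they do not, udevadm test helps show whether the rule matched (illustrative commands):

ls -l /dev/asmdata*                               #expect: brw-rw---- ... grid asmadmin
/usr/lib/udev/scsi_id -g -u -d /dev/sdb           #compare against the RESULT== value in the rule
udevadm test /sys/block/sdb 2>&1 | grep asmdata   #shows whether the rule fired for this device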