ybaidukuai 发表于 2018-5-10 11:13:29

redhat6 + 11G RAC 双节点部署

  
一、配置网络环境
  node1
  # vi /etc/sysconfig/network
  NETWORKING=yes
  NETWORKING_IPV6=no
  HOSTNAME=node1
  
  # vi /etc/sysconfig/network-scripts/ifcfg-eth0
  # Intel Corporation 82540EM GigabitEthernet Controller
  DEVICE=eth0
  BOOTPROTO=static
  IPADDR=192.168.10.41
  NETMASK=255.255.255.0
  GATEWAY=192.168.10.1
  ONBOOT=yes
  
  # vi /etc/sysconfig/network-scripts/ifcfg-eth1
  # Intel Corporation 82540EM GigabitEthernet Controller
  DEVICE=eth1
  BOOTPROTO=static
  IPADDR=10.10.10.41
  NETMASK=255.255.255.0
  ONBOOT=yes
  
  # vi /etc/hosts
  # Do not remove the following line, or various programs
  # that require network functionality will fail.
  127.0.0.1 localhost
  ::1             localhost6.localdomain6 localhost6
  192.168.10.41 node1
  192.168.10.43 node1-vip
  10.10.10.41 node1-priv
  
  192.168.10.42 node2
  192.168.10.44 node2-vip
  10.10.10.42 node2-priv
  
  192.168.10.55 rac_scan
  
  #service network restart
  
  
  (node2与node1基本相同,IP和主机名不同)
  
二、建立用户、组、oracle和grid文件夹
  node1
  # vi mkuser.sh
  groupadd -g 200 oinstall
  groupadd -g 201 dba
  groupadd -g 202 oper
  groupadd -g 203 asmadmin
  groupadd -g 204 asmoper
  groupadd -g 205 asmdba
  useradd -u 200 -g oinstall -G dba,asmdba,oper oracle
  useradd -u 201 -g oinstall -G asmadmin,asmdba,asmoper,oper,dba grid

  # sh mkuser.sh

  # vi mkdir.sh
  mkdir -p /u01/app/oraInventory
  chown -R grid:oinstall /u01/app/oraInventory/
  chmod -R 775 /u01/app/oraInventory/
  mkdir -p /u01/11.2.0/grid
  chown -R grid:oinstall /u01/11.2.0/grid/
  chmod -R 775 /u01/11.2.0/grid/
  mkdir -p /u01/app/oracle
  mkdir -p /u01/app/oracle/cfgtoollogs
  mkdir -p /u01/app/oracle/product/11.2.0/db_1
  chown -R oracle:oinstall /u01/app/oracle
  chmod -R 775 /u01/app/oracle
  
  # sh mkdir.sh
  
  # passwd oracle
  
  # passwd grid
  
  # id oracle
  uid=200(oracle)gid=200(oinstall) groups=200(oinstall),201(dba),202(oper),205(asmdba)
  
  # id grid
  uid=201(grid)gid=200(oinstall) groups=200(oinstall),201(dba),202(oper),203(asmadmin),204(asmoper),205(asmdba)
  
  # id nobody
  uid=99(nobody)gid=99(nobody) groups=99(nobody)

(node2与node1相同)
  

  三、修改/etc目录下的4个文件
  node1
  # vi /etc/sysctl.conf
  fs.aio-max-nr = 1048576
  fs.file-max = 6815744
  kernel.shmall = 2097152
  kernel.shmmax = 536870912
  kernel.shmmni = 4096
  kernel.sem = 250 32000 100 128
  net.ipv4.ip_local_port_range = 9000 65500
  net.core.rmem_default = 262144
  net.core.rmem_max = 4194304
  net.core.wmem_default = 262144
  net.core.wmem_max = 1048576
  
  
  # sysctl -p
  
  
  # vi /etc/security/limits.conf
  oracle soft nproc 2047
  oracle hard nproc 16384
  oracle soft nofile 1024
  oracle hard nofile 65536
  oracle soft stack 10240
  grid soft nproc 2047
  grid hard nproc 16384
  grid soft nofile 1024
  grid hard nofile 65536
  grid soft stack 10240

  #vi /etc/pam.d/login
  session required /lib/security/pam_limits.so

  # vi /etc/profile
  if [ $USER = "oracle" ] || [ $USER = "grid" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
  fi
(node2与node1相同)


  四、关闭ntp服务,采用oracle自带的时间,
关闭邮件服务
  node1
  # chkconfig ntpd off
  
  # chkconfig ntpd --list
  
  # mv /etc/ntp.conf /etc/ntp.conf.bak
  
  
  
  # chkconfig sendmail off

  # chkconfig sendmail --list
  
  
  
  
  
(node2与node1相同)
  
  
  五、修改oracle和grid用户的环境变量
  
  node1
  #su - oracle
  
  $vi .bash_profile
  export EDITOR=vi
  export ORACLE_SID=prod1
  export ORACLE_BASE=/u01/app/oracle
  export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
  export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
  export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
  umask 022

  $. .bash_profile
  
  # su - grid
  
  $vi .bash_profile
  export EDITOR=vi
  export ORACLE_SID=+ASM1
  export ORACLE_BASE=/u01/app/oracle
  export ORACLE_HOME=/u01/11.2.0/grid
  export GRID_HOME=/u01/11.2.0/grid
  export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
  export THREADS_FLAG=native
  export PATH=$ORACLE_HOME/bin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/X11R6/bin
  umask 022
  
  $. .bash_profile
  (node2与node1相同)

六、硬盘分区创建ASM磁盘
node1
查看系统里所有磁盘情况
#fdisk -l
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255heads, 63 sectors/track, 2610 cylinders
Units= cylinders of 16065 * 512 = 8225280 bytes
DeviceBoot      Start         End      BlocksIdSystem
/dev/sda1*         1          13      104391   83Linux
/dev/sda2          14       2610    20860402+   8eLinux LVM
Disk/dev/sdb: 32.2 GB, 32212254720 bytes
255heads, 63 sectors/track, 3916 cylinders
Units= cylinders of 16065 * 512 = 8225280 bytes
Disk/dev/sdb doesn't contain a valid partition table
Disk/dev/sdc: 21.4 GB, 21474836480 bytes
255heads, 63 sectors/track, 2610 cylinders
Units= cylinders of 16065 * 512 = 8225280 bytes
Disk/dev/sdc doesn't contain a valid partition table

给/dev/sdb磁盘分区
# fdisk /dev/sdb
给/dev/sdc磁盘分区
# fdisk /dev/sdc
查看系统里的磁盘信息
# fdisk -l
格式化/dev/sdb1磁盘
# mkfs.ext3 /dev/sdb1
挂载新磁盘/dev/sdb1到/u01,查看挂载情况
# mount /dev/sdb1 /u01
# df -h
Filesystem                     Size Used Avail Use% Mounted on
/dev/mapper/VolGroup00-LogVol0018G 6.1G   11G38% /
/dev/sda1                        99M12M   82M13% /boot
tmpfs                            782M   0 782M   0% /dev/shm
/dev/sdb1                        30G 173M   28G   1% /u01

查看物理内存和换页空间swap
# free -m
             total       used       free    shared    buffers   cached
Mem:          1562       1525         37          0         11       1438
-/+buffers/cache:         75       1486
Swap:         2047          0       2047

创建一个大文件
# dd if=/dev/zero of=/u01/swapfile1 bs=1024k count=2048
2048+0records in
2048+0records out
2147483648bytes (2.1 GB) copied, 5.66353 seconds, 379 MB/s

创建swap文件
# mkswap -c /u01/swapfile1
Settingup swapspace version 1, size = 2147479 Kb

挂载swap文件
# swapon /u01/swapfile1

查看物理内存和更改后的换页空间swap
# free -m
             total       used       free    shared    buffers   cached
Mem:          1562       1525         37          0         11       1438
-/+buffers/cache:         75       1486
Swap:         4095          0       4095

将挂载的新磁盘,增加的swap文件写入到fstab文件,重启系统后会自动挂载
# vi /etc/fstab
/dev/VolGroup00/LogVol00/                      ext3    defaults      1 1
LABEL=/boot             /boot                   ext3    defaults      1 2
tmpfs                   /dev/shm                tmpfs   defaults,size=1g      0 0
devpts                  /dev/pts                devptsgid=5,mode=6200 0
sysfs                   /sys                  sysfs   defaults      0 0
proc                  /proc                   proc    defaults       0 0
/dev/VolGroup00/LogVol01swap                   swap    defaults      0 0
/dev/sdb1            /u01                  ext3    defaults      0 0
/u01/swapfile1          swap                   swap    defaults      0 0


# mkfs.ext3 /dev/sdb1
挂载新磁盘/dev/sdb1到/u01,查看挂载情况
# mount /dev/sdb1 /u01
# df -h
Filesystem                     Size Used Avail Use% Mounted on
/dev/mapper/VolGroup00-LogVol0018G 6.1G   11G38% /
/dev/sda1                        99M12M   82M13% /boot
tmpfs                            782M   0 782M   0% /dev/shm
/dev/sdb1                        30G 173M   28G   1% /u01

查看物理内存和换页空间swap
# free -m
             total       used       free    shared    buffers   cached
Mem:          1562       1525         37          0         11       1438
-/+buffers/cache:         75       1486
Swap:         2047          0       2047

创建一个大文件
# dd if=/dev/zero of=/u01/swapfile1 bs=1024k count=2048
2048+0records in
2048+0records out
2147483648bytes (2.1 GB) copied, 5.66353 seconds, 379 MB/s

创建swap文件
# mkswap -c /u01/swapfile1
Settingup swapspace version 1, size = 2147479 Kb

挂载swap文件
# swapon /u01/swapfile1

查看物理内存和更改后的换页空间swap
# free -m
             total       used       free    shared    buffers   cached
Mem:          1562       1525         37          0         11       1438
-/+buffers/cache:         75       1486
Swap:         4095          0       4095

将挂载的新磁盘,增加的swap文件写入到fstab文件,重启系统后会自动挂载
# vi /etc/fstab
/dev/VolGroup00/LogVol00/                      ext3    defaults      1 1
LABEL=/boot             /boot                   ext3    defaults      1 2
tmpfs                   /dev/shm                tmpfs   defaults,size=1g      0 0
devpts                  /dev/pts                devptsgid=5,mode=6200 0
sysfs                   /sys                  sysfs   defaults      0 0
proc                  /proc                   proc    defaults       0 0
/dev/VolGroup00/LogVol01swap                   swap    defaults      0 0
/dev/sdb1            /u01                  ext3    defaults      0 0
/u01/swapfile1          swap                   swap    defaults      0 0

查看ASM磁盘管理软件的位置 (从网站下载并上传到linux系统)
# cd /soft/asm
# ls
oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm
oracleasmlib-2.0.4-1.el5.i386.rpm
oracleasm-support-2.1.3-1.el5.i386.rpm

注意与内核版本的匹配
# uname -a
Linux node1 2.6.18-194.el5 #1 SMP Tue Mar16 21:52:43 EDT 2010 i686 i686 i386 GNU/Linux

安装ASM管理软件
# rpm -ivh *.rpm
warning:oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm:Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing...            ###########################################
1:oracleasm-support      ########################################### [ 33%]
2:oracleasm-2.6.18-194.el########################################### [ 67%]
3:oracleasmlib            ###########################################

配置 oracleasm初始化
# service oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) : y
Scan for Oracle ASM disks on boot (y/n) :
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [   OK   ]
Scanning the system for Oracle ASMLib disks:

建立 oracleasm 磁盘
# service oracleasm createdisk OCR_VOTE1 /dev/sdc1
Marking disk "OCR_VOTE1" as an ASM disk:
# service oracleasm createdisk OCR_VOTE2 /dev/sdc2
Marking disk "OCR_VOTE2" as an ASM disk:
# service oracleasm createdisk OCR_VOTE3 /dev/sdc3
Marking disk "OCR_VOTE3" as an ASM disk:
# service oracleasm createdisk ASM_DATA1 /dev/sdc5
Marking disk "ASM_DATA1" as an ASM disk:
# service oracleasm createdisk ASM_DATA2 /dev/sdc6
Marking disk "ASM_DATA2" as an ASM disk:
# service oracleasm createdisk ASM_RCY1 /dev/sdc7
Marking disk "ASM_RCY1" as an ASM disk: [   OK   ]
# service oracleasm createdisk ASM_RCY2 /dev/sdc8
Marking disk "ASM_RCY2" as an ASM disk:
# service oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_RCY1
ASM_RCY2
OCR_VOTE1
OCR_VOTE2      
OCR_VOTE3

这个时候把node1 /soft/asm的三个包拷贝到node2 /soft/asm里

拷贝完后查看ASM磁盘管理软件的位置 (从网站下载并上传到linux系统)
注意与内核版本的匹配
# uname -a
Linux node1 2.6.18-194.el5 #1 SMP Tue Mar16 21:52:43 EDT 2010 i686 i686 i386 GNU/Linux

安装ASM管理软件
# rpm -ivh *.rpm
warning:oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm:Header V3 DSA signature: NOKEY, key ID 1e5e0159
Preparing...            ###########################################
1:oracleasm-support      ########################################### [ 33%]
2:oracleasm-2.6.18-194.el########################################### [ 67%]
3:oracleasmlib            ###########################################

Node2也需要执行oracleasm初始化
# service oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.
Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) : y
Scan for Oracle ASM disks on boot (y/n) :
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [   OK   ]
Scanning the system for Oracle ASMLib disks:

然后执行asm扫描并查看
# service oracleasm scandisks
Scanning the system for Oracle ASMLib disks:
# service oracleasm listdisks
ASM_DATA1
ASM_DATA2
ASM_RCY1
ASM_RCY2
OCR_VOTE1
OCR_VOTE2
OCR_VOTE3


(node1和node2,共享磁盘/dev/sdc不用配置,其他配置相同)



七、建立主机间的信任关系
建立节点之间 oracle 、grid用户之间的信任(通过 ssh生成成对秘钥)
node1   --oracle用户
# su - oracle
$ mkdir .ssh
$ ls -a
... .bash_history.bash_logout.bash_profile .bashrc.emacs.kde .mozilla.ssh.viminfo
$ ssh-keygen -t rsa
$ ssh-keygen -t dsa

Node2   --oracle用户
# su - oracle
$ mkdir .ssh
$ ls -a
... .bash_history.bash_logout.bash_profile .bashrc.emacs.kde .mozilla.ssh.viminfo
$ ssh-keygen -t rsa
$ ssh-keygen -t dsa
配置信任关系
$ ls .ssh
id_dsa   id_dsa.pubid_rsa   id_rsa.pub   known_hosts

$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys

$ cat .ssh/id_dsa.pub >> .ssh/authorized_keys

$ ssh node2 cat .ssh/id_rsa.pub >> .ssh/authorized_keys
$ ssh node2 cat .ssh/id_dsa.pub >> .ssh/authorized_keys
oracle@node2's password:

$ scp .ssh/authorized_keys node2:~/.ssh
oracle@node2's password:
authorized_keys                                          100% 1988   1.9KB/s00:00

验证信任关系
$ ssh node1 date
$ ssh node1-priv date
$ ssh node2-priv date
$ ssh node2 date
$ ssh node1 date
Wed Aug 27 00:48:15 CST 2014
$ ssh node1-priv date
Wed Aug 27 00:48:17 CST 2014
$ ssh node2 date
Wed Aug 27 00:48:18 CST 2014
$ ssh node2-priv date
Wed Aug 27 00:48:21 CST 2014
$ ssh node2 date;date
Wed Aug 27 00:50:28 CST 2014
Wed Aug 27 00:50:29 CST 2014
$ ssh node2-priv date;date
Wed Aug 27 00:50:38 CST 2014
Wed Aug 27 00:50:38 CST 2014

$ ssh node2 date
$ ssh node2-priv date
$ ssh node1-priv date
$ ssh node1 date

$ ssh node2 date
Wed Aug 27 00:49:09 CST 2014
$ ssh node2-priv date
Wed Aug 27 00:49:11 CST 2014
$ ssh node1 date
Wed Aug 27 00:49:15 CST 2014
$ ssh node1-priv date
Wed Aug 27 00:49:19 CST 2014

$ ssh node1 date;date
Wed Aug 27 00:51:28 CST 2014
Wed Aug 27 00:51:29 CST 2014
$ ssh node1-priv date;date
Wed Aug 27 00:51:48 CST 2014
Wed Aug 27 00:51:48 CST 2014


node1   --grid用户
# su - grid
$ mkdir .ssh
$ ls -a
... .bash_history.bash_logout.bash_profile .bashrc.emacs.kde .mozilla.ssh .viminfo

$ ssh-keygen -t rsa
$ ssh-keygen -t dsa
Node2   --grid用户
# su - grid
$ mkdir .ssh
$ ls -a
... .bash_history.bash_logout.bash_profile .bashrc.emacs.kde .mozilla.ssh .viminfo

$ ssh-keygen -t rsa
$ ssh-keygen -t dsa
配置信任关系

$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys

$ cat .ssh/id_dsa.pub >> .ssh/authorized_keys

$ ssh node2 cat .ssh/id_rsa.pub >> .ssh/authorized_keys
$ ssh node2 cat .ssh/id_dsa.pub >> .ssh/authorized_keys
grid@node2's password:

$ scp .ssh/authorized_keys node2:~/.ssh
grid@node2's password:
authorized_keys                                          100% 1984   1.9KB/s   00:00


验证信任关系
$ ssh node1 date
$ ssh node1-priv date
$ ssh node2-priv date
$ ssh node2 date
$ ssh node1 date
Wed Aug 27 00:57:37 CST 2014
$ ssh node1-priv date
Wed Aug 27 00:57:39 CST 2014
$ ssh node2 date
Wed Aug 27 00:57:41 CST 2014
$ ssh node2-priv date
Wed Aug 27 00:57:43 CST 2014
$ ssh node2-priv date;date
Wed Aug 27 00:57:50 CST 2014
Wed Aug 27 00:57:51 CST 2014
$ ssh node2 date;date
Wed Aug 27 00:58:01 CST 2014
Wed Aug 27 00:58:01 CST 2014

$ ssh node2 date
$ ssh node2-priv date
$ ssh node1-priv date
$ ssh node1 date

$ ssh node2 date
Wed Aug 27 00:59:01 CST 2014
$ ssh node2-priv date
Wed Aug 27 00:59:03 CST 2014
$ ssh node1 date
Wed Aug 27 00:59:05 CST 2014
$ ssh node1-priv date
Wed Aug 27 00:59:08 CST 2014

$ ssh node1-priv date;date
Wed Aug 27 00:59:12 CST 2014
Wed Aug 27 00:59:12 CST 2014
$ ssh node1 date;date
Wed Aug 27 00:59:25 CST 2014
Wed Aug 27 00:59:24 CST 2014

八、校验安装前的环境
以 grid用户的身份校验安装环境(在 grid 的安装软件包目录下)
# cd /soft
# ls
asm linux_11gR2_database_1of2.ziplinux_11gR2_database_2of2.ziplinux_11gR2_grid.zip
# unzip linux_11gR2_grid.zip
# ls
asm gridlinux_11gR2_database_1of2.ziplinux_11gR2_database_2of2.ziplinux_11gR2_grid.zip
# chown -R grid:oinstall grid/
# chmod -R 775 grid/
# chown -R grid:oinstall /tmp/bootstrap/    没有这个目录就不用操作了
# chmod -R 775 /tmp/bootstrap/            没有这个目录就不用操作了
# su - grid
$ cd /soft/grid/
$ ./runcluvfy.sh stage -pre crsinst -n node1,node2 -fixup -verbose

注意其中“failed”的位置
对于校验中没有安装的软件包进行安装(所有节点)
(node1和node2 相同)最后所有的节点都应该是passed自己检查一下。
安装Grid

# /u01/app/oraInventory/orainstRoot.sh
Changingpermissions of /u01/app/oraInventory.
Addingread,write permissions for group.
Removingread,write,execute permissions for world.
Changinggroupname of /u01/app/oraInventory to oinstall.
Theexecution of the script is complete.

节点2也要运行/u01/app/oraInventory/orainstRoot.sh

# /u01/11.2.0/grid/root.sh
节点2也要运行/u01/11.2.0/grid/root.sh

(node2也这样,记住node1运行完第一个脚本,node2也要运行第一个脚本,然后node1再运行第二个脚本
node2也再运行第二个脚本,顺序不能错。)

完成grid安装后,检查crs进程是否开启
node1
# vi /etc/profile
export PATH=$PATH:/u01/11.2.0/grid/bin


# source /etc/profile


# crsctl check crs
CRS-4638:Oracle High Availability Services is online
CRS-4537:Cluster Ready Services is online
CRS-4529:Cluster Synchronization Services is online
CRS-4533:Event Manager is online


# crs_stat -t
Name         Type         Target    State    Host      
------------------------------------------------------------
ora....ER.lsnrora....er.type ONLINE    ONLINE    node1      
ora....N1.lsnrora....er.type ONLINE    ONLINE    node1      
ora....VOTE.dgora....up.type ONLINE    ONLINE    node1      
ora.asm      ora.asm.type   ONLINE   ONLINE    node1      
ora.eons       ora.eons.typeONLINE   ONLINE    node1      
ora.gsd      ora.gsd.type   OFFLINEOFFLINE               
ora....networkora....rk.type ONLINE    ONLINE    node1      
ora....SM1.asmapplication    ONLINE    ONLINE   node1      
ora....E1.lsnrapplication    ONLINE    ONLINE   node1      
ora.node1.gsdapplication   OFFLINE   OFFLINE               
ora.node1.onsapplication   ONLINE    ONLINE    node1      
ora.node1.vipora....t1.type ONLINE    ONLINE   node1      
ora....SM2.asmapplication    ONLINE    ONLINE   node2      
ora....E2.lsnrapplication    ONLINE    ONLINE   node2      
ora.node2.gsdapplication   OFFLINE   OFFLINE               
ora.node2.onsapplication   ONLINE    ONLINE    node2      
ora.node2.vipora....t1.type ONLINE    ONLINE   node2      
ora.oc4j       ora.oc4j.typeOFFLINEOFFLINE               
ora.ons      ora.ons.type   ONLINE   ONLINE    node1      
ora....ry.acfsora....fs.type ONLINE    ONLINE    node1      
ora.scan1.vipora....ip.type ONLINE    ONLINE   node1      








node2
# vi /etc/profile
export PATH=$PATH:/u01/11.2.0/grid/bin


# source /etc/profile


# crsctl check crs
CRS-4638:Oracle High Availability Services is online
CRS-4537:Cluster Ready Services is online
CRS-4529:Cluster Synchronization Services is online
CRS-4533:Event Manager is online



# crs_stat -t
Name         Type         Target   State   Host      
------------------------------------------------------------
ora....ER.lsnrora....er.type ONLINE    ONLINE    node1      
ora....N1.lsnrora....er.type ONLINE    ONLINE    node1      
ora....VOTE.dgora....up.type ONLINE    ONLINE    node1      
ora.asm      ora.asm.type   ONLINE   ONLINE    node1      
ora.eons       ora.eons.typeONLINE   ONLINE    node1      
ora.gsd      ora.gsd.type   OFFLINEOFFLINE               
ora....networkora....rk.type ONLINE    ONLINE   node1      
ora....SM1.asmapplication    ONLINE    ONLINE   node1      
ora....E1.lsnrapplication    ONLINE    ONLINE   node1      
ora.node1.gsdapplication   OFFLINE   OFFLINE               
ora.node1.onsapplication   ONLINE    ONLINE    node1      
ora.node1.vipora....t1.type ONLINE    ONLINE   node1      
ora....SM2.asmapplication    ONLINE    ONLINE   node2      
ora....E2.lsnrapplication    ONLINE    ONLINE   node2      
ora.node2.gsdapplication   OFFLINE   OFFLINE               
ora.node2.onsapplication   ONLINE    ONLINE    node2      
ora.node2.vipora....t1.type ONLINE    ONLINE   node2      
ora.oc4j       ora.oc4j.typeOFFLINEOFFLINE               
ora.ons      ora.ons.type   ONLINE   ONLINE    node1      
ora....ry.acfsora....fs.type ONLINE    ONLINE    node1      
ora.scan1.vipora....ip.type ONLINE    ONLINE   node1      



完成grid安装后,检查crs进程是否开启
node1
# vi /etc/profile
export PATH=$PATH:/u01/11.2.0/grid/bin


# source /etc/profile


# crsctl check crs
CRS-4638:Oracle High Availability Services is online
CRS-4537:Cluster Ready Services is online
CRS-4529:Cluster Synchronization Services is online
CRS-4533:Event Manager is online


# crs_stat -t
Name         Type         Target    State    Host      
------------------------------------------------------------
ora....ER.lsnrora....er.type ONLINE    ONLINE    node1      
ora....N1.lsnrora....er.type ONLINE    ONLINE    node1      
ora....VOTE.dgora....up.type ONLINE    ONLINE    node1      
ora.asm      ora.asm.type   ONLINE   ONLINE    node1      
ora.eons       ora.eons.typeONLINE   ONLINE    node1      
ora.gsd      ora.gsd.type   OFFLINEOFFLINE               
ora....networkora....rk.type ONLINE    ONLINE    node1      
ora....SM1.asmapplication    ONLINE    ONLINE   node1      
ora....E1.lsnrapplication    ONLINE    ONLINE   node1      
ora.node1.gsdapplication   OFFLINE   OFFLINE               
ora.node1.onsapplication   ONLINE    ONLINE    node1      
ora.node1.vipora....t1.type ONLINE    ONLINE   node1      
ora....SM2.asmapplication    ONLINE    ONLINE   node2      
ora....E2.lsnrapplication    ONLINE    ONLINE   node2      
ora.node2.gsdapplication   OFFLINE   OFFLINE               
ora.node2.onsapplication   ONLINE    ONLINE    node2      
ora.node2.vipora....t1.type ONLINE    ONLINE   node2      
ora.oc4j       ora.oc4j.typeOFFLINEOFFLINE               
ora.ons      ora.ons.type   ONLINE   ONLINE    node1      
ora....ry.acfsora....fs.type ONLINE    ONLINE    node1      
ora.scan1.vipora....ip.type ONLINE    ONLINE   node1   




九、安装oracle软件

# cd /soft/

# ls
asmgridlinux_11gR2_database_1of2.ziplinux_11gR2_database_2of2.zip linux_11gR2_grid.zip

# unzip linux_11gR2_database_1of2.zip
…………

# unzip linux_11gR2_database_2of2.zip
…………

# ls
asmgrid database linux_11gR2_database_1of2.ziplinux_11gR2_database_2of2.zip linux_11gR2_grid.zip

# chown -R oracle:oinstall database/

# chmod -R 775 database/

# su - oracle

$ cd /soft/database/

$ ls
docinstall responserpmrunInstaller sshsetupstagewelcome.html




安装前的准备与grid方式一样
$ ./runInstaller
StartingOracle Universal Installer...

CheckingTemp space: must be greater than 80 MB.Actual 7196 MB    Passed
Checkingswap space: must be greater than 150 MB.Actual 4005 MB    Passed
Checkingmonitor: must be configured to display at least 256 colors.    Actual 16777216    Passed
Preparingto launch Oracle Universal Installer from /tmp/OraInstall2014-08-27_03-43-06AM.Please wait ...$










node1
# /u01/app/oracle/product/11.2.0/db_1/root.sh
RunningOracle 11g root.sh script...
Thefollowing environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1
Enterthe full pathname of the local bin directory: :
Thefile "dbhome" already exists in /usr/local/bin.Overwrite it? (y/n)
:y
   Copying dbhome to /usr/local/bin ...
Thefile "oraenv" already exists in /usr/local/bin.Overwrite it? (y/n)
:y
   Copying oraenv to /usr/local/bin ...
Thefile "coraenv" already exists in /usr/local/bin.Overwrite it? (y/n)
:y
   Copying coraenv to /usr/local/bin ...
Entrieswill be added to the /etc/oratab file as needed by
DatabaseConfiguration Assistant when a database is created
Finishedrunning generic part of root.sh script.
Nowproduct-specific root actions will be performed.
Finishedproduct-specific root actions.



node2
# /u01/app/oracle/product/11.2.0/db_1/root.sh
RunningOracle 11g root.sh script...

Thefollowing environment variables are set as:
    ORACLE_OWNER= oracle
    ORACLE_HOME=/u01/app/oracle/product/11.2.0/db_1
Enterthe full pathname of the local bin directory: :
Thefile "dbhome" already exists in /usr/local/bin.Overwrite it? (y/n)
:y
   Copying dbhome to /usr/local/bin ...
Thefile "oraenv" already exists in /usr/local/bin.Overwrite it? (y/n)
:y
   Copying oraenv to /usr/local/bin ...
Thefile "coraenv" already exists in /usr/local/bin.Overwrite it? (y/n)
:y
   Copying coraenv to /usr/local/bin ...
Entrieswill be added to the /etc/oratab file as needed by
DatabaseConfiguration Assistant when a database is created
Finishedrunning generic part of root.sh script.
Nowproduct-specific root actions will be performed.
Finishedproduct-specific root actions.





十、创建ASM磁盘组
# su - grid
$ asmca




十一、DBCA建库
  $dbca
  
  完成oracle数据库的安装
  验证
  $sqlplus / as sysdba
  
  SQL*Plus: Release11.2.0.1.0 Production on Wed Aug 27 04:52:36 2014
  
  Copyright (c)1982, 2009, Oracle.All rights reserved.
  
  
  Connected to:
  Oracle Database11g Enterprise Edition Release 11.2.0.1.0 - Production
  With thePartitioning, Real Application Clusters, Automatic Storage Management, OLAP,
  Data Mining andReal Application Testing options
  
  SQL> select status from gv$instance;
  
  STATUS
  ------------
  OPEN
  OPEN
  
  SQL> show parameter name
  
  NAME                                 TYPE      VALUE
  ----------------------------------------------- ------------------------------
  db_file_name_convert               string
  db_name                              string      prod
  db_unique_name                     string      prod
  global_names                         boolean   FALSE
  instance_name                        string      prod1
  lock_name_space                      string
  log_file_name_convert                string
  service_names                        string      prod
  
  
  
  
  
  
  
  
  
  
  
  
页: [1]
查看完整版本: redhat6 + 11G RAC 双节点部署