Posted by 奇忠诚 on 2019-1-7 09:14:46

Heartbeat+DRBD+NFS High Availability

  1. Environment
  OS:      CentOS 6.4 x86_64, minimal installation
  node1:   192.168.1.13
  node2:   192.168.1.14
  vip:      192.168.1.15
  nfs:      192.168.1.10
  2. Basic configuration
  The following steps are identical on node1 and node2.
#Disable iptables and SELinux
# getenforce
Disabled                        #make sure this shows Disabled
# service iptables stop
#Configure local hosts resolution
# echo "192.168.1.13 node1" >>/etc/hosts
# echo "192.168.1.14 node2" >>/etc/hosts
# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.13 node1
192.168.1.14 node2
#Configure the EPEL repository
# rpm -ivh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
# sed -i 's@#b@b@g' /etc/yum.repos.d/epel.repo
# sed -i 's@mirrorlist@#mirrorlist@g' /etc/yum.repos.d/epel.repo
#Synchronize the time
# yum install ntp -y
# echo "*/10 * * * * /usr/sbin/ntpdate asia.pool.ntp.org&>/dev/null" >/var/spool/cron/root
# ntpdate asia.pool.ntp.org
21 Jun 17:32:45 ntpdate: step time server 211.233.40.78 offset -158.552839 sec
# hwclock -w
#Set up SSH key trust between the nodes
# ssh-keygen
# ssh-copy-id -i ~/.ssh/id_rsa.pub root@node2
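If the key trust is in place, node2 should be reachable from node1 without a password prompt; a quick check (not part of the original steps):
# ssh node2 hostname            #should print node2's hostname without asking for a password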
  3. Install and configure heartbeat
  (1). Install heartbeat
#Run the installation on both node1 and node2
# yum install heartbeat -y
  (2). Configure ha.cf
# cd /usr/share/doc/heartbeat-3.0.4/
# cp authkeys ha.cf haresources /etc/ha.d/
# cd /etc/ha.d/
# ls
authkeys  ha.cf  harc  haresources  rc.d  README.config  resource.d  shellfuncs
# egrep -v "^$|^#" /etc/ha.d/ha.cf
logfile /var/log/ha-log
logfacility local1
keepalive 2
deadtime 30
warntime 10
initdead 120
mcast eth0 225.0.10.1 694 1 0
auto_failback on
node node1
node node2
crm no
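For reference, what the key ha.cf parameters above do (annotations added here; they are not comments in the real file):
keepalive 2                      #send a heartbeat every 2 seconds
deadtime 30                      #declare the peer dead after 30 seconds of silence
warntime 10                      #log a "late heartbeat" warning after 10 seconds
initdead 120                     #grace period at startup, should be at least twice deadtime
mcast eth0 225.0.10.1 694 1 0    #multicast heartbeat: interface, group, port, ttl, loop
auto_failback on                 #resources move back to node1 once it recovers
crm no                           #use the classic haresources mode instead of the CRM/pacemaker stack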
  (3). Configure authkeys
# dd if=/dev/random bs=512 count=1 | openssl md5
0+1 records in
0+1 records out
21 bytes (21 B) copied, 3.1278e-05 s, 671 kB/s
(stdin)= 4206bd8388c16292bc03710a0c747f59
# grep -v ^# /etc/ha.d/authkeys
auth 1
1 md5 4206bd8388c16292bc03710a0c747f59
#Change the permissions of the auth file to 600
# chmod 600 /etc/ha.d/authkeys
  (4). Configure haresources
# grep -v ^# /etc/ha.d/haresources
node1 IPaddr::192.168.1.15/24/eth0
  (5). Start heartbeat
# scp authkeys haresources ha.cf node2:/etc/ha.d/
#Start the service on node1
# /etc/init.d/heartbeat start
Starting High-Availability services: INFO: Resource is stopped
Done.
# chkconfig heartbeat off
#Note: autostart at boot is disabled, so heartbeat has to be started manually after a server reboot
#Start the service on node2
# /etc/init.d/heartbeat start
#Check the result
# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0
    inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0   #the VIP is on the primary node
# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0    #no VIP on the standby node
  (6). Test heartbeat
  Normal state
#node1 info
# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0
    inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0   #the VIP is on the primary node
#node2 info
# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0    #no VIP on the standby node
  Simulate the state after the primary node goes down
#Stop the heartbeat service on the primary node, node1
# /etc/init.d/heartbeat stop
Stopping High-Availability services: Done.
# ip a |grep eth0               #after heartbeat stops on the primary, the VIP resource is taken over
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0
#Check the resources on the standby node, node2
# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0
    inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0
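The takeover can also be followed in the heartbeat log configured in ha.cf (an extra check, not shown in the original steps):
# tail /var/log/ha-log          #on node2, shows the IPaddr 192.168.1.15 resource being acquired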
  Restore the heartbeat service on the primary node
# /etc/init.d/heartbeat start
Starting High-Availability services: INFO: Resource is stopped
Done.
#After heartbeat is restored on the primary node, it takes the resources back
# ip a |grep eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.13/24 brd 192.168.1.255 scope global eth0
    inet 192.168.1.15/24 brd 192.168.1.255 scope global secondary eth0
#Check the standby node
# ip a |grep eth0         #the VIP resource has been removed
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    inet 192.168.1.14/24 brd 192.168.1.255 scope global eth0
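Note: at this point haresources only manages the VIP. For the complete Heartbeat+DRBD+NFS setup, the same haresources entry is normally extended to also chain the DRBD device, the filesystem mount and the NFS service, roughly like the sketch below (the mount point /data and the use of the stock drbddisk/Filesystem resource scripts are assumptions, not taken from this post):
node1 IPaddr::192.168.1.15/24/eth0 drbddisk::nfsdata Filesystem::/dev/drbd1::/data::ext4 nfs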
  4. Install and deploy DRBD

(1). Partition the disk; the steps are the same on node1 and node2
# fdisk /dev/sdb
#Note: /dev/sdb is split into two partitions, /dev/sdb1 and /dev/sdb2; /dev/sdb1 = 19G
# partprobe /dev/sdb
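The interactive fdisk dialogue is omitted above. As an alternative, roughly the same layout could be scripted with parted (a sketch only; the exact sizes and boundaries are assumptions):
# parted -s /dev/sdb mklabel msdos
# parted -s /dev/sdb mkpart primary 1MiB 19GiB       #/dev/sdb1, data partition
# parted -s /dev/sdb mkpart primary 19GiB 100%       #/dev/sdb2, DRBD meta data partition
# partprobe /dev/sdb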
#Format the partition
# mkfs.ext4 /dev/sdb1
Note: the sdb2 partition is the meta data partition and does not need to be formatted
# tune2fs -c -1 /dev/sdb1
Note: set the maximum mount count to -1, disabling the forced fsck based on the number of mounts
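The result can be verified with tune2fs (a quick check, not part of the original steps):
# tune2fs -l /dev/sdb1 | grep -i "mount count"       #"Maximum mount count" should now be -1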
(2). Install DRBD
Because the system is CentOS 6.4, we also need to install the matching kernel packages; their version must match the output of uname -r. The packages were extracted from the installation media (process omitted). The installation is identical on node1 and node2, so only node1 is shown here.
#Install the kernel packages
# rpm -ivh kernel-devel-2.6.32-358.el6.x86_64.rpm kernel-headers-2.6.32-358.el6.x86_64.rpm
# yum install drbd84 kmod-drbd84 -y
(3). Configure DRBD

a. Modify the global configuration file
# egrep -v "^$|^#|^[[:space:]]+#" /etc/drbd.d/global_common.conf
global {
        usage-count no;
}
common {
        protocol C;
        handlers {
        }
        startup {
        }
        options {
        }
        disk {
                on-io-error detach;
                no-disk-flushes;
                no-md-flushes;
                resync-rate 200M;
        }
        net {
                sndbuf-size 512k;
                max-buffers 8000;
                unplug-watermark 1024;
                max-epoch-size 8000;
                cram-hmac-alg "sha1";
                shared-secret "weyee2014";
                after-sb-0pri disconnect;
                after-sb-1pri disconnect;
                after-sb-2pri disconnect;
                rr-conflict disconnect;
        }
}
b. Add a resource
# cat /etc/drbd.d/nfsdata.res
resource nfsdata {
    on node1 {
      device /dev/drbd1;
      disk    /dev/sdb1;
      address 192.168.1.13:7789;
      meta-disk /dev/sdb2 ;
    }
    on node2 {
      device /dev/drbd1;
      disk    /dev/sdb1;
      address 192.168.1.14:7789;
      meta-disk /dev/sdb2 ;
    }
}
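The resource definition can be sanity-checked on node1 before it is copied over (an extra step, not in the original post):
# drbdadm dump nfsdata          #prints the parsed resource definition; syntax errors would be reported here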
c. Copy the configuration files to node2, load the drbd module, and initialize the meta data
# scp global_common.conf nfsdata.res node2:/etc/drbd.d/
# depmod
# modprobe drbd
# lsmod |grep drbd
drbd                  365931  0
libcrc32c               1246  1 drbd
#Initialize the meta data on node1
# drbdadm create-md nfsdata
initializing activity log
NOT initializing bitmap
Writing meta data...
New drbd meta data block successfully created.
#Load the module and initialize the meta data on node2
# depmod
# modprobe drbd
# lsmod |grep drbd
drbd                  365931  0
libcrc32c               1246  1 drbd
# drbdadm create-md nfsdata
initializing activity log
NOT initializing bitmap
Writing meta data...
New drbd meta data block successfully created.
d. Start drbd on node1 and node2
#node1 operations
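A rough sketch of how this step typically continues with drbd 8.4 (the exact commands are assumptions based on the standard DRBD procedure, not taken from this post):
# /etc/init.d/drbd start                  #run on both node1 and node2
# cat /proc/drbd                          #both sides should report cs:Connected ro:Secondary/Secondary
# drbdadm primary --force nfsdata         #on node1 only: force the first promotion and start the initial sync
# watch cat /proc/drbd                    #wait for the sync to finish before putting data on /dev/drbd1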



