Hadoop Cluster Setup (1): HA for the HDFS NameNode
Set up HA for the HDFS NameNode. Prepare three machines:
hadoop01  IP: 192.168.216.203  GATEWAY: 192.168.216.2
hadoop02  IP: 192.168.216.204  GATEWAY: 192.168.216.2
hadoop03  IP: 192.168.216.205  GATEWAY: 192.168.216.2
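The configuration below refers to the machines by hostname, so each machine needs matching name resolution; a minimal /etc/hosts sketch, assuming the names are resolved locally rather than via DNS:
# vim /etc/hosts
192.168.216.203 hadoop01
192.168.216.204 hadoop02
192.168.216.205 hadoop03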
Configure the network interface
# vim /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
HWADDR=00:0C:29:6B:CD:B3   # MAC address of the NIC
ONBOOT=yes                 # yes = bring the interface up at boot
NM_CONTROLLED=yes
BOOTPROTO=none
IPADDR=192.168.216.203     # IP address
PREFIX=24
GATEWAY=192.168.216.2      # gateway
DNS1=8.8.8.8               # first DNS server
DNS2=192.168.10.254        # second DNS server
DEFROUTE=yes
IPV4_FAILURE_FATAL=yes
IPV6INIT=no
NAME="System eth0"
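The new settings take effect once the network service is restarted; a sketch assuming CentOS 6 style init scripts (which this ifcfg layout suggests):
# service network restart
# ifconfig eth0            # confirm the IP address was applied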
Install the Java JDK and configure the environment variables
# vim /etc/profile
#my setting
export JAVA_HOME=/usr/local/jdk1.8.0_152/
export PATH=$PATH:$JAVA_HOME/bin
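Reload the profile and confirm that the JDK is picked up (assuming the JDK archive really was unpacked to /usr/local/jdk1.8.0_152/):
# source /etc/profile
# java -version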
Configure passwordless SSH login among hadoop01, hadoop02 and hadoop03, in both directions.
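A minimal sketch of that setup, run as root on each of the three machines (the key path matches the dfs.ha.fencing.ssh.private-key-files value used later in hdfs-site.xml):
# ssh-keygen -t rsa -P '' -f /root/.ssh/id_rsa
# ssh-copy-id root@hadoop01
# ssh-copy-id root@hadoop02
# ssh-copy-id root@hadoop03
With SSH in place, point Hadoop at the JDK in hadoop-env.sh: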
# vim /usr/local/hadoop-2.7.1/etc/hadoop/hadoop-env.sh
# The java implementation to use.
export JAVA_HOME=/usr/local/jdk1.8.0_152/
# vim /usr/local/hadoop-2.7.1/etc/hadoop/core-site.xml
<configuration>
  <property><name>fs.defaultFS</name><value>hdfs://qian</value></property>
  <property><name>ha.zookeeper.quorum</name><value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value></property>
</configuration>
# vim /usr/local/hadoop-2.7.1/etc/hadoop/hdfs-site.xml
<configuration>
  <property><name>dfs.nameservices</name><value>qian</value></property>
  <property><name>dfs.ha.namenodes.qian</name><value>nn1,nn2</value></property>
  <property><name>dfs.namenode.rpc-address.qian.nn1</name><value>hadoop01:9000</value></property>
  <property><name>dfs.namenode.rpc-address.qian.nn2</name><value>hadoop02:9000</value></property>
  <property><name>dfs.namenode.http-address.qian.nn1</name><value>hadoop01:50070</value></property>
  <property><name>dfs.namenode.http-address.qian.nn2</name><value>hadoop02:50070</value></property>
  <property><name>dfs.namenode.shared.edits.dir</name><value>qjournal://hadoop01:8485;hadoop02:8485;hadoop03:8485/qian</value></property>
  <property><name>dfs.journalnode.edits.dir</name><value>/home/hadata/journalnode/data</value></property>
  <property><name>dfs.ha.automatic-failover.enabled</name><value>true</value></property>
  <property><name>dfs.client.failover.proxy.provider.qian</name><value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value></property>
  <property><name>dfs.ha.fencing.methods</name><value>sshfence</value></property>
  <property><name>dfs.ha.fencing.ssh.private-key-files</name><value>/root/.ssh/id_rsa</value></property>
  <property><name>dfs.ha.fencing.ssh.connect-timeout</name><value>30000</value></property>
  <property><name>dfs.namenode.name.dir</name><value>/home/hadata/dfs/name</value></property>
  <property><name>dfs.datanode.data.dir</name><value>/home/hadata/dfs/data</value></property>
  <property><name>dfs.blocksize</name><value>134217728</value></property>
  <property><name>dfs.permissions.enabled</name><value>false</value></property>
  <property><name>dfs.replication</name><value>3</value></property>
</configuration>
# vim /usr/local/hadoop-2.7.1/etc/hadoop/slaves
hadoop01
hadoop02
hadoop03
Install and configure ZooKeeper
# tar -zxvf /home/zookeeper-3.4.10.tar.gz -C /usr/local/
# cd /usr/local/zookeeper-3.4.10/
# cp ./conf/zoo_sample.cfg ./conf/zoo.cfg
# vim ./conf/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=5
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=2
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/home/zookeeperdata
# the port at which the clients will connect
clientPort=2181
server.1=hadoop01:2888:3888
server.2=hadoop02:2888:3888
server.3=hadoop03:2888:3888
# scp -r /usr/local/zookeeper-3.4.10 hadoop02:/usr/local/
# scp -r /usr/local/zookeeper-3.4.10 hadoop03:/usr/local/
Configure the environment variables on all three machines
# vim /etc/profile
#my setting
export JAVA_HOME=/usr/local/jdk1.8.0_152/
export HADOOP_HOME=/usr/local/hadoop-2.7.1/
export ZK_HOME=/usr/local/zookeeper-3.4.10/
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$ZK_HOME/bin
# scp -r /etc/profile hadoop02:/etc
# scp -r /etc/profile hadoop03:/etc
Reload the profile on each of hadoop01, hadoop02 and hadoop03:
# source /etc/profile
On hadoop01 (server.1 in zoo.cfg):
# mkdir /home/zookeeperdata
# vim /home/zookeeperdata/myid     (enter 1 in the myid file)
1
On hadoop02 (server.2):
# mkdir /home/zookeeperdata
# vim /home/zookeeperdata/myid     (enter 2 in the myid file)
2
On hadoop03 (server.3):
# mkdir /home/zookeeperdata
# vim /home/zookeeperdata/myid     (enter 3 in the myid file)
3
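ZooKeeper has to be started on every node before its status can be checked; with the PATH configured above, the standard start command works from any directory:
# zkServer.sh start        (run on each of hadoop01, hadoop02 and hadoop03)
Then check the status on each node; one of them reports leader and the other two follower: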
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: follower
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: follower
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: leader
# scp -r /usr/local/hadoop-2.7.1/ hadoop02:/usr/local/
# scp -r /usr/local/hadoop-2.7.1/ hadoop03:/usr/local/
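Before starting any daemons it is worth confirming, on each node, that the copied Hadoop binaries are on the PATH (the version line below is the expected first line of output for this release):
# hadoop version
Hadoop 2.7.1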
The JournalNodes must be running before the NameNode can be formatted, so start one on each of hadoop01, hadoop02 and hadoop03:
# hadoop-daemon.sh start journalnode
On hadoop01, format the NameNode:
# hadoop namenode -format
# hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/hadoop-2.7.1/logs/hadoop-root-namenode-hadoop01.out
Synchronize the metadata from the NameNode that is already running to the one that has not been started yet (run this on hadoop02):
# hdfs namenode -bootstrapStandby
Confirm that the ZooKeeper cluster is running
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: follower
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: follower
# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/zookeeper-3.4.10/bin/../conf/zoo.cfg
Mode: leader
Initialize the HA state in ZooKeeper (run once, on one of the NameNode machines):
# hdfs zkfc -formatZK
...
....INFO ha.ActiveStandbyElector: Successfully created /hadoop-ha/qian in ZK.
...
Verify in ZooKeeper that the znode was created:
# zkCli.sh
WatchedEvent state:SyncConnected type:None path:null
ls /
ls /hadoop-ha
ls /hadoop-ha/qian
[]
Note: type quit to exit zkCli.
Start HDFS across the cluster:
# start-dfs.sh
# jps      (on hadoop01)
3281 JournalNode
4433 Jps
3475 NameNode
4068 DataNode
3110 QuorumPeerMain
4367 DFSZKFailoverController
# jps      (on hadoop02)
3489 DataNode
3715 Jps
2970 QuorumPeerMain
3162 JournalNode
3646 DFSZKFailoverController
3423 NameNode
# zkCli.sh
WATCHER::
WatchedEvent state:SyncConnected type:None path:null
ls /hadoop-ha/qian
get /hadoop-ha/qian/ActiveBreadCrumb
qiannn1hadoop01 �F(�>
cZxid = 0x10000000a
ctime = Sat Jan 13 01:40:21 CST 2018
mZxid = 0x10000000a
mtime = Sat Jan 13 01:40:21 CST 2018
pZxid = 0x10000000a
cversion = 0
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 31
numChildren = 0
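The znode data above shows that nn1 on hadoop01 currently holds the active role. As a sketch of an alternative check that avoids zkCli, the standard haadmin tool reports the same thing (expected output assumes nn1 is still the active node):
# hdfs haadmin -getServiceState nn1
active
# hdfs haadmin -getServiceState nn2
standby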
Upload a test file and list the root directory:
# hdfs dfs -put ./README.txt hdfs:/
# hdfs dfs -ls /
18/01/13 01:58:24 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Found 1 items
-rw-r--r-- 3 root supergroup 1366 2018-01-13 01:57 /README.txt
Test whether automatic failover works
# jps      (on hadoop01)
3281 JournalNode
3475 NameNode
4644 Jps
4068 DataNode
3110 QuorumPeerMain
4367 DFSZKFailoverController
# kill -9 3475      (kill the active NameNode process on hadoop01)
# zkCli.sh
ActiveBreadCrumb ActiveStandbyElectorLock
get /hadoop-ha/qian/ActiveBreadCrumb
qiannn2hadoop02 �F(�>
cZxid = 0x10000000a
ctime = Sat Jan 13 01:40:21 CST 2018
mZxid = 0x100000011
mtime = Sat Jan 13 02:01:57 CST 2018
pZxid = 0x10000000a
cversion = 0
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 31
numChildren = 0
# jps      (on hadoop02, which now hosts the active NameNode)
3489 DataNode
3989 Jps
2970 QuorumPeerMain
3162 JournalNode
3646 DFSZKFailoverController
3423 NameNode
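As a quick extra sanity check (not part of the original steps), the file uploaded earlier can be read back to confirm the cluster still serves data through the new active NameNode:
# hdfs dfs -cat /README.txt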
Note: when namenode1 dies, the active role automatically fails over to namenode2. If namenode2 then dies as well, both NameNodes are down; namenode1 is not restarted automatically.
Finally, configure time synchronization across the cluster so that all three nodes keep consistent clocks.
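A minimal sketch, assuming CentOS with yum and outbound access to a public NTP pool (both assumptions); run on every node:
# yum install -y ntp
# ntpdate pool.ntp.org                                                            # one-off clock sync
# echo "*/30 * * * * /usr/sbin/ntpdate pool.ntp.org" >> /var/spool/cron/root      # periodic re-sync (hypothetical schedule)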
The HA setup is complete.