# Configuration files complete.
# Switch to the master host and continue.
[root@master hadoop]# vi /opt/modules/hadoop/hadoop-1.1.2/conf/masters
node1
node2
[root@master hadoop]# vi /opt/modules/hadoop/hadoop-1.1.2/conf/slaves
master
node1
node2
# Node assignment matters here: in Hadoop 1.x the masters file lists the SecondaryNameNode hosts, so master itself is not added to it.
-----------------------------------hadoop-1.1.2 node1/node2 configuration begins-----------------------------------
I will not repeat the earlier network configuration:
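# As a reminder (the key generation itself is not shown here), the hadoop user's key pair was presumably created along these lines:
[hadoop@master ~]$ ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
[hadoop@master ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@master ~]$ chmod 600 ~/.ssh/authorized_keys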
# Log in to master and push authorized_keys to the nodes
[root@master ~]# scp /home/hadoop/.ssh/authorized_keys root@node1:/home/hadoop/.ssh/
The authenticity of host 'node1 (192.168.1.111)' can't be established.
RSA key fingerprint is 0d:aa:04:89:28:44:b9:e8:bb:5e:06:d0:dc:de:22:85.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1,192.168.1.111' (RSA) to the list of known hosts.
root@node1's password:
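# sshd is strict about ~/.ssh permissions; if password-less login to node1 still prompts for a password, these are worth checking there (assumed paths):
[root@node1 ~]# chmod 700 /home/hadoop/.ssh
[root@node1 ~]# chmod 600 /home/hadoop/.ssh/authorized_keys
[root@node1 ~]# chown -R hadoop:hadoop /home/hadoop/.ssh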
# Switch to the node1 host
[root@master ~]# su hadoop
[hadoop@master root]$ ssh master
Last login: Sun Mar 23 23:17:06 2014 from 192.168.1.110
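# The same test is worth repeating for each node that has received the key, e.g.:
[hadoop@master root]$ ssh node1
[hadoop@node1 ~]$ exit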
[root@master hadoop]# vi /opt/modules/hadoop/hadoop-1.1.2/conf/masters
node1
node2
[root@master hadoop]# vi /opt/modules/hadoop/hadoop-1.1.2/conf/slaves
master
node1
node2
# Switch back to the master host and start things up
[hadoop@master conf]$ hadoop namenode -format
Warning: $HADOOP_HOME is deprecated.
14/03/24 13:33:52 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = master/192.168.1.110
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 1.1.2
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.1 -r 1440782; compiled by 'hortonfo' on Thu Jan 31 02:03:24 UTC 2013
************************************************************/
Re-format filesystem in /data/hadoop/hdfs/name ? (Y or N) Y
14/03/24 13:33:54 INFO util.GSet: VM type = 32-bit
14/03/24 13:33:54 INFO util.GSet: 2% max memory = 0.61875 MB
14/03/24 13:33:54 INFO util.GSet: capacity = 2^17 = 131072 entries
14/03/24 13:33:54 INFO util.GSet: recommended=131072, actual=131072
14/03/24 13:33:55 INFO namenode.FSNamesystem: fsOwner=hadoop
14/03/24 13:33:55 INFO namenode.FSNamesystem: supergroup=supergroup
14/03/24 13:33:55 INFO namenode.FSNamesystem: isPermissionEnabled=false
14/03/24 13:33:55 INFO namenode.FSNamesystem: dfs.block.invalidate.limit=100
14/03/24 13:33:55 INFO namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
14/03/24 13:33:55 INFO namenode.NameNode: Caching file names occuring more than 10 times
14/03/24 13:33:55 INFO common.Storage: Image file of ...
14/03/24 13:33:56 INFO namenode.FSEditLog: closing edit log: position=4, editlog=/data/hadoop/hdfs/name/current/edits
14/03/24 13:33:56 INFO namenode.FSEditLog: close success: truncate to 4, editlog=/data/hadoop/hdfs/name/current/edits
14/03/24 13:33:56 INFO common.Storage: Storage directory /data/hadoop/hdfs/name has been successfully formatted.
14/03/24 13:33:56 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.1.110
************************************************************/
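# A quick sanity check that the name directory from the output above was created (it typically contains VERSION, fsimage, edits and fstime):
[hadoop@master conf]$ ls /data/hadoop/hdfs/name/current/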
[hadoop@master bin]$ start-all.sh
[hadoop@master bin]$ jps
7603 TaskTracker
7241 DataNode
7119 NameNode
7647 Jps
7473 JobTracker
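# With all daemons up, it is worth confirming that the DataNodes on node1 and node2 registered as well, e.g. with the standard Hadoop 1.x checks:
[hadoop@master bin]$ hadoop dfsadmin -report
# or via the web UIs, which default to http://master:50070 (HDFS) and http://master:50030 (JobTracker)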
-----------------------------------zookeeper-3.4.5 configuration begins-----------------------------------
# Install on the master machine, which serves as the namenode
[root@master hadoop]# tar -zxvf zookeeper-3.4.5-1374045102000.tar.gz
[root@master hadoop]# chown -R hadoop:hadoop zookeeper-3.4.5
[root@master hadoop]# vi /opt/modules/hadoop/zookeeper-3.4.5/conf/zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/data/zookeeper
# the port at which the clients will connect
clientPort=2181
server.1=192.168.1.110:2888:3888
server.2=192.168.1.111:2888:3888
server.3=192.168.1.112:2888:3888
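# (Note added here, not part of the stock sample file: in server.N=host:port1:port2, the first port is used by followers to connect to the leader and the second for leader election.)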
# Create a file named myid under the dataDir configured in zoo.cfg (here /data/zookeeper), containing that server's number: myid is 1 on the namenode (server.1), 2 on datanode1 (server.2), and so on.
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
# Now create the dataDir and the myid file.
[root@master hadoop]# mkdir -p /data/zookeeper/
[root@master hadoop]# chown -R hadoop:hadoop /data/zookeeper/
[root@master hadoop]# echo "1" > /data/zookeeper/myid
[root@master hadoop]# cat /data/zookeeper/myid
1
[root@master hadoop]# scp -r /opt/modules/hadoop/zookeeper-3.4.5/ root@node1:/opt/modules/hadoop/
# Send /opt/modules/hadoop/zookeeper-3.4.5 to the node1 node and create a new myid there with the value 2
# Switch to node1
[root@node1 data]# echo "2" > /data/zookeeper/myid
[root@node1 data]# cat /data/zookeeper/myid
2
[root@node1 zookeeper-3.4.5]# chown -R hadoop:hadoop /opt/modules/hadoop/zookeeper-3.4.5
[root@node1 zookeeper-3.4.5]# chown -R hadoop:hadoop /data/zookeeper/*
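# zoo.cfg also lists server.3 (192.168.1.112, i.e. node2), so the same steps presumably get repeated there with myid set to 3, along these lines:
[root@master hadoop]# scp -r /opt/modules/hadoop/zookeeper-3.4.5/ root@node2:/opt/modules/hadoop/
[root@node2 ~]# mkdir -p /data/zookeeper
[root@node2 ~]# echo "3" > /data/zookeeper/myid
[root@node2 ~]# chown -R hadoop:hadoop /opt/modules/hadoop/zookeeper-3.4.5 /data/zookeeper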
# Switch to master
[root@master hadoop]# su hadoop
[hadoop@master hadoop]$ cd zookeeper-3.4.5
[hadoop@master bin]$ ./zkServer.sh start
JMX enabled by default
Using config: /opt/modules/hadoop/zookeeper-3.4.5/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@master bin]$ jps
5507 NameNode
5766 JobTracker
6392 Jps
6373 QuorumPeerMain
5890 TaskTracker
5626 DataNode
[root@node1 zookeeper-3.4.5]# su hadoop
[hadoop@node1 zookeeper-3.4.5]$ cd bin/
[hadoop@node1 bin]$ ./zkServer.sh start
JMX enabled by default
Using config: /opt/modules/hadoop/zookeeper-3.4.5/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[hadoop@node1 bin]$ jps
5023 SecondaryNameNode
5120 TaskTracker
5445 Jps
4927 DataNode
5415 QuorumPeerMain
# Once both sides are up, check the status; Mode: follower (or leader on one node) means the quorum is working
[hadoop@master bin]$ ./zkServer.sh status
JMX enabled by default
Using config: /opt/modules/hadoop/zookeeper-3.4.5/bin/../conf/zoo.cfg
Mode: follower
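# Optionally, a client connection test with the zkCli.sh that ships in the same bin directory; once connected, "ls /" should return at least [zookeeper]:
[hadoop@master bin]$ ./zkCli.sh -server 192.168.1.110:2181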
-----------------------------------zookeeper-3.4.5 configuration ends-----------------------------------
[root@master ~]# su hadoop
[hadoop@master root]$ cd /opt/modules/hadoop/zookeeper-3.4.5/bin/
[hadoop@master bin]$ ./zkServer.sh start
-----------------------------------hbase configuration begins (needed on all three machines)-----------------------------------
[root@master hadoop]# tar -zxvf hbase-0.96.1.1-hadoop1-bin.tar.gz
# Extract the archive
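# Since HBase is needed on all three machines, the tree is presumably either unpacked the same way on node1 and node2 or copied over once the configuration below is finished, e.g. (path assumed to match the HBASE_HOME set below):
[root@master hadoop]# scp -r /opt/modules/hadoop/hbase-0.96.1.1/ root@node1:/opt/modules/hadoop/
[root@master hadoop]# scp -r /opt/modules/hadoop/hbase-0.96.1.1/ root@node2:/opt/modules/hadoop/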
[root@master hadoop]# vi /etc/profile.d/java_hadoop.sh
export JAVA_HOME=/usr/java/jdk1.7.0_45/
export HADOOP_HOME=/opt/modules/hadoop/hadoop-1.1.2/
export HBASE_HOME=/opt/modules/hadoop/hbase-0.96.1.1/
export HBASE_CLASSPATH=/opt/modules/hadoop/hadoop-1.1.2/conf/
export HBASE_MANAGES_ZK=true
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HBASE_HOME/bin
# Configure the environment variables.
[root@master hadoop]# source /etc/profile
[root@master hadoop]# echo $HBASE_CLASSPATH
/opt/modules/hadoop/hadoop-1.1.2/conf/
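# A quick check that the new variables took effect (the hbase command should now be on the PATH):
[root@master hadoop]# hbase version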
[root@master conf]# vi /opt/modules/hadoop/hbase-0.96.1.1/conf/hbase-site.xml