Hadoop & Spark Installation (Part 1)
# Modify the Hadoop configuration files
$ pwd
/usr/local/hadoop/etc/hadoop
$ cat core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hddcluster2:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>file:/usr/local/hadoop/tmp</value>
        <description>A base for other temporary directories.</description>
    </property>
</configuration>
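fs.defaultFS points every HDFS client at the NameNode on hddcluster2:9000, and hadoop.tmp.dir anchors the local storage paths used below. A quick sanity check that Hadoop resolves the value you expect (assuming the Hadoop bin directory is on your PATH):

$ # Print the value Hadoop actually resolves for fs.defaultFS
$ hdfs getconf -confKey fs.defaultFS
hdfs://hddcluster2:9000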
$ cat hdfs-site.xml
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hddcluster2:50090</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>3</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/usr/local/hadoop/tmp/dfs/data</value>
    </property>
</configuration>
$
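A replication factor of 3 fits the four DataNodes listed in the slaves file below, and both the name and data directories live under hadoop.tmp.dir. Hadoop creates these directories itself when the NameNode is formatted, but pre-creating them with the right owner avoids permission surprises; a sketch, assuming the daemons run as a "hadoop" user (a hypothetical username for this cluster):

$ # Pre-create the NameNode and DataNode storage directories
$ mkdir -p /usr/local/hadoop/tmp/dfs/name /usr/local/hadoop/tmp/dfs/data
$ chown -R hadoop:hadoop /usr/local/hadoop/tmp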
$ cat mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hddcluster2:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hddcluster2:19888</value>
    </property>
</configuration>
$
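Setting mapreduce.framework.name to yarn runs MapReduce jobs on YARN, and the two jobhistory properties point clients and the web UI at hddcluster2. Note that start-yarn.sh does not launch the JobHistory server; on Hadoop 2.x it is typically started separately:

$ # Start the MapReduce JobHistory server on hddcluster2 (Hadoop 2.x sbin script)
$ mr-jobhistory-daemon.sh start historyserver
$ # The web UI should then answer at http://hddcluster2:19888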
$ cat yarn-site.xml
<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hddcluster2</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
</configuration>
$
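yarn.resourcemanager.hostname makes hddcluster2 the ResourceManager, and mapreduce_shuffle is the auxiliary service every NodeManager must run so reducers can fetch map output. Once the YARN daemons are up, one way to confirm all NodeManagers registered:

$ # List NodeManagers known to the ResourceManager; expect the hosts from slaves
$ yarn node -list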
$ cat slaves
hddcluster1
hddcluster2
hddcluster3
hddcluster4
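The slaves file lists every DataNode/NodeManager host; hddcluster2 appears as well, so the master node also stores data. These configuration files only take effect on nodes that have them, so the whole etc/hadoop directory must be kept identical cluster-wide. A minimal sketch for pushing it out from hddcluster2, assuming passwordless SSH to the other nodes is already set up:

$ # Sync the Hadoop configuration directory to the remaining nodes
$ for node in hddcluster1 hddcluster3 hddcluster4; do
>   rsync -av /usr/local/hadoop/etc/hadoop/ ${node}:/usr/local/hadoop/etc/hadoop/
> done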