yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.address</name>
<value>cloud1:8032</value>
<description>ResourceManager host:port for clients to submit jobs. </description>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>cloud1:8030</value>
<description>ResourceManager host:port for ApplicationMasters to talk to Scheduler to obtain resources.</description>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>cloud1:8031</value>
<description>ResourceManager host:port for NodeManagers. </description>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>cloud1:8033</value>
<description>ResourceManager host:port for administrative commands. </description>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>cloud1:8088</value>
<description>ResourceManager web-ui host:port. </description>
</property>
<property>
<name>yarn.resourcemanager.scheduler.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
<description>In case you do not want to use the default scheduler</description>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>10240</value>
<description>Amount of physical memory, in MB, that the NodeManager can allocate to containers</description>
</property>
<property>
<name>yarn.nodemanager.local-dirs</name>
<value></value>
<description>the local directories used by the NodeManager</description>
</property>
<property>
<name>yarn.nodemanager.log-dirs</name>
<value></value>
<description>the directories used by NodeManagers as log directories</description>
</property>
<property>
<name>yarn.nodemanager.remote-app-log-dir</name>
<value>/app-logs</value>
<description>directory on HDFS where the application logs are moved to</description>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
<description>shuffle service that needs to be set for MapReduce to run</description>
</property>
</configuration>
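With these addresses in place, a client only has to point its configuration at the ResourceManager to submit or inspect jobs. Below is a minimal sketch using the standard YarnClient API; the hostname cloud1 and the ports simply mirror the values configured above, and the class name ListYarnApps is just an illustrative choice. It connects to the ResourceManager and prints the applications it currently tracks.

import java.util.List;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListYarnApps {
    public static void main(String[] args) throws Exception {
        // Point the client at the ResourceManager configured in yarn-site.xml.
        // If yarn-site.xml is already on the classpath, these set() calls are redundant.
        YarnConfiguration conf = new YarnConfiguration();
        conf.set(YarnConfiguration.RM_ADDRESS, "cloud1:8032");
        conf.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, "cloud1:8030");

        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();

        // List every application the ResourceManager currently knows about.
        List<ApplicationReport> apps = yarnClient.getApplications();
        for (ApplicationReport app : apps) {
            System.out.println(app.getApplicationId() + "\t" + app.getYarnApplicationState());
        }

        yarnClient.stop();
    }
}

The same result can be checked from the command line with "yarn application -list", which is a quick way to confirm the ResourceManager addresses are reachable.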
2. HBase
hbase-site.xml
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://cloud1:9000/hbase</value>
<description>The directory shared by RegionServers.</description>
</property>
<property>
<name>dfs.support.append</name>
<value>true</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
<description>The mode the cluster will be in. Possible values are
false: standalone and pseudo-distributed setups with managed Zookeeper
true: fully-distributed with unmanaged Zookeeper Quorum (see hbase-env.sh)
</description>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
<description>Property from ZooKeeper's config zoo.cfg. The port at which the clients will connect.</description>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>cloud2,cloud3,cloud4</value>
<description>Comma-separated list of servers in the ZooKeeper quorum.</description>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/mnt/zookeeper-john</value>
<description>Property from ZooKeeper's config zoo.cfg. The directory where the snapshot is stored.</description>
</property>
<property>
<name>hbase.zookeeper.property.maxClientCnxns</name>
<value>1000</value>
</property>
</configuration>
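Once HBase is running against this configuration, client applications locate the cluster through the ZooKeeper quorum rather than the master directly. The sketch below assumes the HBase 1.x+ client API and a pre-created table named "test" with a column family "cf" (both illustrative names, not part of the configuration above); the quorum and client port match the values in hbase-site.xml.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class HBaseQuickCheck {
    public static void main(String[] args) throws Exception {
        // The client finds the cluster through ZooKeeper, so only the quorum
        // and client port from hbase-site.xml are needed here. If hbase-site.xml
        // is on the classpath, the set() calls are redundant.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "cloud2,cloud3,cloud4");
        conf.set("hbase.zookeeper.property.clientPort", "2181");

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("test"))) {

            // Write one cell, then read it back to verify the cluster is reachable.
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("hello"));
            table.put(put);

            Result result = table.get(new Get(Bytes.toBytes("row1")));
            System.out.println(Bytes.toString(
                    result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q1"))));
        }
    }
}

A quicker smoke test is the HBase shell: "create 'test', 'cf'" followed by "put 'test', 'row1', 'cf:q1', 'hello'" and "get 'test', 'row1'" exercise the same path through the ZooKeeper quorum.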