소스 검색

AMBARI-3717: Config Refactor: Properties with hostnames in their values should also be surfaced on web-ui. (jaimin)

Jaimin Jetly 11 년 전
부모
커밋
be01dee804
42개의 변경된 파일917개의 추가작업 그리고 972개의 파일을 삭제
  1. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
  2. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
  3. 5 5
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml
  4. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
  5. 24 13
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
  6. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
  7. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml
  8. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml
  9. 5 5
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
  10. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
  11. 18 8
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
  12. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
  13. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
  14. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml
  15. 5 5
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
  16. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
  17. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
  18. 6 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml
  19. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml
  20. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml
  21. 5 5
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
  22. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
  23. 289 252
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
  24. 10 10
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
  25. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml
  26. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml
  27. 5 5
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
  28. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
  29. 288 252
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
  30. 10 10
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml
  31. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-site.xml
  32. 81 81
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/core-site.xml
  33. 5 5
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
  34. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
  35. 13 10
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
  36. 6 0
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/yarn-site.xml
  37. 0 115
      ambari-web/app/data/HDP2/config_mapping.js
  38. 0 13
      ambari-web/app/data/HDP2/global_properties.js
  39. 0 103
      ambari-web/app/data/config_mapping.js
  40. 0 50
      ambari-web/app/data/global_properties.js
  41. 24 1
      ambari-web/app/data/site_properties.js
  42. 98 4
      ambari-web/app/models/service_config.js

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml

@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
     For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
     By default this is set to localhost for local and pseudo-distributed modes

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml

@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

+ 5 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml

@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
     permitted to connect to the namenode. The full pathname of the file
     must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
 <description>The name of the default file system.  Either the
 literal string "local" or a host:port for NDFS.</description>
 <final>true</final>
@@ -309,7 +309,7 @@ Kerberos principal name for the NameNode
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@ Kerberos principal name for the NameNode
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
   <description>The https address where namenode binds</description>
 
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml

@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

+ 24 - 13
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml

@@ -82,19 +82,20 @@
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>Http address for JobTracker</description>
     <final>true</final>
   </property>
 
+
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
@@ -324,14 +325,24 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
 
 <property>
   <name>mapred.hosts.exclude</name>
-  <value></value>
+  <value>/etc/hadoop/conf/mapred.exclude</value>
+  <description>
+    Names a file that contains the list of hosts that
+    should be excluded by the jobtracker.  If the value is empty, no
+    hosts are excluded.
+  </description>
 </property>
 
 <property>
@@ -380,10 +391,10 @@
 
 <property>
   <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
+  <value>/etc/hadoop/conf/health_check</value>
   <description>The directory where the job status information is persisted
-   in a file system to be available after it drops of the memory queue and
-   between jobtracker restarts.
+    in a file system to be available after it drops of the memory queue and
+    between jobtracker restarts.
   </description>
 </property>
 
@@ -518,7 +529,7 @@ process</description>
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml

@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/configuration/hbase-site.xml

@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
     For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
     By default this is set to localhost for local and pseudo-distributed modes

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/core-site.xml

@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

+ 5 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml

@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
     permitted to connect to the namenode. The full pathname of the file
     must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
 <description>The name of the default file system.  Either the
 literal string "local" or a host:port for NDFS.</description>
 <final>true</final>
@@ -309,7 +309,7 @@ Kerberos principal name for the NameNode
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@ Kerberos principal name for the NameNode
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
   <description>The https address where namenode binds</description>
 
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml

@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

+ 18 - 8
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml

@@ -82,16 +82,16 @@
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>JobTracker host and http port address</description>
     <final>true</final>
   </property>
 
@@ -326,12 +326,22 @@
 
 <property>
   <name>mapred.hosts</name>
-  <value></value>
+  <value>/etc/hadoop/conf/mapred.include</value>
+  <description>
+    Names a file that contains the list of nodes that may
+    connect to the jobtracker.  If the value is empty, all hosts are
+    permitted.
+  </description>
 </property>
 
 <property>
   <name>mapred.hosts.exclude</name>
-  <value></value>
+  <value>/etc/hadoop/conf/mapred.exclude</value>
+  <description>
+    Names a file that contains the list of hosts that
+    should be excluded by the jobtracker.  If the value is empty, no
+    hosts are excluded.
+  </description>
 </property>
 
 <property>
@@ -380,7 +390,7 @@
 
 <property>
   <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
+  <value>/etc/hadoop/conf/health_check</value>
   <description>The directory where the job status information is persisted
    in a file system to be available after it drops of the memory queue and
    between jobtracker restarts.
@@ -518,7 +528,7 @@ process</description>
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml

@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml

@@ -309,7 +309,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
     For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
     By default this is set to localhost for local and pseudo-distributed modes

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml

@@ -50,7 +50,7 @@
   <property>
     <name>fs.defaultFS</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
   literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

+ 5 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml

@@ -70,7 +70,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
     not permitted to connect to the namenode.  The full pathname of the
     file must be specified.  If the value is empty, no hosts are
@@ -80,7 +80,7 @@
 <!--
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
     permitted to connect to the namenode. The full pathname of the file
     must be specified.  If the value is empty, all hosts are
@@ -198,7 +198,7 @@
 
   <property>
     <name>dfs.namenode.http-address</name>
-    <value></value>
+    <value>localhost:50070</value>
 <description>The name of the default file system.  Either the
 literal string "local" or a host:port for NDFS.</description>
 <final>true</final>
@@ -319,7 +319,7 @@ Kerberos principal name for the NameNode
   <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -376,7 +376,7 @@ Kerberos principal name for the NameNode
 
   <property>
     <name>dfs.namenode.https-address</name>
-    <value></value>
+    <value>localhost:50470</value>
   <description>The https address where namenode binds</description>
 
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml

@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml

@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 

+ 6 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/yarn-site.xml

@@ -23,6 +23,12 @@
 
   <!-- ResourceManager -->
 
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+  </property>
+
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HBASE/configuration/hbase-site.xml

@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
       For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
       By default this is set to localhost for local and pseudo-distributed modes

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/core-site.xml

@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

+ 5 - 5
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml

@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
       permitted to connect to the namenode. The full pathname of the file
       must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
@@ -309,7 +309,7 @@
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
     <description>The https address where namenode binds</description>
 
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml

@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

+ 289 - 252
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml

@@ -22,12 +22,14 @@
 
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
+  <!-- i/o properties -->
 
   <property>
     <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
+    <value>200</value>
+    <description>
+      The total amount of Map-side buffer memory to use while sorting files
+    </description>
   </property>
 
   <property>
@@ -38,8 +40,8 @@
 
   <property>
     <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
+    <value>0.9</value>
+    <description>Percentage of sort buffer used for record collection</description>
   </property>
 
   <property>
@@ -48,62 +50,63 @@
     <description>No description</description>
   </property>
 
-<!-- map/reduce properties -->
+  <!-- map/reduce properties -->
 
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended-default is 5.000 seconds - a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
+  <property>
+    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+    <value>250</value>
+    <description>Normally, this is the amount of time before killing
+      processes, and the recommended-default is 5.000 seconds - a value of
+      5000 here.  In this case, we are using it solely to blast tasks before
+      killing them, and killing them very quickly (1/4 second) to guarantee
+      that we do not leave VMs around for later jobs.
+    </description>
+  </property>
 
   <property>
     <name>mapred.job.tracker.handler.count</name>
     <value>50</value>
     <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
+      The number of server threads for the JobTracker. This should be roughly
+      4% of the number of tasktracker nodes.
     </description>
   </property>
 
   <property>
     <name>mapred.system.dir</name>
     <value>/mapred/system</value>
-    <description>No description</description>
+    <description>Path on the HDFS where where the MapReduce framework stores system files</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>Http address for JobTracker</description>
     <final>true</final>
   </property>
 
+
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
-    <value></value>
+    <value>/hadoop/mapred</value>
     <description>No description</description>
     <final>true</final>
   </property>
 
   <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
   </property>
 
   <property>
@@ -114,13 +117,13 @@
 
   <property>
     <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
+    <value>4</value>
     <description>No description</description>
   </property>
 
   <property>
     <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
+    <value>2</value>
     <description>No description</description>
   </property>
 
@@ -133,14 +136,14 @@
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
@@ -152,29 +155,29 @@
     <name>mapred.inmem.merge.threshold</name>
     <value>1000</value>
     <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less than
-  0 indicates we want to DON'T have any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
+      for the in-memory merge process. When we accumulate threshold number of files
+      we initiate the in-memory merge and spill to disk. A value of 0 or less than
+      0 indicates we want to DON'T have any threshold and instead depend only on
+      the ramfs's memory consumption to trigger the merge.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.merge.percent</name>
     <value>0.66</value>
     <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapred.job.shuffle.input.buffer.percent.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.input.buffer.percent</name>
     <value>0.7</value>
     <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
+      size to storing map outputs during the shuffle.
+    </description>
   </property>
 
   <property>
@@ -185,13 +188,13 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
 
 
   <property>
@@ -201,14 +204,14 @@
 
   <property>
     <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
+    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
   </property>
 
   <property>
     <name>mapred.jobtracker.restart.recover</name>
     <value>false</value>
     <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
+      "false" to start afresh
     </description>
   </property>
 
@@ -216,20 +219,20 @@
     <name>mapred.job.reduce.input.buffer.percent</name>
     <value>0.0</value>
     <description>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
   </property>
 
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 Gb.)  If the estimated input size of the reduce is greater than
-  this value, job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
+      is 10 Gb.)  If the estimated input size of the reduce is greater than
+      this value, job is failed. A value of -1 means that there is no limit
+      set. </description>
+  </property>
 
 
   <!-- copied from kryptonite configuration -->
@@ -243,9 +246,9 @@
     <name>mapred.task.timeout</name>
     <value>600000</value>
     <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
   </property>
 
   <property>
@@ -257,9 +260,9 @@
   <property>
     <name>mapred.task.tracker.task-controller</name>
     <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
+    <description>
+      TaskController which is used to launch and manage task execution.
+    </description>
   </property>
 
   <property>
@@ -277,158 +280,192 @@
   <property>
     <name>mapred.cluster.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      The virtual memory size of a single Map slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      The virtual memory size of a single Reduce slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.job.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      Virtual memory for single Map task
+    </description>
   </property>
 
   <property>
     <name>mapred.job.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      Virtual memory for single Reduce task
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.map.memory.mb</name>
     <value>6144</value>
+    <description>
+      Upper limit on virtual memory size for a single Map task of any MapReduce job
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.reduce.memory.mb</name>
     <value>4096</value>
+    <description>
+      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts.exclude</name>
+    <value>/etc/hadoop/conf/mapred.exclude</value>
+    <description>
+      Names a file that contains the list of hosts that
+      should be excluded by the jobtracker.  If the value is empty, no
+      hosts are excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.max.tracker.blacklists</name>
+    <value>16</value>
+    <description>
+      If a node is reported blacklisted by 16 successful jobs within the timeout window, it will be graylisted
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value>file:////mapred/jobstatus</value>
+    <description>
+      Directory path to view job status
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.interval</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.timeout</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <value>false</value>
+    <description>Indicates if persistency of job status information is
+      active or not.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.hours</name>
+    <value>1</value>
+    <description>The number of hours job status information is persisted in DFS.
+      The job status information will be available after it drops out of the memory
+      queue and between jobtracker restarts. With a zero value the job status
+      information is not persisted at all in DFS.
+    </description>
   </property>
 
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops of the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <value>/etc/hadoop/conf/health_check</value>
+    <description>The directory where the job status information is persisted
+      in a file system to be available after it drops out of the memory queue and
+      between jobtracker restarts.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.check</name>
+    <value>10000</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.interval</name>
+    <value>21600000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>/mapred/history/done</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.maxvmem</name>
+    <value></value>
+    <final>true</final>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.maxtasks.per.job</name>
+    <value>-1</value>
+    <final>true</final>
+    <description>The maximum number of tasks for a single job.
+      A value of -1 indicates that there is no maximum.  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapred.userlog.retain.hours</name>
+    <value>24</value>
+    <description>
+      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reuse.jvm.num.tasks</name>
+    <value>1</value>
+    <description>
+      How many tasks to run per jvm. If set to -1, there is no limit
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.kerberos.principal</name>
+    <value></value>
+    <description>
       JT user name key.
- </description>
-</property>
+    </description>
+  </property>
 
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       tt user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
+  <property>
+    <name>mapreduce.tasktracker.kerberos.principal</name>
+    <value></value>
+    <description>
+      tt user name key. "_HOST" is replaced by the host name of the task tracker.
+    </description>
+  </property>
 
 
   <property>
@@ -438,61 +475,61 @@
   </property>
 
 
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
+  <property>
+    <name>mapreduce.jobtracker.keytab.file</name>
+    <value></value>
+    <description>
+      The keytab for the jobtracker principal.
+    </description>
 
-</property>
+  </property>
 
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
+  <property>
+    <name>mapreduce.tasktracker.keytab.file</name>
+    <value></value>
     <description>The filename of the keytab for the task tracker</description>
- </property>
+  </property>
 
- <property>
-   <name>mapred.task.tracker.http.address</name>
-   <value></value>
-   <description>Http address for task tracker.</description>
- </property>
+  <property>
+    <name>mapred.task.tracker.http.address</name>
+    <value></value>
+    <description>Http address for task tracker.</description>
+  </property>
 
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>/user</value>
+    <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
+      name. It is a path in the default file system.</description>
+  </property>
 
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
+  <property>
+    <name>mapreduce.tasktracker.group</name>
+    <value>hadoop</value>
+    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
 
- </property>
+  </property>
 
   <property>
     <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
     <value>50000000</value>
     <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialize.
-   </description>
+    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+      initialize.
+    </description>
   </property>
   <property>
     <name>mapreduce.history.server.embedded</name>
     <value>false</value>
     <description>Should job history server be embedded within Job tracker
-process</description>
+      process</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>
@@ -500,38 +537,38 @@ process</description>
   <property>
     <name>mapreduce.jobhistory.kerberos.principal</name>
     <!-- cluster variant -->
-  <value></value>
+    <value></value>
     <description>Job history user name key. (must map to same user as JT
-user)</description>
+      user)</description>
   </property>
 
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+    <value>180</value>
+    <description>
+      3-hour sliding window (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+    <value>15</value>
+    <description>
+      15-minute bucket size (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.names</name>
+    <value>default</value>
+    <description> Comma separated list of queues configured for this jobtracker.</description>
+  </property>
 
 </configuration>

+ 10 - 10
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml

@@ -25,7 +25,7 @@ limitations under the License.
 
   <property>
     <name>templeton.port</name>
-      <value>50111</value>
+    <value>50111</value>
     <description>The HTTP port for the main server.</description>
   </property>
 
@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 
@@ -104,18 +104,18 @@ limitations under the License.
   </property>
 
   <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>
+      Enable the override path in templeton.override.jars
+    </description>
+  </property>
 
- <property>
+  <property>
     <name>templeton.streaming.jar</name>
     <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
     <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
+  </property>
 
   <property>
     <name>templeton.exec.timeout</name>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HBASE/configuration/hbase-site.xml

@@ -306,7 +306,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
       For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
       By default this is set to localhost for local and pseudo-distributed modes

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/core-site.xml

@@ -58,7 +58,7 @@
   <property>
     <name>fs.default.name</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>

+ 5 - 5
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml

@@ -85,7 +85,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are
@@ -94,7 +94,7 @@
 
   <property>
     <name>dfs.hosts</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
       permitted to connect to the namenode. The full pathname of the file
       must be specified.  If the value is empty, all hosts are
@@ -168,7 +168,7 @@
 
   <property>
     <name>dfs.http.address</name>
-    <value></value>
+    <value>localhost:50070</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
@@ -309,7 +309,7 @@
   <property>
     <!-- cluster variant -->
     <name>dfs.secondary.http.address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -379,7 +379,7 @@
 
   <property>
     <name>dfs.https.address</name>
-    <value></value>
+    <value>localhost:50470</value>
     <description>The https address where namenode binds</description>
 
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml

@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

+ 288 - 252
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml

@@ -22,12 +22,14 @@
 
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
+  <!-- i/o properties -->
 
   <property>
     <name>io.sort.mb</name>
-    <value></value>
-    <description>No description</description>
+    <value>200</value>
+    <description>
+      The total amount of Map-side buffer memory to use while sorting files
+    </description>
   </property>
 
   <property>
@@ -38,8 +40,8 @@
 
   <property>
     <name>io.sort.spill.percent</name>
-    <value></value>
-    <description>No description</description>
+    <value>0.9</value>
+    <description>Percentage of sort buffer used for record collection</description>
   </property>
 
   <property>
@@ -48,62 +50,62 @@
     <description>No description</description>
   </property>
 
-<!-- map/reduce properties -->
+  <!-- map/reduce properties -->
 
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended-default is 5.000 seconds - a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
+  <property>
+    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+    <value>250</value>
+    <description>Normally, this is the amount of time before killing
+      processes, and the recommended-default is 5.000 seconds - a value of
+      5000 here.  In this case, we are using it solely to blast tasks before
+      killing them, and killing them very quickly (1/4 second) to guarantee
+      that we do not leave VMs around for later jobs.
+    </description>
+  </property>
 
   <property>
     <name>mapred.job.tracker.handler.count</name>
     <value>50</value>
     <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
+      The number of server threads for the JobTracker. This should be roughly
+      4% of the number of tasktracker nodes.
     </description>
   </property>
 
   <property>
     <name>mapred.system.dir</name>
     <value>/mapred/system</value>
-    <description>No description</description>
+    <description>Path on the HDFS where where the MapReduce framework stores system files</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50300</value>
+    <description>JobTracker address</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>No description</description>
+    <value>localhost:50030</value>
+    <description>JobTracker host and http port address</description>
     <final>true</final>
   </property>
 
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
-    <value></value>
+    <value>/hadoop/mapred</value>
     <description>No description</description>
     <final>true</final>
   </property>
 
   <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
   </property>
 
   <property>
@@ -114,13 +116,13 @@
 
   <property>
     <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value></value>
+    <value>4</value>
     <description>No description</description>
   </property>
 
   <property>
     <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value></value>
+    <value>2</value>
     <description>No description</description>
   </property>
 
@@ -133,14 +135,14 @@
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
@@ -152,29 +154,29 @@
     <name>mapred.inmem.merge.threshold</name>
     <value>1000</value>
     <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less than
-  0 indicates we want to DON'T have any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
+      for the in-memory merge process. When we accumulate threshold number of files
+      we initiate the in-memory merge and spill to disk. A value of 0 or less than
+      0 indicates we DON'T want to have any threshold and instead depend only on
+      the ramfs's memory consumption to trigger the merge.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.merge.percent</name>
     <value>0.66</value>
     <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapred.job.shuffle.input.buffer.percent.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.input.buffer.percent</name>
     <value>0.7</value>
     <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
+      size to storing map outputs during the shuffle.
+    </description>
   </property>
 
   <property>
@@ -185,13 +187,13 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
 
 
   <property>
@@ -201,14 +203,14 @@
 
   <property>
     <name>mapred.jobtracker.taskScheduler</name>
-    <value></value>
+    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
   </property>
 
   <property>
     <name>mapred.jobtracker.restart.recover</name>
     <value>false</value>
     <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
+      "false" to start afresh
     </description>
   </property>
 
@@ -216,20 +218,20 @@
     <name>mapred.job.reduce.input.buffer.percent</name>
     <value>0.0</value>
     <description>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
   </property>
 
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 Gb.)  If the estimated input size of the reduce is greater than
-  this value, job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
+      is 10 Gb.)  If the estimated input size of the reduce is greater than
+      this value, job is failed. A value of -1 means that there is no limit
+      set. </description>
+  </property>
 
 
   <!-- copied from kryptonite configuration -->
@@ -243,9 +245,9 @@
     <name>mapred.task.timeout</name>
     <value>600000</value>
     <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
   </property>
 
   <property>
@@ -257,9 +259,9 @@
   <property>
     <name>mapred.task.tracker.task-controller</name>
     <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
+    <description>
+      TaskController which is used to launch and manage task execution.
+    </description>
   </property>
 
   <property>
@@ -277,158 +279,192 @@
   <property>
     <name>mapred.cluster.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      The virtual memory size of a single Map slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      The virtual memory size of a single Reduce slot in the MapReduce framework
+    </description>
   </property>
 
   <property>
     <name>mapred.job.map.memory.mb</name>
     <value>1536</value>
+    <description>
+      Virtual memory for single Map task
+    </description>
   </property>
 
   <property>
     <name>mapred.job.reduce.memory.mb</name>
     <value>2048</value>
+    <description>
+      Virtual memory for single Reduce task
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.map.memory.mb</name>
     <value>6144</value>
+    <description>
+      Upper limit on virtual memory size for a single Map task of any MapReduce job
+    </description>
   </property>
 
   <property>
     <name>mapred.cluster.max.reduce.memory.mb</name>
     <value>4096</value>
+    <description>
+      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts.exclude</name>
+    <value>/etc/hadoop/conf/mapred.exclude</value>
+    <description>
+      Names a file that contains the list of hosts that
+      should be excluded by the jobtracker.  If the value is empty, no
+      hosts are excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.max.tracker.blacklists</name>
+    <value>16</value>
+    <description>
+      if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value>file:////mapred/jobstatus</value>
+    <description>
+      Directory path to view job status
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.interval</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.timeout</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <value>false</value>
+    <description>Indicates if persistency of job status information is
+      active or not.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.hours</name>
+    <value>1</value>
+    <description>The number of hours job status information is persisted in DFS.
+      The job status information will be available after it drops off the memory
+      queue and between jobtracker restarts. With a zero value the job status
+      information is not persisted at all in DFS.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <value>/etc/hadoop/conf/health_check</value>
+    <description>The directory where the job status information is persisted
+      in a file system to be available after it drops off the memory queue and
+      between jobtracker restarts.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.check</name>
+    <value>10000</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.interval</name>
+    <value>21600000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>/mapred/history/done</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.maxvmem</name>
+    <value></value>
+    <final>true</final>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.maxtasks.per.job</name>
+    <value>-1</value>
+    <final>true</final>
+    <description>The maximum number of tasks for a single job.
+      A value of -1 indicates that there is no maximum.  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapred.userlog.retain.hours</name>
+    <value>24</value>
+    <description>
+      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+    </description>
   </property>
 
-<property>
-  <name>mapred.hosts</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value></value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops of the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value></value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value></value>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
+  <property>
+    <name>mapred.job.reuse.jvm.num.tasks</name>
+    <value>1</value>
+    <description>
+      How many tasks to run per jvm. If set to -1, there is no limit
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.kerberos.principal</name>
+    <value></value>
+    <description>
       JT user name key.
- </description>
-</property>
+    </description>
+  </property>
 
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       tt user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
+  <property>
+    <name>mapreduce.tasktracker.kerberos.principal</name>
+    <value></value>
+    <description>
+      tt user name key. "_HOST" is replaced by the host name of the task tracker.
+    </description>
+  </property>
 
 
   <property>
@@ -438,61 +474,61 @@
   </property>
 
 
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
+  <property>
+    <name>mapreduce.jobtracker.keytab.file</name>
+    <value></value>
+    <description>
+      The keytab for the jobtracker principal.
+    </description>
 
-</property>
+  </property>
 
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
+  <property>
+    <name>mapreduce.tasktracker.keytab.file</name>
+    <value></value>
     <description>The filename of the keytab for the task tracker</description>
- </property>
+  </property>
 
- <property>
-   <name>mapred.task.tracker.http.address</name>
-   <value></value>
-   <description>Http address for task tracker.</description>
- </property>
+  <property>
+    <name>mapred.task.tracker.http.address</name>
+    <value></value>
+    <description>Http address for task tracker.</description>
+  </property>
 
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>/user</value>
+    <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
+      name. It is a path in the default file system.</description>
+  </property>
 
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
+  <property>
+    <name>mapreduce.tasktracker.group</name>
+    <value>hadoop</value>
+    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
 
- </property>
+  </property>
 
   <property>
     <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
     <value>50000000</value>
     <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialize.
-   </description>
+    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+      initialize.
+    </description>
   </property>
   <property>
     <name>mapreduce.history.server.embedded</name>
     <value>false</value>
     <description>Should job history server be embedded within Job tracker
-process</description>
+      process</description>
     <final>true</final>
   </property>
 
   <property>
     <name>mapreduce.history.server.http.address</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>localhost:51111</value>
     <description>Http address of the history server</description>
     <final>true</final>
   </property>
@@ -500,38 +536,38 @@ process</description>
   <property>
     <name>mapreduce.jobhistory.kerberos.principal</name>
     <!-- cluster variant -->
-  <value></value>
+    <value></value>
     <description>Job history user name key. (must map to same user as JT
-user)</description>
+      user)</description>
   </property>
 
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+    <value>180</value>
+    <description>
+      3-hour sliding window (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+    <value>15</value>
+    <description>
+      15-minute bucket size (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.names</name>
+    <value>default</value>
+    <description> Comma separated list of queues configured for this jobtracker.</description>
+  </property>
 
 </configuration>

+ 10 - 10
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/WEBHCAT/configuration/webhcat-site.xml

@@ -25,7 +25,7 @@ limitations under the License.
 
   <property>
     <name>templeton.port</name>
-      <value>50111</value>
+    <value>50111</value>
     <description>The HTTP port for the main server.</description>
   </property>
 
@@ -93,7 +93,7 @@ limitations under the License.
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 
@@ -104,18 +104,18 @@ limitations under the License.
   </property>
 
   <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>
+      Enable the override path in templeton.override.jars
+    </description>
+  </property>
 
- <property>
+  <property>
     <name>templeton.streaming.jar</name>
     <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
     <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
+  </property>
 
   <property>
     <name>templeton.exec.timeout</name>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HBASE/configuration/hbase-site.xml

@@ -309,7 +309,7 @@
   -->
   <property>
     <name>hbase.zookeeper.quorum</name>
-    <value></value>
+    <value>localhost</value>
     <description>Comma separated list of servers in the ZooKeeper Quorum.
       For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
       By default this is set to localhost for local and pseudo-distributed modes

+ 81 - 81
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/core-site.xml

@@ -1,36 +1,36 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
 <!-- Put site-specific property overrides in this file. -->
 
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
+  <!-- i/o properties -->
 
   <property>
     <name>io.file.buffer.size</name>
     <value>131072</value>
     <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
+      The size of this buffer should probably be a multiple of hardware
+      page size (4096 on Intel x86), and it determines how much data is
+      buffered during read and write operations.</description>
   </property>
 
   <property>
@@ -42,17 +42,17 @@
     <name>io.compression.codecs</name>
     <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
     <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
+      for compression/decompression.</description>
   </property>
 
-<!-- file system properties -->
+  <!-- file system properties -->
 
   <property>
     <name>fs.defaultFS</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>hdfs://localhost:8020</value>
     <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
+      literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
   </property>
 
@@ -60,8 +60,8 @@
     <name>fs.trash.interval</name>
     <value>360</value>
     <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
+      If zero, the trash feature is disabled.
+    </description>
   </property>
 
   <!-- ipc properties: copied from kryptonite configuration -->
@@ -69,16 +69,16 @@
     <name>ipc.client.idlethreshold</name>
     <value>8000</value>
     <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
+      connections will be inspected for idleness.
+    </description>
   </property>
 
   <property>
     <name>ipc.client.connection.maxidletime</name>
     <value>30000</value>
     <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
+      connection to the server.
+    </description>
   </property>
 
   <property>
@@ -92,75 +92,75 @@
     <name>mapreduce.jobtracker.webinterface.trusted</name>
     <value>false</value>
     <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
+      actions, such as kill job, delete file, etc., that should
+      not be exposed to public. Enable this option if the interfaces
+      are only reachable by those who have the right authorization.
+    </description>
   </property>
 
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+    <description>
+      Set the authentication for the cluster. Valid values are: simple or
+      kerberos.
+    </description>
+  </property>
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>false</value>
+    <description>
+      Enable authorization for different protocols.
+    </description>
+  </property>
 
   <property>
     <name>hadoop.security.auth_to_local</name>
     <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
+      RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+      RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+      RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+      RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+      RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+      DEFAULT
     </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
+    <description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
+      So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
+      "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
+      The translations rules have 3 sections:
       base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+      The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
 
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+      [1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+      [2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+      [2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
 
-The filter is a regex in parens that must the generated string for the rule to apply.
+      The filter is a regex in parens that must match the generated string for the rule to apply.
 
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+      "(.*%admin)" will take any string that ends in "%admin"
+      "(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
 
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
+      Finally, the substitution is a sed rule to translate a regex into a fixed string.
 
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
+      "s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+      "s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+      "s/X/Y/g" replaces all of the "X" in the name with "Y"
 
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+      So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
 
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
+      RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+      DEFAULT
 
-To also translate the names with a second component, you'd make the rules:
+      To also translate the names with a second component, you'd make the rules:
 
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-RULE:[2:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
+      RULE:[1:$1@$0](.@ACME.ORG)s/@.//
+      RULE:[2:$1@$0](.@ACME.ORG)s/@.//
+      DEFAULT
 
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+      If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
 
-RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
-DEFAULT
+      RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
+      DEFAULT
     </description>
   </property>
 

+ 5 - 5
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml

@@ -70,7 +70,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value></value>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are
@@ -80,7 +80,7 @@
   <!--
     <property>
       <name>dfs.hosts</name>
-      <value></value>
+      <value>/etc/hadoop/conf/dfs.include</value>
       <description>Names a file that contains a list of hosts that are
       permitted to connect to the namenode. The full pathname of the file
       must be specified.  If the value is empty, all hosts are
@@ -198,7 +198,7 @@
 
   <property>
     <name>dfs.namenode.http-address</name>
-    <value></value>
+    <value>localhost:50070</value>
     <description>The name of the default file system.  Either the
       literal string "local" or a host:port for NDFS.</description>
     <final>true</final>
@@ -319,7 +319,7 @@
   <property>
     <!-- cluster variant -->
     <name>dfs.namenode.secondary.http-address</name>
-    <value></value>
+    <value>localhost:50090</value>
     <description>Address of secondary namenode web server</description>
   </property>
 
@@ -376,7 +376,7 @@
 
   <property>
     <name>dfs.namenode.https-address</name>
-    <value></value>
+    <value>localhost:50470</value>
     <description>The https address where namenode binds</description>
 
   </property>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml

@@ -77,7 +77,7 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.uris</name>
-    <value></value>
+    <value>thrift://localhost:9083</value>
     <description>URI for client to contact metastore server</description>
   </property>
 

+ 13 - 10
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml

@@ -25,7 +25,7 @@ limitations under the License.
 
   <property>
     <name>templeton.port</name>
-      <value>50111</value>
+    <value>50111</value>
     <description>The HTTP port for the main server.</description>
   </property>
 
@@ -87,13 +87,16 @@ limitations under the License.
   <property>
     <name>templeton.hive.properties</name>
     <value></value>
+
+
+
     <description>Properties to set when running hive.</description>
   </property>
 
 
   <property>
     <name>templeton.zookeeper.hosts</name>
-    <value></value>
+    <value>localhost:2181</value>
     <description>ZooKeeper servers, as comma separated host:port pairs</description>
   </property>
 
@@ -104,18 +107,18 @@ limitations under the License.
   </property>
 
   <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
+    <name>templeton.override.enabled</name>
+    <value>false</value>
+    <description>
+      Enable the override path in templeton.override.jars
+    </description>
+  </property>
 
- <property>
+  <property>
     <name>templeton.streaming.jar</name>
     <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
     <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
+  </property>
 
   <property>
     <name>templeton.exec.timeout</name>

+ 6 - 0
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/YARN/configuration/yarn-site.xml

@@ -23,6 +23,12 @@
 
   <!-- ResourceManager -->
 
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+  </property>
+
   <property>
     <name>yarn.resourcemanager.resource-tracker.address</name>
     <value>localhost:8025</value>

+ 0 - 115
ambari-web/app/data/HDP2/config_mapping.js

@@ -19,16 +19,6 @@
 var App = require('app');
 var configs = [
 /**********************************************core-site***************************************/
-  {
-    "name": "fs.defaultFS",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "hdfs://<templateName[0]>:8020",
-    "precondition": function () {
-      return (App.HDFSService.find('HDFS') && App.HDFSService.find('HDFS').get('snameNode'));
-    },
-    "filename": "core-site.xml"
-  },
   {
     "name": "hadoop.proxyuser.<foreignKey[0]>.groups",
     "templateName": ["proxyuser_group"],
@@ -78,13 +68,6 @@ var configs = [
     "isOverridable": true
   },
 /**********************************************hdfs-site***************************************/
-  {
-    "name": "dfs.hosts.exclude",
-    "templateName": ["hadoop_conf_dir", "dfs_exclude"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/<templateName[1]>",
-    "filename": "hdfs-site.xml"
-  },
   {
     "name": "dfs.datanode.address",
     "templateName": ["dfs_datanode_address"],
@@ -99,104 +82,6 @@ var configs = [
     "value": "0.0.0.0:<templateName[0]>",
     "filename": "hdfs-site.xml"
   },
-  {
-    "name": "dfs.namenode.http-address",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50070",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.namenode.secondary.http-address",
-    "templateName": ["snamenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50090",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.namenode.https-address",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50470",
-    "filename": "hdfs-site.xml"
-  },
-
-/**********************************************oozie-site***************************************/
-  {
-    "name": "oozie.base.url",
-    "templateName": ["oozieserver_host"],
-    "foreignKey": null,
-    "value": "http://<templateName[0]>:11000/oozie",
-    "filename": "oozie-site.xml"
-  },
-
-/**********************************************hive-site***************************************/
-  {
-    "name": "hive.metastore.uris",
-    "templateName": ["hivemetastore_host"],
-    "foreignKey": null,
-    "value": "thrift://<templateName[0]>:9083",
-    "filename": "hive-site.xml"
-  },
-/**********************************************yarn-site***************************************/
-  {
-    "name": "yarn.resourcemanager.hostname",
-    "templateName": ["rm_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>",
-    "filename": "yarn-site.xml"
-  },
-  {
-    "name": "yarn.resourcemanager.webapp.address",
-    "templateName": ["rm_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:8088",
-    "filename": "yarn-site.xml"
-  },
-  {
-    "name": "yarn.resourcemanager.resource-tracker.address",
-    "templateName": ["rm_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:8025",
-    "filename": "yarn-site.xml"
-  },
-  {
-    "name": "yarn.resourcemanager.scheduler.address",
-    "templateName": ["rm_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:8030",
-    "filename": "yarn-site.xml"
-  },
-  {
-    "name": "yarn.resourcemanager.address",
-    "templateName": ["rm_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:8050",
-    "filename": "yarn-site.xml"
-  },
-  {
-    "name": "yarn.resourcemanager.admin.address",
-    "templateName": ["rm_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:8141",
-    "filename": "yarn-site.xml"
-  },
-
-/**********************************************mapred-site***************************************/
-  {
-    "name": "mapreduce.jobhistory.webapp.address",
-    "templateName": ["hs_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:19888",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.jobhistory.address",
-    "templateName": ["hs_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:10020",
-    "filename": "mapred-site.xml"
-  },
 
 /**********************************************hbase-site***************************************/
   {

+ 0 - 13
ambari-web/app/data/HDP2/global_properties.js

@@ -221,19 +221,6 @@ module.exports =
       "serviceName": "HDFS",
       "category": "NameNode"
     },
-    {
-      "id": "puppet var",
-      "name": "dfs_exclude",
-      "displayName": "Exclude hosts",
-      "description": "Names a file that contains a list of hosts that are not permitted to connect to the namenode.  This file will be placed inside the Hadoop conf directory.",
-      "defaultValue": "dfs.exclude",
-      "displayType": "advanced",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
     {
       "id": "puppet var",
       "name": "security_enabled",

+ 0 - 103
ambari-web/app/data/config_mapping.js

@@ -18,13 +18,6 @@
 
 var configs = [
   /**********************************************HDFS***************************************/
-  {
-    "name": "fs.default.name",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "hdfs://<templateName[0]>:8020",
-    "filename": "core-site.xml"
-  },
   {
     "name": "hadoop.proxyuser.<foreignKey[0]>.groups",
     "templateName": ["proxyuser_group"],
@@ -73,20 +66,6 @@ var configs = [
     "filename": "core-site.xml",
     "isOverridable" : true
   },
-  {
-    "name": "dfs.hosts.exclude",
-    "templateName": ["hadoop_conf_dir", "dfs_exclude"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/<templateName[1]>",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.hosts",
-    "templateName": ["hadoop_conf_dir", "dfs_include"],
-    "foreignKey": null,
-    "value": "<templateName[0]>\/<templateName[1]>",
-    "filename": "hdfs-site.xml"
-  },
   {
     "name": "dfs.datanode.address",
     "templateName": ["dfs_datanode_address"],
@@ -101,50 +80,8 @@ var configs = [
     "value": "0.0.0.0:<templateName[0]>",
     "filename": "hdfs-site.xml"
   },
-  {
-    "name": "dfs.http.address",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50070",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.secondary.http.address",
-    "templateName": ["snamenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50090",
-    "filename": "hdfs-site.xml"
-  },
-  {
-    "name": "dfs.https.address",
-    "templateName": ["namenode_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50470",
-    "filename": "hdfs-site.xml"
-  },
 
   /******************************************MAPREDUCE***************************************/
-  {
-    "name": "mapred.hosts",
-    "templateName": ["hadoop_conf_dir", "mapred_hosts_include"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/<templateName[1]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.hosts.exclude",
-    "templateName": ["hadoop_conf_dir", "mapred_hosts_exclude"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/<templateName[1]>",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.job.tracker.persist.jobstatus.dir",
-    "templateName": ["hadoop_conf_dir"],
-    "foreignKey": null,
-    "value": "<templateName[0]>/health_check",
-    "filename": "mapred-site.xml"
-  },
   {
     "name": "mapred.child.java.opts",
     "templateName": ["mapred_child_java_opts_sz"],
@@ -152,46 +89,6 @@ var configs = [
     "value": "-server -Xmx<templateName[0]>m -Djava.net.preferIPv4Stack=true",
     "filename": "mapred-site.xml"
   },
-  {
-    "name": "mapred.job.tracker",
-    "templateName": ["jobtracker_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50300",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapred.job.tracker.http.address",
-    "templateName": ["jobtracker_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:50030",
-    "filename": "mapred-site.xml"
-  },
-  {
-    "name": "mapreduce.history.server.http.address",
-    "templateName": ["jobtracker_host"],
-    "foreignKey": null,
-    "value": "<templateName[0]>:51111",
-    "filename": "mapred-site.xml"
-  },
-
-  /**********************************************oozie-site***************************************/
-  {
-    "name": "oozie.base.url",
-    "templateName": ["oozieserver_host"],
-    "foreignKey": null,
-    "value": "http://<templateName[0]>:11000/oozie",
-    "filename": "oozie-site.xml"
-  },
-
-  /**********************************************hive-site***************************************/
-
-  {
-    "name": "hive.metastore.uris",
-    "templateName": ["hivemetastore_host"],
-    "foreignKey": null,
-    "value": "thrift://<templateName[0]>:9083",
-    "filename": "hive-site.xml"
-  },
 
 /**********************************************hbase-site***************************************/
   {

+ 0 - 50
ambari-web/app/data/global_properties.js

@@ -234,32 +234,6 @@ module.exports =
       "serviceName": "HDFS",
       "category": "NameNode"
     },
-    {
-      "id": "puppet var",
-      "name": "dfs_exclude",
-      "displayName": "Exclude hosts",
-      "description": "Names a file that contains a list of hosts that are not permitted to connect to the namenode.  This file will be placed inside the Hadoop conf directory.",
-      "defaultValue": "dfs.exclude",
-      "displayType": "advanced",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "dfs_include",
-      "displayName": "Include hosts",
-      "description": "Names a file that contains a list of hosts that are permitted to connect to the namenode.  This file will be placed inside the Hadoop conf directory.",
-      "defaultValue": "dfs.include",
-      "displayType": "advanced",
-      "isVisible": true,
-      "filename": "hdfs-site.xml",
-      "domain": "global",
-      "serviceName": "HDFS",
-      "category": "Advanced"
-    },
     {
       "id": "puppet var",
       "name": "security_enabled",
@@ -519,30 +493,6 @@ module.exports =
       "serviceName": "MAPREDUCE",
       "index": 13
     },
-    {
-      "id": "puppet var",
-      "name": "mapred_hosts_exclude",
-      "displayName": "Exclude hosts",
-      "description": "Exclude entered hosts",
-      "defaultValue": "mapred.exclude",
-      "displayType": "directories",
-      "isVisible": false,
-      "serviceName": "MAPREDUCE",
-      "domain": "global",
-      "category": "Advanced"
-    },
-    {
-      "id": "puppet var",
-      "name": "mapred_hosts_include",
-      "displayName": "Include hosts",
-      "description": "Include enetered hosts",
-      "defaultValue": "mapred.include",
-      "displayType": "directories",
-      "isVisible": false,
-      "serviceName": "MAPREDUCE",
-      "domain": "global",
-      "category": "Advanced"
-    },
   /**********************************************HBASE***************************************/
     {
       "id": "puppet var",

+ 24 - 1
ambari-web/app/data/site_properties.js

@@ -85,7 +85,6 @@ module.exports =
       "serviceName": "HDFS",
       "index": 0
     },
-
     {
       "id": "site property",
       "name": "dfs.datanode.failed.volumes.tolerated",
@@ -321,6 +320,30 @@ module.exports =
       "serviceName": "MAPREDUCE",
       "index": 10
     },
+    {
+      "id": "site property",
+      "name": "mapred.hosts",
+      "displayName": "mapred.hosts",
+      "description": "Names a file that contains the list of nodes that may\
+      connect to the jobtracker.  If the value is empty, all hosts are \
+      permitted.",
+      "defaultValue": "",
+      "displayType": "directory",
+      "category": "Advanced",
+      "serviceName": "MAPREDUCE"
+    },
+    {
+      "id": "site property",
+      "name": "mapred.hosts.exclude",
+      "displayName": "mapred.hosts.exclude",
+      "description": "Names a file that contains the list of hosts that\
+      should be excluded by the jobtracker.  If the value is empty, no\
+      hosts are excluded.",
+      "defaultValue": "",
+      "displayType": "directory",
+      "category": "Advanced",
+      "serviceName": "MAPREDUCE"
+    },
 
   /**********************************************oozie-site***************************************/
     {

+ 98 - 4
ambari-web/app/models/service_config.js

@@ -238,6 +238,30 @@ App.ServiceConfigProperty = Ember.Object.extend({
       case 'namenode_host':
         this.set('value', masterComponentHostsInDB.filterProperty('component', 'NAMENODE').mapProperty('hostName'));
         break;
+      case 'dfs.http.address':
+        var nnHost =  masterComponentHostsInDB.findProperty('component', 'NAMENODE').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",nnHost);
+        break;
+      case 'dfs.namenode.http-address':
+        var nnHost =  masterComponentHostsInDB.findProperty('component', 'NAMENODE').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",nnHost);
+        break;
+      case 'dfs.https.address':
+        var nnHost =  masterComponentHostsInDB.findProperty('component', 'NAMENODE').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",nnHost);
+        break;
+      case 'dfs.namenode.https-address':
+        var nnHost =  masterComponentHostsInDB.findProperty('component', 'NAMENODE').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",nnHost);
+        break;
+      case 'fs.default.name':
+        var nnHost = masterComponentHostsInDB.filterProperty('component', 'NAMENODE').mapProperty('hostName');
+        this.setDefaultValue(":\/\/(\\w*)(?=:)",'://' + nnHost);
+        break;
+      case 'fs.defaultFS':
+        var nnHost = masterComponentHostsInDB.filterProperty('component', 'NAMENODE').mapProperty('hostName');
+        this.setDefaultValue(":\/\/(\\w*)(?=:)",'://' + nnHost);
+        break;
       case 'snamenode_host':
         // Secondary NameNode does not exist when NameNode HA is enabled
         var snn = masterComponentHostsInDB.findProperty('component', 'SECONDARY_NAMENODE');
@@ -245,6 +269,14 @@ App.ServiceConfigProperty = Ember.Object.extend({
           this.set('value', snn.hostName);
         }
         break;
+      case 'dfs.secondary.http.address':
+        var snnHost = masterComponentHostsInDB.findProperty('component', 'SECONDARY_NAMENODE').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",snnHost);
+        break;
+      case 'dfs.namenode.secondary.http-address':
+        var snnHost = masterComponentHostsInDB.findProperty('component', 'SECONDARY_NAMENODE').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",snnHost);
+        break;
       case 'datanode_hosts':
         this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'DATANODE').hosts.mapProperty('hostName'));
         break;
@@ -253,20 +285,62 @@ App.ServiceConfigProperty = Ember.Object.extend({
         break;
       case 'yarn.log.server.url':
         var hsHost = masterComponentHostsInDB.filterProperty('component', 'HISTORYSERVER').mapProperty('hostName');
-        var defaultValue = this.get('defaultValue');
-        defaultValue = defaultValue.replace(/:\/\/(\w*)(?=:)/,'://' + hsHost);
-        this.set('defaultValue',defaultValue);
-        this.set('value',this.get('defaultValue'));
+        this.setDefaultValue(":\/\/(\\w*)(?=:)",'://' + hsHost);
+        break;
+      case 'mapreduce.jobhistory.webapp.address':
+        var hsHost = masterComponentHostsInDB.filterProperty('component', 'HISTORYSERVER').mapProperty('hostName');
+        this.setDefaultValue("(\\w*)(?=:)",hsHost);
+        break;
+      case 'mapreduce.jobhistory.address':
+        var hsHost = masterComponentHostsInDB.filterProperty('component', 'HISTORYSERVER').mapProperty('hostName');
+        this.setDefaultValue("(\\w*)(?=:)",hsHost);
         break;
       case 'rm_host':
         this.set('value', masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName);
         break;
+      case 'yarn.resourcemanager.hostname':
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        this.set('defaultValue',rmHost);
+        this.set('value',this.get('defaultValue'));
+        break;
+      case 'yarn.resourcemanager.resource-tracker.address':
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",rmHost);
+        break;
+      case 'yarn.resourcemanager.webapp.address':
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",rmHost);
+        break;
+      case 'yarn.resourcemanager.scheduler.address':
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",rmHost);
+        break;
+      case 'yarn.resourcemanager.address':
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",rmHost);
+        break;
+      case 'yarn.resourcemanager.admin.address':
+        var rmHost = masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",rmHost);
+        break;
       case 'nm_hosts':
         this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'NODEMANAGER').hosts.mapProperty('hostName'));
         break;
       case 'jobtracker_host':
         this.set('value', masterComponentHostsInDB.findProperty('component', 'JOBTRACKER').hostName);
         break;
+      case 'mapred.job.tracker':
+        var jtHost = masterComponentHostsInDB.findProperty('component', 'JOBTRACKER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",jtHost);
+        break;
+      case 'mapred.job.tracker.http.address':
+        var jtHost = masterComponentHostsInDB.findProperty('component', 'JOBTRACKER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",jtHost);
+        break;
+      case 'mapreduce.history.server.http.address':
+        var jtHost = masterComponentHostsInDB.findProperty('component', 'JOBTRACKER').hostName;
+        this.setDefaultValue("(\\w*)(?=:)",jtHost);
+        break;
       case 'tasktracker_hosts':
         this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'TASKTRACKER').hosts.mapProperty('hostName'));
         break;
@@ -279,12 +353,20 @@ App.ServiceConfigProperty = Ember.Object.extend({
       case 'hivemetastore_host':
         this.set('value', masterComponentHostsInDB.findProperty('component', 'HIVE_SERVER').hostName);
         break;
+      case 'hive.metastore.uris':
+        var hiveHost = masterComponentHostsInDB.findProperty('component', 'HIVE_SERVER').hostName;
+        this.setDefaultValue(":\/\/(\\w*)(?=:)",'://' + hiveHost);
+        break;
       case 'hive_ambari_host':
         this.set('value', masterComponentHostsInDB.findProperty('component', 'HIVE_SERVER').hostName);
         break;
       case 'oozieserver_host':
         this.set('value', masterComponentHostsInDB.findProperty('component', 'OOZIE_SERVER').hostName);
         break;
+      case 'oozie.base.url':
+        var oozieHost = masterComponentHostsInDB.findProperty('component', 'OOZIE_SERVER').hostName;
+        this.setDefaultValue(":\/\/(\\w*)(?=:)",'://' + oozieHost);
+        break;
       case 'webhcatserver_host':
         this.set('value', masterComponentHostsInDB.findProperty('component', 'WEBHCAT_SERVER').hostName);
         break;
@@ -316,6 +398,18 @@ App.ServiceConfigProperty = Ember.Object.extend({
     }
   },
 
+  /**
+   * @param regex : String
+   * @param replaceWith : String
+   */
+  setDefaultValue: function(regex,replaceWith) {
+    var defaultValue = this.get('defaultValue');
+    var re = new RegExp(regex);
+    defaultValue = defaultValue.replace(re,replaceWith);
+    this.set('defaultValue',defaultValue);
+    this.set('value',this.get('defaultValue'));
+  },
+
   unionAllMountPoints: function (isOnlyFirstOneNeeded, localDB) {
     var hostname = '';
     var mountPointsPerHost = [];