
Revert "AMBARI-3755: Config Refactor: Installer wizard should not save global configuration not used by agent. (jaimin)"

This reverts commit fb60f21dac3e98a7925cdac76fc4225118cec5cb.
Yusaku Sako, 11 years ago
parent commit 05be921e3e
33 changed files with 1282 additions and 810 deletions
  1. + 188 - 198
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml
  2. + 4 - 10
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
  3. + 250 - 255
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
  4. + 2 - 12
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
  5. + 0 - 6
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
  6. + 2 - 9
      ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
  7. + 2 - 12
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
  8. + 0 - 7
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
  9. + 2 - 12
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml
  10. + 0 - 6
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml
  11. + 5 - 10
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
  12. + 2 - 12
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml
  13. + 0 - 6
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml
  14. + 2 - 9
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
  15. + 2 - 12
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml
  16. + 0 - 7
      ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml
  17. + 6 - 0
      ambari-web/app/controllers/main/admin/security/add/step4.js
  18. + 4 - 4
      ambari-web/app/controllers/main/admin/security/disable.js
  19. + 1 - 1
      ambari-web/app/controllers/main/service/info/configs.js
  20. + 0 - 1
      ambari-web/app/controllers/wizard.js
  21. + 11 - 4
      ambari-web/app/controllers/wizard/step8_controller.js
  22. + 15 - 0
      ambari-web/app/data/HDP2/config_mapping.js
  23. + 170 - 49
      ambari-web/app/data/HDP2/global_properties.js
  24. + 4 - 4
      ambari-web/app/data/HDP2/secure_mapping.js
  25. + 0 - 14
      ambari-web/app/data/HDP2/site_properties.js
  26. + 23 - 0
      ambari-web/app/data/config_mapping.js
  27. + 504 - 22
      ambari-web/app/data/global_properties.js
  28. + 4 - 4
      ambari-web/app/data/secure_mapping.js
  29. + 0 - 26
      ambari-web/app/data/site_properties.js
  30. + 0 - 1
      ambari-web/app/models/service_config.js
  31. + 78 - 92
      ambari-web/app/utils/config.js
  32. + 0 - 4
      ambari-web/app/utils/helper.js
  33. + 1 - 1
      ambari-web/app/views/wizard/controls_view.js

+ 188 - 198
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml

@@ -22,7 +22,7 @@
 
 <configuration>
 
-  <!-- file system properties -->
+<!-- file system properties -->
 
   <property>
     <name>dfs.name.dir</name>
@@ -49,7 +49,7 @@
     <final>true</final>
   </property>
 
-  <property>
+ <property>
     <name>dfs.datanode.socket.write.timeout</name>
     <value>0</value>
     <description>DFS Client write socket timeout</description>
@@ -66,7 +66,7 @@
     <name>dfs.block.local-path-access.user</name>
     <value>hbase</value>
     <description>the user who is allowed to perform short
-      circuit reads.
+    circuit reads.
     </description>
     <final>true</final>
   </property>
@@ -75,11 +75,11 @@
     <name>dfs.data.dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Determines where on the local filesystem an DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
+  should store its blocks.  If this is a comma-delimited
+  list of directories, then data will be stored in all named
+  directories, typically on different devices.
+  Directories that do not exist are ignored.
+  </description>
     <final>true</final>
   </property>
 
@@ -87,32 +87,32 @@
     <name>dfs.hosts.exclude</name>
     <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
+    not permitted to connect to the namenode.  The full pathname of the
+    file must be specified.  If the value is empty, no hosts are
+    excluded.</description>
   </property>
 
   <property>
     <name>dfs.hosts</name>
     <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
+    permitted to connect to the namenode. The full pathname of the file
+    must be specified.  If the value is empty, all hosts are
+    permitted.</description>
   </property>
 
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
-    </description>
+  </description>
   </property>
 
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
-    </description>
+  </description>
   </property>
 
   <property>
@@ -125,21 +125,21 @@
     <name>dfs.safemode.threshold.pct</name>
     <value>1.0f</value>
     <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
+        Specifies the percentage of blocks that should satisfy
+        the minimal replication requirement defined by dfs.replication.min.
+        Values less than or equal to 0 mean not to start in safe mode.
+        Values greater than 1 will make safe mode permanent.
+        </description>
   </property>
 
   <property>
     <name>dfs.balance.bandwidthPerSec</name>
     <value>6250000</value>
     <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in term of
-      the number of bytes per second.
-    </description>
+        Specifies the maximum amount of bandwidth that each datanode
+        can utilize for the balancing purpose in term of
+        the number of bytes per second.
+  </description>
   </property>
 
   <property>
@@ -150,24 +150,14 @@
     </description>
   </property>
 
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-  </property>
-
   <property>
     <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
+    <value></value>
   </property>
 
   <property>
     <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <value></value>
   </property>
 
   <property>
@@ -179,133 +169,133 @@
   <property>
     <name>dfs.http.address</name>
     <value>localhost:50070</value>
-    <description>The name of the default file system.  Either the
-      literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
+<description>The name of the default file system.  Either the
+literal string "local" or a host:port for NDFS.</description>
+<final>true</final>
+</property>
+
+<property>
+<name>dfs.datanode.du.reserved</name>
+<!-- cluster variant -->
+<value>1073741824</value>
+<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+</description>
+</property>
+
+<property>
+<name>dfs.datanode.ipc.address</name>
+<value>0.0.0.0:8010</value>
+<description>
+The datanode ipc server address and port.
+If the port is 0 then the server will start on a free port.
+</description>
+</property>
+
+<property>
+<name>dfs.blockreport.initialDelay</name>
+<value>120</value>
+<description>Delay for first block report in seconds.</description>
+</property>
+
+<property>
+<name>dfs.datanode.du.pct</name>
+<value>0.85f</value>
+<description>When calculating remaining space, only use this percentage of the real available space
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>40</value>
+<description>The number of server threads for the namenode.</description>
+</property>
+
+<property>
+<name>dfs.datanode.max.xcievers</name>
+<value>4096</value>
+<description>PRIVATE CONFIG VARIABLE</description>
+</property>
+
+<!-- Permissions configuration -->
+
+<property>
+<name>dfs.umaskmode</name>
+<value>077</value>
+<description>
+The octal umask used when creating files and directories.
+</description>
+</property>
+
+<property>
+<name>dfs.web.ugi</name>
+<!-- cluster variant -->
+<value>gopher,gopher</value>
+<description>The user account used by the web interface.
+Syntax: USERNAME,GROUP1,GROUP2, ...
+</description>
+</property>
+
+<property>
+<name>dfs.permissions</name>
+<value>true</value>
+<description>
+If "true", enable permission checking in HDFS.
+If "false", permission checking is turned off,
+but all other behavior is unchanged.
+Switching from one parameter value to the other does not change the mode,
+owner or group of files or directories.
+</description>
+</property>
+
+<property>
+<name>dfs.permissions.supergroup</name>
+<value>hdfs</value>
+<description>The name of the group of super-users.</description>
+</property>
+
+<property>
+<name>dfs.namenode.handler.count</name>
+<value>100</value>
+<description>Added to grow Queue size so that more client connections are allowed</description>
+</property>
+
+<property>
+<name>ipc.server.max.response.size</name>
+<value>5242880</value>
+</property>
+<property>
+<name>dfs.block.access.token.enable</name>
+<value>true</value>
+<description>
+If "true", access tokens are used as capabilities for accessing datanodes.
+If "false", no access tokens are checked on accessing datanodes.
+</description>
+</property>
+
+<property>
+<name>dfs.namenode.kerberos.principal</name>
+<value></value>
+<description>
+Kerberos principal name for the NameNode
+</description>
+</property>
+
+<property>
+<name>dfs.secondary.namenode.kerberos.principal</name>
+<value></value>
     <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
+        Kerberos principal name for the secondary NameNode.
     </description>
   </property>
 
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
 
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
+<!--
+  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+-->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
     <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+     <description>The Kerberos principal for the host that the NameNode runs on.</description>
 
   </property>
 
@@ -351,84 +341,84 @@
   <property>
     <name>dfs.datanode.kerberos.principal</name>
     <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+ <description>
+        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
   </property>
 
   <property>
     <name>dfs.namenode.keytab.file</name>
     <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
+ <description>
+        Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
     <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
+  <description>
+        Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.datanode.keytab.file</name>
     <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
+ <description>
+        The filename of the keytab file for the DataNode.
     </description>
   </property>
 
   <property>
     <name>dfs.https.port</name>
     <value>50470</value>
-    <description>The https port where namenode binds</description>
+ <description>The https port where namenode binds</description>
 
   </property>
 
   <property>
     <name>dfs.https.address</name>
     <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
+  <description>The https address where namenode binds</description>
 
   </property>
 
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-    <description>The permissions that should be there on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for HDFS file is precise upto this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks datanode would tolerate</description>
-  </property>
+<description>The permissions that should be there on dfs.data.dir
+directories. The datanode will not come up if the permissions are
+different on existing dfs.data.dir directories. If the directories
+don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+  <name>dfs.access.time.precision</name>
+  <value>0</value>
+  <description>The access time for HDFS file is precise upto this value.
+               The default value is 1 hour. Setting a value of 0 disables
+               access times for HDFS.
+  </description>
+</property>
+
+<property>
+ <name>dfs.cluster.administrators</name>
+ <value> hdfs</value>
+ <description>ACL for who all can view the default servlets in the HDFS</description>
+</property>
+
+<property>
+  <name>ipc.server.read.threadpool.size</name>
+  <value>5</value>
+  <description></description>
+</property>
+
+<property>
+  <name>dfs.datanode.failed.volumes.tolerated</name>
+  <value>0</value>
+  <description>Number of failed disks datanode would tolerate</description>
+</property>
 
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>
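
Note: the hunk above (and the matching hunks in the other stacks below) drops the ambari.dfs.datanode.port / ambari.dfs.datanode.http.port helper properties together with the ${...} references that pointed at them. A minimal sketch of that indirection follows; resolve() is illustrative only, standing in for Hadoop's Configuration, which expands ${property} references from sibling properties at read time.

  // Illustrative sketch, not Ambari or Hadoop code: expand ${name}
  // references against the other properties in the same map.
  function resolve(props, key) {
    var raw = props[key];
    if (raw == null) return raw;
    return raw.replace(/\$\{([^}]+)\}/g, function (match, name) {
      return (name in props) ? resolve(props, name) : match;
    });
  }

  var props = {
    'ambari.dfs.datanode.port': '50010',
    'dfs.datanode.address': '0.0.0.0:${ambari.dfs.datanode.port}'
  };
  resolve(props, 'dfs.datanode.address'); // => "0.0.0.0:50010"

After the revert, dfs.datanode.address ships with an empty value in the 1.3.x stacks and the web UI fills it in through config_mapping.js (see that file's hunk below).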

+ 4 - 10
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml

@@ -18,12 +18,6 @@ limitations under the License.
 -->
 
 <configuration>
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc</value>
@@ -58,21 +52,21 @@ limitations under the License.
     <name>hive.metastore.sasl.enabled</name>
     <value></value>
     <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
+     Clients must authenticate with Kerberos.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
     <value></value>
     <description>The path to the Kerberos Keytab file containing the metastore
-      thrift server's service principal.</description>
+     thrift server's service principal.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.principal</name>
     <value></value>
     <description>The service principal for the metastore thrift server. The special
-      string _HOST will be replaced automatically with the correct host name.</description>
+    string _HOST will be replaced automatically with the correct host name.</description>
   </property>
 
   <property>
@@ -115,7 +109,7 @@ limitations under the License.
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
-      The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
   </property>
 
   <property>

+ 250 - 255
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml

@@ -22,7 +22,7 @@
 
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
 
-  <!-- i/o properties -->
+<!-- i/o properties -->
 
   <property>
     <name>io.sort.mb</name>
@@ -50,25 +50,25 @@
     <description>No description</description>
   </property>
 
-  <!-- map/reduce properties -->
+<!-- map/reduce properties -->
 
-  <property>
-    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-    <value>250</value>
-    <description>Normally, this is the amount of time before killing
-      processes, and the recommended-default is 5.000 seconds - a value of
-      5000 here.  In this case, we are using it solely to blast tasks before
-      killing them, and killing them very quickly (1/4 second) to guarantee
-      that we do not leave VMs around for later jobs.
-    </description>
-  </property>
+<property>
+  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+  <value>250</value>
+  <description>Normally, this is the amount of time before killing
+  processes, and the recommended-default is 5.000 seconds - a value of
+  5000 here.  In this case, we are using it solely to blast tasks before
+  killing them, and killing them very quickly (1/4 second) to guarantee
+  that we do not leave VMs around for later jobs.
+  </description>
+</property>
 
   <property>
     <name>mapred.job.tracker.handler.count</name>
     <value>50</value>
     <description>
-      The number of server threads for the JobTracker. This should be roughly
-      4% of the number of tasktracker nodes.
+    The number of server threads for the JobTracker. This should be roughly
+    4% of the number of tasktracker nodes.
     </description>
   </property>
 
@@ -91,10 +91,11 @@
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
     <value>localhost:50030</value>
-    <description>JobTracker host and http port address</description>
+    <description>Http address for JobTracker</description>
     <final>true</final>
   </property>
 
+
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
@@ -104,8 +105,8 @@
   </property>
 
   <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
+  <name>mapreduce.cluster.administrators</name>
+  <value> hadoop</value>
   </property>
 
   <property>
@@ -135,14 +136,14 @@
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some map tasks
-      may be executed in parallel.</description>
+               may be executed in parallel.</description>
   </property>
 
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some reduce tasks
-      may be executed in parallel.</description>
+               may be executed in parallel.</description>
   </property>
 
   <property>
@@ -154,29 +155,29 @@
     <name>mapred.inmem.merge.threshold</name>
     <value>1000</value>
     <description>The threshold, in terms of the number of files
-      for the in-memory merge process. When we accumulate threshold number of files
-      we initiate the in-memory merge and spill to disk. A value of 0 or less than
-      0 indicates we want to DON'T have any threshold and instead depend only on
-      the ramfs's memory consumption to trigger the merge.
-    </description>
+  for the in-memory merge process. When we accumulate threshold number of files
+  we initiate the in-memory merge and spill to disk. A value of 0 or less than
+  0 indicates we want to DON'T have any threshold and instead depend only on
+  the ramfs's memory consumption to trigger the merge.
+  </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.merge.percent</name>
     <value>0.66</value>
     <description>The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapred.job.shuffle.input.buffer.percent.
-    </description>
+  initiated, expressed as a percentage of the total memory allocated to
+  storing in-memory map outputs, as defined by
+  mapred.job.shuffle.input.buffer.percent.
+  </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.input.buffer.percent</name>
     <value>0.7</value>
     <description>The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
+  size to storing map outputs during the shuffle.
+  </description>
   </property>
 
   <property>
@@ -187,13 +188,13 @@
     </description>
   </property>
 
-  <property>
-    <name>mapred.output.compression.type</name>
-    <value>BLOCK</value>
-    <description>If the job outputs are to compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
+<property>
+  <name>mapred.output.compression.type</name>
+  <value>BLOCK</value>
+  <description>If the job outputs are to compressed as SequenceFiles, how should
+               they be compressed? Should be one of NONE, RECORD or BLOCK.
+  </description>
+</property>
 
 
   <property>
@@ -210,7 +211,7 @@
     <name>mapred.jobtracker.restart.recover</name>
     <value>false</value>
     <description>"true" to enable (job) recovery upon restart,
-      "false" to start afresh
+               "false" to start afresh
     </description>
   </property>
 
@@ -218,20 +219,20 @@
     <name>mapred.job.reduce.input.buffer.percent</name>
     <value>0.0</value>
     <description>The percentage of memory- relative to the maximum heap size- to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
+  retain map outputs during the reduce. When the shuffle is concluded, any
+  remaining map outputs in memory must consume less than this threshold before
+  the reduce can begin.
+  </description>
   </property>
 
-  <property>
-    <name>mapreduce.reduce.input.limit</name>
-    <value>10737418240</value>
-    <description>The limit on the input size of the reduce. (This value
-      is 10 Gb.)  If the estimated input size of the reduce is greater than
-      this value, job is failed. A value of -1 means that there is no limit
-      set. </description>
-  </property>
+ <property>
+  <name>mapreduce.reduce.input.limit</name>
+  <value>10737418240</value>
+  <description>The limit on the input size of the reduce. (This value
+  is 10 Gb.)  If the estimated input size of the reduce is greater than
+  this value, job is failed. A value of -1 means that there is no limit
+  set. </description>
+</property>
 
 
   <!-- copied from kryptonite configuration -->
@@ -245,9 +246,9 @@
     <name>mapred.task.timeout</name>
     <value>600000</value>
     <description>The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
+  terminated if it neither reads an input, writes an output, nor
+  updates its status string.
+  </description>
   </property>
 
   <property>
@@ -259,9 +260,9 @@
   <property>
     <name>mapred.task.tracker.task-controller</name>
     <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>
-      TaskController which is used to launch and manage task execution.
-    </description>
+   <description>
+     TaskController which is used to launch and manage task execution.
+  </description>
   </property>
 
   <property>
@@ -269,17 +270,11 @@
     <value>INFO,TLA</value>
   </property>
 
-  <property>
-    <name>ambari.mapred.child.java.opts.memory</name>
-    <value>768</value>
-
-    <description>Java options Memory for the TaskTracker child processes</description>
-  </property>
-
   <property>
     <name>mapred.child.java.opts</name>
-    <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
-    <description>Java options for the TaskTracker child processes</description>
+    <value></value>
+
+    <description>No description</description>
   </property>
 
   <property>
@@ -294,7 +289,7 @@
     <name>mapred.cluster.reduce.memory.mb</name>
     <value>2048</value>
     <description>
-      The virtual memory size of a single Reduce slot in the MapReduce framework
+    The virtual memory size of a single Reduce slot in the MapReduce framework
     </description>
   </property>
 
@@ -340,137 +335,137 @@
     </description>
   </property>
 
-  <property>
-    <name>mapred.hosts.exclude</name>
-    <value>/etc/hadoop/conf/mapred.exclude</value>
-    <description>
-      Names a file that contains the list of hosts that
-      should be excluded by the jobtracker.  If the value is empty, no
-      hosts are excluded.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.max.tracker.blacklists</name>
-    <value>16</value>
-    <description>
-      if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>file:////mapred/jobstatus</value>
-    <description>
-      Directory path to view job status
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.interval</name>
-    <value>135000</value>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.timeout</name>
-    <value>60000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
-    <value>false</value>
-    <description>Indicates if persistency of job status information is
-      active or not.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.hours</name>
-    <value>1</value>
-    <description>The number of hours job status information is persisted in DFS.
-      The job status information will be available after it drops of the memory
-      queue and between jobtracker restarts. With a zero value the job status
-      information is not persisted at all in DFS.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
-    <value>/etc/hadoop/conf/health_check</value>
-    <description>The directory where the job status information is persisted
-      in a file system to be available after it drops of the memory queue and
-      between jobtracker restarts.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.check</name>
-    <value>10000</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.interval</name>
-    <value>21600000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>/mapred/history/done</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.maxtasks.per.job</name>
-    <value>-1</value>
-    <final>true</final>
-    <description>The maximum number of tasks for a single job.
-      A value of -1 indicates that there is no maximum.  </description>
-  </property>
-
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>mapred.userlog.retain.hours</name>
-    <value>24</value>
-    <description>
-      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reuse.jvm.num.tasks</name>
-    <value>1</value>
-    <description>
-      How many tasks to run per jvm. If set to -1, there is no limit
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value></value>
-    <description>
+<property>
+  <name>mapred.hosts.exclude</name>
+  <value>/etc/hadoop/conf/mapred.exclude</value>
+  <description>
+    Names a file that contains the list of hosts that
+    should be excluded by the jobtracker.  If the value is empty, no
+    hosts are excluded.
+  </description>
+</property>
+
+<property>
+  <name>mapred.max.tracker.blacklists</name>
+  <value>16</value>
+  <description>
+    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
+  </description>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.path</name>
+  <value>file:////mapred/jobstatus</value>
+  <description>
+    Directory path to view job status
+  </description>
+</property>
+
+<property>
+  <name>mapred.healthChecker.interval</name>
+  <value>135000</value>
+</property>
+
+<property>
+  <name>mapred.healthChecker.script.timeout</name>
+  <value>60000</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.active</name>
+  <value>false</value>
+  <description>Indicates if persistency of job status information is
+  active or not.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.hours</name>
+  <value>1</value>
+  <description>The number of hours job status information is persisted in DFS.
+    The job status information will be available after it drops of the memory
+    queue and between jobtracker restarts. With a zero value the job status
+    information is not persisted at all in DFS.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.tracker.persist.jobstatus.dir</name>
+  <value>/etc/hadoop/conf/health_check</value>
+  <description>The directory where the job status information is persisted
+    in a file system to be available after it drops of the memory queue and
+    between jobtracker restarts.
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.check</name>
+  <value>10000</value>
+</property>
+
+<property>
+  <name>mapred.jobtracker.retirejob.interval</name>
+  <value>21600000</value>
+</property>
+
+<property>
+  <name>mapred.job.tracker.history.completed.location</name>
+  <value>/mapred/history/done</value>
+  <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.task.maxvmem</name>
+  <value></value>
+  <final>true</final>
+   <description>No description</description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.maxtasks.per.job</name>
+  <value>-1</value>
+  <final>true</final>
+  <description>The maximum number of tasks for a single job.
+  A value of -1 indicates that there is no maximum.  </description>
+</property>
+
+<property>
+  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+  <value>false</value>
+</property>
+
+<property>
+  <name>mapred.userlog.retain.hours</name>
+  <value>24</value>
+  <description>
+    The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+  </description>
+</property>
+
+<property>
+  <name>mapred.job.reuse.jvm.num.tasks</name>
+  <value>1</value>
+  <description>
+    How many tasks to run per jvm. If set to -1, there is no limit
+  </description>
+  <final>true</final>
+</property>
+
+<property>
+  <name>mapreduce.jobtracker.kerberos.principal</name>
+  <value></value>
+  <description>
       JT user name key.
-    </description>
-  </property>
+ </description>
+</property>
 
-  <property>
-    <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      tt user name key. "_HOST" is replaced by the host name of the task tracker.
-    </description>
-  </property>
+<property>
+  <name>mapreduce.tasktracker.kerberos.principal</name>
+   <value></value>
+  <description>
+       tt user name key. "_HOST" is replaced by the host name of the task tracker.
+   </description>
+</property>
 
 
   <property>
@@ -480,54 +475,54 @@
   </property>
 
 
-  <property>
-    <name>mapreduce.jobtracker.keytab.file</name>
-    <value></value>
-    <description>
-      The keytab for the jobtracker principal.
-    </description>
+ <property>
+   <name>mapreduce.jobtracker.keytab.file</name>
+   <value></value>
+   <description>
+       The keytab for the jobtracker principal.
+   </description>
 
-  </property>
+</property>
 
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
+ <property>
+   <name>mapreduce.tasktracker.keytab.file</name>
+   <value></value>
     <description>The filename of the keytab for the task tracker</description>
-  </property>
+ </property>
 
-  <property>
-    <name>mapred.task.tracker.http.address</name>
-    <value></value>
-    <description>Http address for task tracker.</description>
-  </property>
+ <property>
+   <name>mapred.task.tracker.http.address</name>
+   <value></value>
+   <description>Http address for task tracker.</description>
+ </property>
 
-  <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>/user</value>
-    <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-      name. It is a path in the default file system.</description>
-  </property>
+ <property>
+   <name>mapreduce.jobtracker.staging.root.dir</name>
+   <value>/user</value>
+ <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
+   name. It is a path in the default file system.</description>
+ </property>
 
-  <property>
-    <name>mapreduce.tasktracker.group</name>
-    <value>hadoop</value>
-    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
+ <property>
+      <name>mapreduce.tasktracker.group</name>
+      <value>hadoop</value>
+      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
 
-  </property>
+ </property>
 
   <property>
     <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
     <value>50000000</value>
     <final>true</final>
-    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-      initialize.
-    </description>
+     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+    initialize.
+   </description>
   </property>
   <property>
     <name>mapreduce.history.server.embedded</name>
     <value>false</value>
     <description>Should job history server be embedded within Job tracker
-      process</description>
+process</description>
     <final>true</final>
   </property>
 
@@ -542,38 +537,38 @@
   <property>
     <name>mapreduce.jobhistory.kerberos.principal</name>
     <!-- cluster variant -->
-    <value></value>
+  <value></value>
     <description>Job history user name key. (must map to same user as JT
-      user)</description>
+user)</description>
   </property>
 
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
+ <property>
+   <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-    <value>180</value>
-    <description>
-      3-hour sliding window (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-    <value>15</value>
-    <description>
-      15-minute bucket size (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.names</name>
-    <value>default</value>
-    <description> Comma separated list of queues configured for this jobtracker.</description>
-  </property>
+   <value></value>
+   <description>The keytab for the job history server principal.</description>
+ </property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+  <value>180</value>
+  <description>
+    3-hour sliding window (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+  <value>15</value>
+  <description>
+    15-minute bucket size (value is in minutes)
+  </description>
+</property>
+
+<property>
+  <name>mapred.queue.names</name>
+  <value>default</value>
+  <description> Comma separated list of queues configured for this jobtracker.</description>
+</property>
 
 </configuration>

+ 2 - 12
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml

@@ -150,24 +150,14 @@
     </description>
   </property>
 
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-  </property>
-
   <property>
     <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
+    <value></value>
   </property>
 
   <property>
     <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <value></value>
   </property>
 
   <property>

+ 0 - 6
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml

@@ -18,12 +18,6 @@ limitations under the License.
 -->
 
 <configuration>
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc</value>

+ 2 - 9
ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml

@@ -269,18 +269,11 @@
     <value>INFO,TLA</value>
   </property>
 
-  <property>
-    <name>ambari.mapred.child.java.opts.memory</name>
-    <value>768</value>
-
-    <description>Java options Memory for the TaskTracker child processes</description>
-  </property>
-
   <property>
     <name>mapred.child.java.opts</name>
-    <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
+    <value></value>
 
-    <description>Java options for the TaskTracker child processes</description>
+    <description>No description</description>
   </property>
 
   <property>

+ 2 - 12
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml

@@ -180,24 +180,14 @@
     </description>
   </property>
 
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-  </property>
-
   <property>
     <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
+    <value>0.0.0.0:50010</value>
   </property>
 
   <property>
     <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <value>0.0.0.0:50075</value>
   </property>
 
   <property>

+ 0 - 7
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml

@@ -18,13 +18,6 @@ limitations under the License.
 -->
 
 <configuration>
-
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc</value>

+ 2 - 12
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HDFS/configuration/hdfs-site.xml

@@ -150,24 +150,14 @@
     </description>
   </property>
 
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-  </property>
-
   <property>
     <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
+    <value></value>
   </property>
 
   <property>
     <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <value></value>
   </property>
 
   <property>

+ 0 - 6
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/HIVE/configuration/hive-site.xml

@@ -18,12 +18,6 @@ limitations under the License.
 -->
 
 <configuration>
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc</value>

+ 5 - 10
ambari-server/src/main/resources/stacks/HDPLocal/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml

@@ -91,10 +91,11 @@
     <name>mapred.job.tracker.http.address</name>
     <!-- cluster variant -->
     <value>localhost:50030</value>
-    <description>JobTracker host and http port address</description>
+    <description>Http address for JobTracker</description>
     <final>true</final>
   </property>
 
+
   <property>
     <!-- cluster specific -->
     <name>mapred.local.dir</name>
@@ -269,17 +270,11 @@
     <value>INFO,TLA</value>
   </property>
 
-  <property>
-    <name>ambari.mapred.child.java.opts.memory</name>
-    <value>768</value>
-
-    <description>Java options Memory for the TaskTracker child processes</description>
-  </property>
-
   <property>
     <name>mapred.child.java.opts</name>
-    <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
-    <description>Java options for the TaskTracker child processes</description>
+    <value></value>
+
+    <description>No description</description>
   </property>
 
   <property>

+ 2 - 12
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HDFS/configuration/hdfs-site.xml

@@ -150,24 +150,14 @@
     </description>
   </property>
 
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-  </property>
-
   <property>
     <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
+    <value></value>
   </property>
 
   <property>
     <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <value></value>
   </property>
 
   <property>

+ 0 - 6
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/HIVE/configuration/hive-site.xml

@@ -18,12 +18,6 @@ limitations under the License.
 -->
 
 <configuration>
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc</value>

+ 2 - 9
ambari-server/src/main/resources/stacks/HDPLocal/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml

@@ -269,18 +269,11 @@
     <value>INFO,TLA</value>
   </property>
 
-  <property>
-    <name>ambari.mapred.child.java.opts.memory</name>
-    <value>768</value>
-
-    <description>Java options Memory for the TaskTracker child processes</description>
-  </property>
-
   <property>
     <name>mapred.child.java.opts</name>
-    <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
+    <value></value>
 
-    <description>Java options for the TaskTracker child processes</description>
+    <description>No description</description>
   </property>
 
   <property>

+ 2 - 12
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HDFS/configuration/hdfs-site.xml

@@ -180,24 +180,14 @@
     </description>
   </property>
 
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-  </property>
-
   <property>
     <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
+    <value>0.0.0.0:50010</value>
   </property>
 
   <property>
     <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <value>0.0.0.0:50075</value>
   </property>
 
   <property>

+ 0 - 7
ambari-server/src/main/resources/stacks/HDPLocal/2.0.6/services/HIVE/configuration/hive-site.xml

@@ -18,13 +18,6 @@ limitations under the License.
 -->
 
 <configuration>
-
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
   <property>
     <name>javax.jdo.option.ConnectionURL</name>
     <value>jdbc</value>

+ 6 - 0
ambari-web/app/controllers/main/admin/security/add/step4.js

@@ -418,6 +418,12 @@ App.MainAdminSecurityAddStep4Controller = Em.Controller.extend({
         case 'security_enabled':
           _property.value = 'true';
           break;
+        case 'dfs_datanode_address':
+          _property.value = '1019';
+          break;
+        case 'dfs_datanode_http_address':
+          _property.value = '1022';
+          break;
       }
     }, this);
   },
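
Note: 1019 and 1022 are privileged ports (below 1024), which a Kerberized DataNode of this era must bind so that clients can trust it; enabling security therefore overrides the dfs_datanode_address and dfs_datanode_http_address globals. A sketch of that override, illustrative rather than the controller itself:

  // Illustrative: port overrides applied when security is enabled.
  var securePorts = {
    dfs_datanode_address: '1019',      // data-transfer port
    dfs_datanode_http_address: '1022'  // HTTP port
  };
  function secureValue(name, current) {
    return securePorts.hasOwnProperty(name) ? securePorts[name] : current;
  }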

+ 4 - 4
ambari-web/app/controllers/main/admin/security/disable.js

@@ -387,11 +387,11 @@ App.MainAdminSecurityDisableController = Em.Controller.extend({
             var configName = _config.name;
             if (configName in _serviceConfigTags.configs) {
               switch (configName) {
-                case 'ambari.dfs.datanode.port':
-                  _serviceConfigTags.configs[configName] = '50010';
+                case 'dfs.datanode.address':
+                  _serviceConfigTags.configs[configName] = '0.0.0.0:50010';
                   break;
-                case 'ambari.dfs.datanode.http.port':
-                  _serviceConfigTags.configs[configName] = '50075';
+                case 'dfs.datanode.http.address':
+                  _serviceConfigTags.configs[configName] = '0.0.0.0:50075';
                   break;
                 case 'mapred.task.tracker.task-controller':
                   _serviceConfigTags.configs[configName] = 'org.apache.hadoop.mapred.DefaultTaskController';

+ 1 - 1
ambari-web/app/controllers/main/service/info/configs.js

@@ -1378,7 +1378,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend({
     var globalSiteProperties = {};
     this.get('globalConfigs').forEach(function (_globalSiteObj) {
       // do not pass any globalConfigs whose name ends with _host or _hosts
-      if (_globalSiteObj.isRequiredByAgent === true) {
+      if (!/_hosts?$/.test(_globalSiteObj.name)) {
         // append "m" to JVM memory options except for hadoop_heapsize
         if (/_heapsize|_newsize|_maxnewsize$/.test(_globalSiteObj.name) && !heapsizeException.contains(_globalSiteObj.name)) {
           _globalSiteObj.value += "m";
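
Note: both this hunk and the step8_controller.js hunk below replace the isRequiredByAgent check with the older name-based rule. A sketch of the restored behavior, assuming globals is a plain array of {name, value} objects (illustrative, not the controller code):

  // Illustrative: drop *_host/*_hosts globals (UI-only) and append "m"
  // to JVM size options, except for hadoop_heapsize.
  var heapsizeException = ['hadoop_heapsize'];
  function toGlobalSiteProperties(globals) {
    var out = {};
    globals.forEach(function (g) {
      if (/_hosts?$/.test(g.name)) return;   // not sent to the agent
      var value = g.value;
      if (/_heapsize|_newsize|_maxnewsize$/.test(g.name) &&
          heapsizeException.indexOf(g.name) === -1) {
        value += 'm';                        // e.g. "1024" -> "1024m"
      }
      out[g.name] = value;
    });
    return out;
  }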

+ 0 - 1
ambari-web/app/controllers/wizard.js

@@ -736,7 +736,6 @@ App.WizardController = Em.Controller.extend({
           domain: _configProperties.get('domain'),
           filename: _configProperties.get('filename'),
           displayType: _configProperties.get('displayType'),
-          isRequiredByAgent: _configProperties.get('isRequiredByAgent'),
           overrides: overridesArray
         };
         serviceConfigProperties.push(configProperty);

+ 11 - 4
ambari-web/app/controllers/wizard/step8_controller.js

@@ -1404,16 +1404,18 @@ App.WizardStep8Controller = Em.Controller.extend({
     globalSiteObj.forEach(function (_globalSiteObj) {
       var heapsizeException =  ['hadoop_heapsize','yarn_heapsize','nodemanager_heapsize','resourcemanager_heapsize'];
       // do not pass any globals whose name ends with _host or _hosts
-      if (_globalSiteObj.isRequiredByAgent === true) {
+      if (!/_hosts?$/.test(_globalSiteObj.name)) {
         // append "m" to JVM memory options except for hadoop_heapsize
         if (/_heapsize|_newsize|_maxnewsize$/.test(_globalSiteObj.name) && !heapsizeException.contains(_globalSiteObj.name)) {
           globalSiteProperties[_globalSiteObj.name] = _globalSiteObj.value + "m";
         } else {
           globalSiteProperties[_globalSiteObj.name] = App.config.escapeXMLCharacters(_globalSiteObj.value);
         }
-        if (_globalSiteObj.name == 'java64_home') {
-          globalSiteProperties['java64_home'] = this.get('content.installOptions.javaHome');
-        }
+        console.log("STEP8: name of the global property is: " + _globalSiteObj.name);
+        console.log("STEP8: value of the global property is: " + _globalSiteObj.value);
+      }
+      if (_globalSiteObj.name == 'java64_home') {
+        globalSiteProperties['java64_home'] = this.get('content.installOptions.javaHome');
       }
       this._recordHostOverrideFromObj(_globalSiteObj, 'global', 'version1', this);
     }, this);
@@ -1570,11 +1572,16 @@ App.WizardStep8Controller = Em.Controller.extend({
     var globals = this.get('content.serviceConfigProperties').filterProperty('id', 'puppet var');
     if (globals.someProperty('name', 'hive_database')) {
       var hiveDb = globals.findProperty('name', 'hive_database');
+      var hiveHost = globals.findProperty('name', 'hive_hostname').value;
+      var hiveDbName = globals.findProperty('name', 'hive_database_name').value;
       if (hiveDb.value === 'New MySQL Database') {
+        // hiveProperties["javax.jdo.option.ConnectionURL"] = "jdbc:mysql://"+ hiveHost + "/" + hiveDbName + "?createDatabaseIfNotExist=true";
         hiveProperties["javax.jdo.option.ConnectionDriverName"] = "com.mysql.jdbc.Driver";
       } else if (hiveDb.value === 'Existing MySQL Database'){
+        // hiveProperties["javax.jdo.option.ConnectionURL"] = "jdbc:mysql://"+ hiveHost + "/" + hiveDbName + "?createDatabaseIfNotExist=true";
         hiveProperties["javax.jdo.option.ConnectionDriverName"] = "com.mysql.jdbc.Driver";
       } else { //existing oracle database
+        // hiveProperties["javax.jdo.option.ConnectionURL"] = "jdbc:oracle:thin:@//"+ hiveHost + ":1521/" + hiveDbName;
         hiveProperties["javax.jdo.option.ConnectionDriverName"] = "oracle.jdbc.driver.OracleDriver";
       }
     }
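
Note: the commented-out lines restored above record the JDBC URL shapes for the Hive metastore connection. With hypothetical host and database names they would expand as follows (illustrative values only):

  // Hypothetical hiveHost and hiveDbName, for illustration only.
  var hiveHost = 'db.example.com';
  var hiveDbName = 'hive';
  var mysqlUrl  = 'jdbc:mysql://' + hiveHost + '/' + hiveDbName +
                  '?createDatabaseIfNotExist=true';
  var oracleUrl = 'jdbc:oracle:thin:@//' + hiveHost + ':1521/' + hiveDbName;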

+ 15 - 0
ambari-web/app/data/HDP2/config_mapping.js

@@ -67,6 +67,21 @@ var configs = [
     "filename": "core-site.xml",
     "isOverridable": true
   },
+/**********************************************hdfs-site***************************************/
+  {
+    "name": "dfs.datanode.address",
+    "templateName": ["dfs_datanode_address"],
+    "foreignKey": null,
+    "value": "0.0.0.0:<templateName[0]>",
+    "filename": "hdfs-site.xml"
+  },
+  {
+    "name": "dfs.datanode.http.address",
+    "templateName": ["dfs_datanode_http_address"],
+    "foreignKey": null,
+    "value": "0.0.0.0:<templateName[0]>",
+    "filename": "hdfs-site.xml"
+  },
 
 /**********************************************hbase-site***************************************/
   {
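
Note: these two mappings are the UI-side counterpart of the emptied stack defaults above: the dfs_datanode_address and dfs_datanode_http_address globals hold bare port numbers (1019/1022 under security, per the step4.js hunk) and are spliced into host:port values. A sketch of how such an entry could be applied; applyMapping() is illustrative, not the actual App.config implementation:

  // Illustrative: replace <templateName[i]> tokens with the values of
  // the named global properties.
  function applyMapping(entry, globals) {
    var value = entry.value;
    entry.templateName.forEach(function (name, i) {
      value = value.replace('<templateName[' + i + ']>', globals[name]);
    });
    return value;
  }

  applyMapping(
    { name: 'dfs.datanode.address',
      templateName: ['dfs_datanode_address'],
      value: '0.0.0.0:<templateName[0]>' },
    { dfs_datanode_address: '50010' }
  ); // => "0.0.0.0:50010"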

+ 170 - 49
ambari-web/app/data/HDP2/global_properties.js

@@ -47,10 +47,6 @@
  *     If this is unspecified, true is assumed.
  *     E.g., true, false
  *
- *     isRequiredByAgent:
- *     Whether the config property is required by agent or not.
- *     If value is true then it will be persisted in global configuration
- *
  *   displayType:
  *     How the config property is to be rendered for user input.
  *     If this is left unspecified, "string" is assumed
@@ -90,7 +86,6 @@ module.exports =
       "displayType": "masterHosts",
       "isOverridable": false,
       "isVisible": true,
-      "isRequiredByAgent": false,
       "domain": "global",
       "serviceName": "HDFS",
       "category": "NameNode",
@@ -106,7 +101,6 @@ module.exports =
       "unit": "MB",
       "isOverridable": false,
       "isVisible": true,
-      "isRequiredByAgent": true,
       "domain": "global",
       "serviceName": "HDFS",
       "category": "NameNode",
@@ -122,7 +116,6 @@ module.exports =
       "unit": "MB",
       "isOverridable": false,
       "isVisible": true,
-      "isRequiredByAgent": true,
       "domain": "global",
       "serviceName": "HDFS",
       "category": "NameNode",
@@ -137,7 +130,6 @@ module.exports =
       "description": "The host that has been assigned to run SecondaryNameNode",
       "displayType": "masterHost",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HDFS",
@@ -155,7 +147,6 @@ module.exports =
       "isRequired": false,
       "isOverridable": false,
       "isVisible": true,
-      "isRequiredByAgent": false,
       "domain": "datanode-global",
       "serviceName": "HDFS",
       "category": "DataNode",
@@ -170,7 +161,6 @@ module.exports =
       "displayType": "int",
       "unit": "MB",
       "isVisible": true,
-      "isRequiredByAgent": true,
       "domain": "datanode-global",
       "serviceName": "HDFS",
       "category": "DataNode",
@@ -184,7 +174,6 @@ module.exports =
       "defaultValue": "1024",
       "displayType": "int",
       "unit": "MB",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HDFS",
@@ -199,7 +188,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HDFS",
@@ -214,7 +202,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HDFS",
@@ -229,7 +216,6 @@ module.exports =
       "displayType": "int",
       "unit": "MB",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": false,
       "domain": "global",
       "serviceName": "HDFS",
@@ -244,12 +230,25 @@ module.exports =
       "isRequired": false,
       "displayType": "checkbox",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": false,
       "domain": "global",
       "serviceName": "HDFS",
       "category": "Advanced"
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_formatted_mark_dir",
+      "displayName": "Hadoop formatted mark directory",
+      "description": "",
+      "defaultValue": "/var/run/hadoop/hdfs/namenode/formatted/",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "NameNode"
+    },
     {
       "id": "puppet var",
       "name": "hcat_conf_dir",
@@ -259,12 +258,24 @@ module.exports =
       "isRequired": false,
       "isReconfigurable": false,
       "displayType": "directory",
-      "isRequiredByAgent": true,
       "isVisible": false,
       "domain": "global",
       "serviceName": "HDFS",
       "category": "Advanced"
     },
+    {
+      "id": "puppet var",
+      "name": "hdfs_enable_shortcircuit_read",
+      "displayName": "HDFS Short-circuit read",
+      "description": "",
+      "defaultValue": true,
+      "isRequired": false,
+      "displayType": "checkbox",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
     {
       "id": "puppet var",
       "name": "lzo_enabled",
@@ -273,7 +284,6 @@ module.exports =
       "defaultValue": true,
       "displayType": "checkbox",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": false,
       "domain": "global",
       "serviceName": "HDFS"
@@ -288,7 +298,6 @@ module.exports =
       "defaultValue": "",
       "isOverridable": false,
       "displayType": "masterHost",
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "MAPREDUCE2",
@@ -302,7 +311,6 @@ module.exports =
       "description": "",
       "defaultValue": "/var/log/hadoop-mapreduce",
       "displayType": "directory",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "isReconfigurable": false,
       "domain": "global",
@@ -316,7 +324,6 @@ module.exports =
       "description": "",
       "defaultValue": "/var/run/hadoop-mapreduce",
       "displayType": "directory",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "isReconfigurable": false,
       "domain": "global",
@@ -334,7 +341,6 @@ module.exports =
       "isOverridable": false,
       "displayType": "int",
       "unit": "MB",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "YARN",
@@ -349,7 +355,6 @@ module.exports =
       "defaultValue": "",
       "isOverridable": false,
       "displayType": "masterHost",
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "YARN",
@@ -365,7 +370,6 @@ module.exports =
       "isOverridable": false,
       "displayType": "int",
       "unit": "MB",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "YARN",
@@ -380,7 +384,6 @@ module.exports =
       "defaultValue": "",
       "isOverridable": false,
       "displayType": "slaveHosts",
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "YARN",
@@ -396,7 +399,6 @@ module.exports =
       "isOverridable": false,
       "displayType": "int",
       "unit": "MB",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "YARN",
@@ -411,7 +413,6 @@ module.exports =
       "defaultValue": "/var/log/hadoop-yarn",
       "displayType": "directory",
       "isReconfigurable": false,
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "YARN",
@@ -424,7 +425,6 @@ module.exports =
       "description": "",
       "defaultValue": "/var/run/hadoop-yarn",
       "displayType": "directory",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "isReconfigurable": false,
       "domain": "global",
@@ -441,7 +441,6 @@ module.exports =
       "description": "The host that has been assigned to run HBase Master",
       "displayType": "masterHosts",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HBASE",
@@ -457,7 +456,6 @@ module.exports =
       "displayType": "int",
       "unit": "MB",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HBASE",
@@ -473,7 +471,6 @@ module.exports =
       "description": "The hosts that have been assigned to run RegionServer",
       "displayType": "slaveHosts",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "isRequired": false,
       "domain": "regionserver-global",
@@ -489,7 +486,6 @@ module.exports =
       "defaultValue": "1024",
       "displayType": "int",
       "unit": "MB",
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "regionserver-global",
       "serviceName": "HBASE",
@@ -505,7 +501,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HBASE",
@@ -520,7 +515,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": true,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HBASE",
@@ -549,7 +543,6 @@ module.exports =
       "description": "The host that has been assigned to run Hive Metastore",
       "displayType": "masterHost",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "serviceName": "HIVE",
       "category": "Hive Metastore",
@@ -662,7 +655,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "host",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": false,
       "isObserved": true,
       "domain": "global",
@@ -679,7 +671,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "host",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": false,
       "isObserved": true,
       "domain": "global",
@@ -696,7 +687,6 @@ module.exports =
       "description": "Host on which the database will be created by Ambari",
       "isReconfigurable": false,
       "displayType": "masterHost",
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isVisible": false,
       "domain": "global",
@@ -704,6 +694,22 @@ module.exports =
       "category": "Hive Metastore",
       "index": 3
     },
+    {
+      "id": "puppet var",
+      "name": "hive_database_name",
+      "displayName": "Database Name",
+      "description": "Database name used as the Hive Metastore",
+      "defaultValue": "hive",
+      "isReconfigurable": true,
+      "displayType": "host",
+      "isOverridable": false,
+      "isVisible": true,
+      "isObserved": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 4
+    },
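
Note: hive_database_name returns here as a puppet var with default "hive"; its site-property counterpart, ambari.hive.db.schema.name, is deleted from HDP2/site_properties.js further down in this revert, and the restored step8 hunk at the top of the diff reads this global back to name the metastore schema.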
     {
       "id": "puppet var",
       "name": "hive_metastore_port",
@@ -713,7 +719,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "int",
       "isOverridable": false,
-      "isRequiredByAgent": false, // Make this to true when we expose the property on ui by making "isVisible": true
       "isVisible": false,
       "domain": "global",
       "serviceName": "HIVE",
@@ -728,7 +733,19 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isVisible": false,
-      "isRequiredByAgent": false, // Make this to true when we expose the property on ui by making "isVisible": true
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_conf_dir",
+      "displayName": "Hive conf directory",
+      "description": "",
+      "defaultValue": "/etc/hive/conf",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
       "domain": "global",
       "serviceName": "HIVE",
       "category": "Advanced"
@@ -742,7 +759,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isVisible": false,
-      "isRequiredByAgent": false, // Make this to true when we expose the property on ui by making "isVisible": true
       "domain": "global",
       "serviceName": "HIVE",
       "category": "Advanced"
@@ -775,6 +791,19 @@ module.exports =
       "serviceName": "HIVE",
       "category": "Advanced"
     },
+    {
+      "id": "puppet var",
+      "name": "mysql_connector_url",
+      "displayName": "MySQL connector url",
+      "description": "",
+      "defaultValue": "${download_url}/mysql-connector-java-5.1.18.zip",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
     {
       "id": "puppet var",
       "name": "hive_aux_jars_path",
@@ -799,7 +828,6 @@ module.exports =
       "displayType": "masterHost",
       "isOverridable": false,
       "isVisible": true,
-      "isRequiredByAgent": false,
       "domain": "global",
       "serviceName": "WEBHCAT",
       "category": "WebHCat Server"
@@ -843,7 +871,6 @@ module.exports =
       "displayType": "masterHost",
       "isOverridable": false,
       "isVisible": true,
-      "isRequiredByAgent": false,
       "domain": "global",
       "serviceName": "OOZIE",
       "category": "Oozie Server",
@@ -1028,7 +1055,6 @@ module.exports =
       "isOverridable": false,
       "displayType": "masterHost",
       "isVisible": false,
-      "isRequiredByAgent": false,
       //"domain": "global",
       "serviceName": "OOZIE",
       "category": "Oozie Server"
@@ -1113,7 +1139,6 @@ module.exports =
       "description": "The host that has been assigned to run ZooKeeper Server",
       "displayType": "masterHosts",
       "isVisible": true,
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isRequired": false,
       "serviceName": "ZOOKEEPER",
@@ -1222,6 +1247,19 @@ module.exports =
       "category": "Advanced",
       "index": 1
     },
+    {
+      "id": "puppet var",
+      "name": "zk_pid_file",
+      "displayName": "ZooKeeper PID File",
+      "description": "",
+      "defaultValue": "/var/run/zookeeper/zookeeper_server.pid",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "Advanced"
+    },
   /**********************************************HUE***************************************/
     {
       "id": "puppet var",
@@ -1232,7 +1270,6 @@ module.exports =
       "description": "The host that has been assigned to run Hue Server",
       "displayType": "masterHost",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HUE",
@@ -1247,7 +1284,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HUE",
@@ -1262,7 +1298,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HUE",
@@ -1278,7 +1313,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isVisible": false,
-      "isRequiredByAgent": false,
       "domain": "global",
       "serviceName": "GANGLIA",
       "category": "Advanced"
@@ -1293,7 +1327,6 @@ module.exports =
       "isRequired": false,
       "displayType": "directory",
       "isVisible": false,
-      "isRequiredByAgent": false,
       "domain": "global",
       "serviceName": "MISC",
       "category": "General",
@@ -1315,6 +1348,64 @@ module.exports =
       "category": "Users and Groups",
       "belongsToService":["HIVE","WEBHCAT","OOZIE"]
     },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_address",
+      "displayName": "dfs_datanode_address",
+      "description": "",
+      "defaultValue": "50010",
+      "isReconfigurable": true,
+      "displayType": "int",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_http_address",
+      "displayName": "dfs_datanode_http_address",
+      "description": "",
+      "defaultValue": "50075",
+      "isReconfigurable": true,
+      "displayType": "int",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
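
Note: these two entries sit under serviceName MISC with an empty belongsToService list. On the Misc page, visibility is recomputed from that list by miscConfigVisibleProperty (utils/config.js, below), so with an empty list the declared "isVisible": true is overridden. A sketch of that check in plain JS:

var belongsToService = [];                 // as declared above
var serviceToShow = ['HDFS', 'MAPREDUCE']; // assumed selection
var visible = belongsToService.some(function (s) {
  return serviceToShow.indexOf(s) !== -1;
});
console.log(visible); // false -> hidden on the Misc page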
+    {
+      "id": "puppet var",
+      "name": "gpl_artifacts_download_url",
+      "displayName": "gpl artifact download url",
+      "description": "",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General",
+      "belongsToService":[]
+    },
+    {
+      "id": "puppet var",
+      "name": "apache_artifacts_download_url",
+      "displayName": "apache artifact download url",
+      "description": "",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General",
+      "belongsToService":[]
+    },
     {
       "id": "puppet var",
       "name": "ganglia_runtime_dir",
@@ -1343,6 +1434,36 @@ module.exports =
       "serviceName": "MISC",
       "belongsToService":[]
     },
+    {
+      "id": "puppet var",
+      "name": "run_dir",
+      "displayName": "Hadoop run directory",
+      "description": "",
+      "defaultValue": "/var/run/hadoop",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
+    {
+      "id": "puppet var",
+      "name": "hadoop_conf_dir",
+      "displayName": "Hadoop conf directory",
+      "description": "",
+      "defaultValue": "/etc/hadoop/conf",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
     {
       "id": "puppet var",
       "name": "hdfs_user",

+ 4 - 4
ambari-web/app/data/HDP2/secure_mapping.js

@@ -146,18 +146,18 @@ module.exports = [
     "serviceName": "HDFS"
   },
   {
-    "name": "ambari.dfs.datanode.port",
+    "name": "dfs.datanode.address",
     "templateName": ["dfs_datanode_address"],
     "foreignKey": null,
-    "value": "<templateName[0]>",
+    "value": "0.0.0.0:<templateName[0]>",
     "filename": "hdfs-site.xml",
     "serviceName": "HDFS"
   },
   {
-    "name": "ambari.dfs.datanode.http.port",
+    "name": "dfs.datanode.http.address",
     "templateName": ["dfs_datanode_http_address"],
     "foreignKey": null,
-    "value": "<templateName[0]>",
+    "value": "0.0.0.0:<templateName[0]>",
     "filename": "hdfs-site.xml",
     "serviceName": "HDFS"
   },
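
Note: the same two renames are reverted for the HDP 1.x stack in ambari-web/app/data/secure_mapping.js below. In both files the refactor's port-only keys (ambari.dfs.datanode.port, ambari.dfs.datanode.http.port) go back to the full socket-address keys, with the wildcard bind host re-embedded in the template, so the default ports render as 0.0.0.0:50010 and 0.0.0.0:50075.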

+ 0 - 14
ambari-web/app/data/HDP2/site_properties.js

@@ -485,20 +485,6 @@ module.exports =
       "serviceName": "HIVE",
       "index": 7
     },
-    {
-      "id": "site property",
-      "name": "ambari.hive.db.schema.name",
-      "displayName": "Database Name",
-      "description": "Database name used as the Hive Metastore",
-      "defaultValue": "",
-      "isReconfigurable": true,
-      "displayType": "host",
-      "isOverridable": false,
-      "isObserved": true,
-      "serviceName": "HIVE",
-      "category": "Hive Metastore",
-      "index": 4
-    },
 
   /**********************************************hbase-site***************************************/
     {

+ 23 - 0
ambari-web/app/data/config_mapping.js

@@ -66,6 +66,29 @@ var configs = [
     "filename": "core-site.xml",
     "isOverridable" : true
   },
+  {
+    "name": "dfs.datanode.address",
+    "templateName": ["dfs_datanode_address"],
+    "foreignKey": null,
+    "value": "0.0.0.0:<templateName[0]>",
+    "filename": "hdfs-site.xml"
+  },
+  {
+    "name": "dfs.datanode.http.address",
+    "templateName": ["dfs_datanode_http_address"],
+    "foreignKey": null,
+    "value": "0.0.0.0:<templateName[0]>",
+    "filename": "hdfs-site.xml"
+  },
+
+  /******************************************MAPREDUCE***************************************/
+  {
+    "name": "mapred.child.java.opts",
+    "templateName": ["mapred_child_java_opts_sz"],
+    "foreignKey": null,
+    "value": "-server -Xmx<templateName[0]>m -Djava.net.preferIPv4Stack=true",
+    "filename": "mapred-site.xml"
+  },
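
Note: this new MAPREDUCE mapping composes the task JVM options from a single size global. Using the 768 (MB) default that mapred_child_java_opts_sz carries in global_properties.js below, the rendered value would be:

// Expansion sketch with the assumed default of 768 MB:
var sz = '768'; // value of mapred_child_java_opts_sz
var opts = '-server -Xmx' + sz + 'm -Djava.net.preferIPv4Stack=true';
console.log(opts); // "-server -Xmx768m -Djava.net.preferIPv4Stack=true"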
 
 /**********************************************hbase-site***************************************/
   {

+ 504 - 22
ambari-web/app/data/global_properties.js

@@ -85,7 +85,6 @@ module.exports =
       "description": "The host that has been assigned to run NameNode",
       "displayType": "masterHost",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HDFS",
@@ -130,7 +129,6 @@ module.exports =
       "defaultValue": "",
       "description": "The host that has been assigned to run SecondaryNameNode",
       "displayType": "masterHost",
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isVisible": true,
       "domain": "global",
@@ -147,7 +145,6 @@ module.exports =
       "description": "The hosts that have been assigned to run DataNode",
       "displayType": "slaveHosts",
       "isRequired": false,
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isVisible": true,
       "domain": "datanode-global",
@@ -265,7 +262,20 @@ module.exports =
       "serviceName": "HDFS",
       "category": "NameNode"
     },
-
+    {
+      "id": "puppet var",
+      "name": "hcat_conf_dir",
+      "displayName": "HCat conf directory",
+      "description": "",
+      "defaultValue": "",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
     /**********************************************HCFS***************************************/    
     {
       "id": "puppet var",
@@ -361,7 +371,6 @@ module.exports =
       "description": "The host that has been assigned to run JobTracker",
       "displayType": "masterHost",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "MAPREDUCE",
@@ -424,12 +433,25 @@ module.exports =
       "isOverridable": false,
       "isVisible": true,
       "isRequired": false,
-      "isRequiredByAgent": false,
       "domain": "tasktracker-global",
       "serviceName": "MAPREDUCE",
       "category": "TaskTracker",
       "index": 0
     },
+    {
+      "id": "puppet var",
+      "name": "mapred_child_java_opts_sz",
+      "displayName": "Java options for MapReduce tasks",
+      "description": "Java options for the TaskTracker child processes.",
+      "defaultValue": "768",
+      "displayType": "int",
+      "unit": "MB",
+      "isVisible": true,
+      "domain": "tasktracker-global",
+      "serviceName": "MAPREDUCE",
+      "category": "TaskTracker",
+      "index": 4
+    },
     {
       "id": "puppet var",
       "name": "lzo_enabled",
@@ -481,7 +503,6 @@ module.exports =
       "description": "The host that has been assigned to run HBase Master",
       "displayType": "masterHosts",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HBASE",
@@ -512,7 +533,6 @@ module.exports =
       "description": "The hosts that have been assigned to run RegionServer",
       "displayType": "slaveHosts",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "isRequired": false,
       "domain": "regionserver-global",
@@ -584,7 +604,6 @@ module.exports =
       "defaultValue": "",
       "description": "The host that has been assigned to run Hive Metastore",
       "displayType": "masterHost",
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isVisible": true,
       "serviceName": "HIVE",
@@ -697,7 +716,6 @@ module.exports =
       "defaultValue": "",
       "isReconfigurable": false,
       "displayType": "host",
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isVisible": false,
       "isObserved": true,
@@ -715,7 +733,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "host",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": false,
       "isObserved": true,
       "domain": "global",
@@ -732,7 +749,6 @@ module.exports =
       "description": "The host where Hive Metastore database is located",
       "isReconfigurable": false,
       "displayType": "masterHost",
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isVisible": false,
       "domain": "global",
@@ -740,6 +756,22 @@ module.exports =
       "category": "Hive Metastore",
       "index": 3
     },
+    {
+      "id": "puppet var",
+      "name": "hive_database_name",
+      "displayName": "Database Name",
+      "description": "Database name used as the Hive Metastore",
+      "defaultValue": "hive",
+      "isReconfigurable": true,
+      "displayType": "host",
+      "isOverridable": false,
+      "isVisible": true,
+      "isObserved": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 4
+    },
     {
       "id": "puppet var",
       "name": "hive_metastore_port",
@@ -754,6 +786,45 @@ module.exports =
       "serviceName": "HIVE",
       "category": "Advanced"
     },
+    {
+      "id": "puppet var",
+      "name": "hive_lib",
+      "displayName": "Hive library",
+      "description": "",
+      "defaultValue": "/usr/lib/hive/lib/",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_conf_dir",
+      "displayName": "Hive conf directory",
+      "description": "",
+      "defaultValue": "/etc/hive/conf",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_dbroot",
+      "displayName": "Hive db directory",
+      "description": "",
+      "defaultValue": "/usr/lib/hive/lib",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
     {
       "id": "puppet var",
       "name": "hive_log_dir",
@@ -818,7 +889,6 @@ module.exports =
       "description": "The host that has been assigned to run WebHCat Server",
       "displayType": "masterHost",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "WEBHCAT",
@@ -862,7 +932,6 @@ module.exports =
       "description": "The host that has been assigned to run Oozie Server",
       "displayType": "masterHost",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "OOZIE",
@@ -887,6 +956,22 @@ module.exports =
       "category": "Oozie Server",
       "index": 1
     },
+    // for new MySQL
+    /*{
+      "id": "puppet var",
+      "name": "oozie_ambari_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "MySQL",
+      "description": "MySQL will be installed by Ambari",
+      "displayType": "masterHost",
+      "isVisible": false,
+      "isOverridable": false,
+      // "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 1
+    },*/
     // for current derby
     {
       "id": "puppet var",
@@ -983,7 +1068,6 @@ module.exports =
           "isReconfigurable": false,
           "isOverridable": false,
           "displayType": "host",
-          "isRequiredByAgent": false,
           "isVisible": false,
           "isObserved": true,
           "domain": "global",
@@ -999,7 +1083,6 @@ module.exports =
           "defaultValue": "",
           "isReconfigurable": false,
           "isOverridable": false,
-          "isRequiredByAgent": false,
           "displayType": "host",
           "isVisible": false,
           "isObserved": true,
@@ -1017,7 +1100,6 @@ module.exports =
           "description": "Host on which the database will be created by Ambari",
           "isReconfigurable": false,
           "isOverridable": false,
-          "isRequiredByAgent": false,
           "displayType": "masterHost",
           "isVisible": false,
           "domain": "global",
@@ -1071,6 +1153,80 @@ module.exports =
       "serviceName": "OOZIE",
       "category": "Advanced"
     },
+    /*  {
+     "id": "puppet var",
+     "name": "oozie_database",
+     "displayName": "Oozie Database",
+     "value": "",
+     "defaultValue": "New PostgreSQL Database",
+     "options": [
+     {
+     displayName: 'New PostgreSQL Database',
+     foreignKeys: ['oozie_ambari_database', 'oozie_ambari_host']
+     },
+     {
+     displayName: 'Existing Database',
+     foreignKeys: ['oozie_existing_database', 'oozie_existing_host']
+     }
+     ],
+     "description": "PostgreSQL will be installed by ambari. Any other database will have to be installed by the user.",
+     "displayType": "radio button",
+     "radioName": "oozie-database",
+     "isVisible": true,
+     "serviceName": "OOZIE",
+     "category": "Oozie Server"
+     },
+     {
+     "id": "puppet var",
+     "name": "oozie_existing_database",
+     "displayName": "Oozie Database",
+     "value": "",
+     "defaultValue": "MySQL",
+     "description": "Select the database, if you already have existing one for Oozie.",
+     "displayType": "combobox",
+     "isVisible": false,
+     "options": ['MySQL', 'PostgreSQL'],
+     "serviceName": "OOZIE",
+     "category": "Oozie Server"
+     },
+     {
+     "id": "puppet var",
+     "name": "oozie_existing_host",
+     "displayName": "Database Host",
+     "description": "Select the host on which the existing database is hosted.",
+     "defaultValue": "",
+     "isReconfigurable": false,
+     "displayType": "host",
+     "isVisible": false,
+     "serviceName": "OOZIE",
+     "category": "Oozie Server"
+     },
+     {
+     "id": "puppet var",
+     "name": "oozie_ambari_database",
+     "displayName": "Oozie Database",
+     "value": "",
+     "defaultValue": "PostgreSQL",
+     "description": "PostgreSQL will be installed by ambari.",
+     "displayType": "masterHost",
+     "isVisible": true,
+     "serviceName": "OOZIE",
+     "category": "Oozie Server"
+     },
+     {
+     "id": "puppet var",
+     "name": "oozie_ambari_host",
+     "value": "",
+     "defaultValue": "",
+     "displayName": "PostgreSQL host",
+     "description": "Host  on which the PostgreSQL database will be created by ambari. ",
+     "isReconfigurable": false,
+     "displayType": "masterHost",
+     "isVisible": true,
+     "serviceName": "OOZIE",
+     "category": "Oozie Server"
+     },
+     */
   /**********************************************NAGIOS***************************************/
     {
       "id": "puppet var",
@@ -1122,7 +1278,6 @@ module.exports =
       "defaultValue": "",
       "description": "The host that has been assigned to run ZooKeeper Server",
       "displayType": "masterHosts",
-      "isRequiredByAgent": false,
       "isVisible": true,
       "isOverridable": false,
       "isRequired": false,
@@ -1232,6 +1387,19 @@ module.exports =
       "category": "Advanced",
       "index": 1
     },
+    {
+      "id": "puppet var",
+      "name": "zk_pid_file",
+      "displayName": "ZooKeeper PID File",
+      "description": "",
+      "defaultValue": "/var/run/zookeeper/zookeeper_server.pid",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "Advanced"
+    },
   /**********************************************HUE***************************************/
     {
       "id": "puppet var",
@@ -1241,7 +1409,6 @@ module.exports =
       "defaultValue": "",
       "description": "The host that has been assigned to run Hue Server",
       "displayType": "masterHost",
-      "isRequiredByAgent": false,
       "isOverridable": false,
       "isVisible": true,
       "domain": "global",
@@ -1257,7 +1424,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HUE",
@@ -1272,7 +1438,6 @@ module.exports =
       "isReconfigurable": false,
       "displayType": "directory",
       "isOverridable": false,
-      "isRequiredByAgent": false,
       "isVisible": true,
       "domain": "global",
       "serviceName": "HUE",
@@ -1292,7 +1457,21 @@ module.exports =
       "serviceName": "GANGLIA",
       "category": "Advanced"
     },
-  /**********************************************MISC******************************************/
+  /**********************************************MISC***************************************/
+    {
+      "id": "puppet var",
+      "name": "hbase_conf_dir",
+      "displayName": "HBase conf dir",
+      "description": "",
+      "defaultValue": "/etc/hbase",
+      "isRequired": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General",
+      "belongsToService":[]
+    },
     {
       "id": "puppet var",
       "name": "proxyuser_group",
@@ -1309,6 +1488,64 @@ module.exports =
       "category": "Users and Groups",
       "belongsToService":["HIVE","WEBHCAT","OOZIE"]
     },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_address",
+      "displayName": "dfs_datanode_address",
+      "description": "",
+      "defaultValue": "50010",
+      "isReconfigurable": true,
+      "displayType": "int",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_http_address",
+      "displayName": "dfs_datanode_http_address",
+      "description": "",
+      "defaultValue": "50075",
+      "isReconfigurable": true,
+      "displayType": "int",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
+    {
+      "id": "puppet var",
+      "name": "gpl_artifacts_download_url",
+      "displayName": "gpl artifact download url",
+      "description": "",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General",
+      "belongsToService":[]
+    },
+    {
+      "id": "puppet var",
+      "name": "apache_artifacts_download_url",
+      "displayName": "apache artifact download url",
+      "description": "",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General",
+      "belongsToService":[]
+    },
     {
       "id": "puppet var",
       "name": "ganglia_runtime_dir",
@@ -1323,6 +1560,48 @@ module.exports =
       "category": "General",
       "belongsToService":[]
     },
+    /*
+    {
+      "id": "puppet var",
+      "name": "ganglia_shell_cmds_dir",
+      "displayName": "ganglia_shell_cmds_dir",
+      "description": "",
+      "defaultValue": "/usr/libexec/hdp/ganglia",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    {
+      "id": "puppet var",
+      "name": "webserver_group",
+      "displayName": "ganglia_shell_cmds_dir",
+      "description": "",
+      "defaultValue": "apache",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    */
+    /*
+    {
+      "id": "puppet var",
+      "name": "jdk_location",
+      "displayName": "URL to download 64-bit JDK",
+      "description": "URL from where the 64-bit JDK binary can be downloaded",
+      "defaultValue": "",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "url",
+      "isVisible": true,
+      "serviceName": "MISC"
+    },
+    */
     {
       "id": "puppet var",
       "name": "java64_home",
@@ -1337,6 +1616,138 @@ module.exports =
       "serviceName": "MISC",
       "belongsToService":[]
     },
+    {
+      "id": "puppet var",
+      "name": "run_dir",
+      "displayName": "Hadoop run directory",
+      "description": "",
+      "defaultValue": "/var/run/hadoop",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
+    {
+      "id": "puppet var",
+      "name": "hadoop_conf_dir",
+      "displayName": "Hadoop conf directory",
+      "description": "",
+      "defaultValue": "/etc/hadoop/conf",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced",
+      "belongsToService":[]
+    },
+    /*
+    {
+      "id": "puppet var",
+      "name": "hcat_metastore_port",
+      "displayName": "hcat_metastore_port",
+      "description": "",
+      "defaultValue": "/usr/lib/hcatalog/share/hcatalog",
+      "isRequired": true,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC"
+    },
+    {
+      "id": "puppet var",
+      "name": "hcat_lib",
+      "displayName": "hcat_lib",
+      "description": "",
+      "defaultValue": "/usr/lib/hcatalog/share/hcatalog",
+      "isRequired": true,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC"
+    },
+
+    {
+      "id": "puppet var",
+      "name": "hcat_dbroot",
+      "displayName": "hcat_dbroot",
+      "description": "",
+      "defaultValue": "/usr/lib/hcatalog/share/hcatalog",
+      "isRequired": true,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC"
+    },
+    {
+      "id": "puppet var",
+      "name": "hcat_dbroot",
+      "displayName": "hcat_dbroot",
+      "description": "",
+      "defaultValue": "/usr/lib/hcatalog/share/hcatalog",
+      "isRequired": true,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC"
+    },
+
+     {
+     "id": "puppet var",
+     "name": "hadoop_log_dir",
+     "displayName": "Hadoop Log Dir",
+     "description": "Directory for Hadoop log files",
+     "defaultValue": "/var/log/hadoop",
+     "isReconfigurable": false,
+     "displayType": "directory",
+     "isVisible":  true, "serviceName": "MISC",
+     "category": "Advanced"
+     },
+     {
+     "id": "puppet var",
+     "name": "hadoop_pid_dir",
+     "displayName": "Hadoop PID Dir",
+     "description": "Directory in which the pid files for Hadoop processes will be created",
+     "defaultValue": "/var/run/hadoop",
+     "isReconfigurable": false,
+     "displayType": "directory",
+     "isVisible":  true, "serviceName": "MISC",
+     "category": "Advanced"
+     },
+    {
+      "id": "puppet var",
+      "name": "using_local_repo",
+      "displayName": "Whether a local repo is being used",
+      "description": "Whether a local repo is being used",
+      "defaultValue": false,
+      "isReconfigurable": false,
+      "displayType": "checkbox",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC"
+    },
+    {
+      "id": "puppet var",
+      "name": "yum_repo_file",
+      "displayName": "Path to local repo file",
+      "description": "Path to local repository file that configures from where to download software packages",
+      "defaultValue": "/etc/yum.repos.d/hdp.repo",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC"
+    },
+    */
     {
       "id": "puppet var",
       "name": "hdfs_user",
@@ -1442,6 +1853,63 @@ module.exports =
       "category": "Users and Groups",
       "belongsToService":["OOZIE"]
     },
+    /*
+    {
+      "id": "puppet var",
+      "name": "oozie_conf_dir",
+      "displayName": "Oozie conf dir",
+      "description": "",
+      "defaultValue": "/etc/oozie",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+
+    {
+      "id": "puppet var",
+      "name": "pig_conf_dir",
+      "displayName": "Pig conf dir",
+      "description": "",
+      "defaultValue": "/etc/pig",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    */
+    /*
+    {
+      "id": "puppet var",
+      "name": "sqoop_conf_dir",
+      "displayName": "sqoop conf dir",
+      "description": "",
+      "defaultValue": "/etc/sqoop",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    {
+      "id": "puppet var",
+      "name": "sqoop_lib",
+      "displayName": "sqoop conf dir",
+      "description": "",
+      "defaultValue": "/usr/lib/sqoop/lib/",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    */
     {
       "id": "puppet var",
       "name": "zk_user",
@@ -1547,6 +2015,20 @@ module.exports =
       "category": "Users and Groups",
       "belongsToService":["HDFS"]
     },
+    /*
+    {
+      "id": "puppet var",
+      "name": "zk_conf_dir",
+      "displayName": "zk_conf_dir",
+      "description": "",
+      "defaultValue": "/etc/conf/",
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    */
     {
       "id": "puppet var",
       "name": "rrdcached_base_dir",

+ 4 - 4
ambari-web/app/data/secure_mapping.js

@@ -123,18 +123,18 @@ module.exports = [
     "serviceName": "HDFS"
   },
   {
-    "name": "ambari.dfs.datanode.port",
+    "name": "dfs.datanode.address",
     "templateName": ["dfs_datanode_address"],
     "foreignKey": null,
-    "value": "<templateName[0]>",
+    "value": "0.0.0.0:<templateName[0]>",
     "filename": "hdfs-site.xml",
     "serviceName": "HDFS"
   },
   {
-    "name": "ambari.dfs.datanode.http.port",
+    "name": "dfs.datanode.http.address",
     "templateName": ["dfs_datanode_http_address"],
     "foreignKey": null,
-    "value": "<templateName[0]>",
+    "value": "0.0.0.0:<templateName[0]>",
     "filename": "hdfs-site.xml",
     "serviceName": "HDFS"
   },

+ 0 - 26
ambari-web/app/data/site_properties.js

@@ -344,18 +344,6 @@ module.exports =
       "category": "Advanced",
       "serviceName": "MAPREDUCE"
     },
-    {
-      "id": "site property",
-      "name": "ambari.mapred.child.java.opts.memory",
-      "displayName": "Java options for MapReduce tasks",
-      "description": "Java options for the TaskTracker child processes.",
-      "defaultValue": "",
-      "displayType": "int",
-      "unit": "MB",
-      "category": "TaskTracker",
-      "serviceName": "MAPREDUCE",
-      "index": 4
-    },
 
   /**********************************************oozie-site***************************************/
     {
@@ -472,20 +460,6 @@ module.exports =
       "serviceName": "HIVE",
       "index": 7
     },
-    {
-      "id": "site property",
-      "name": "ambari.hive.db.schema.name",
-      "displayName": "Database Name",
-      "description": "Database name used as the Hive Metastore",
-      "defaultValue": "",
-      "isReconfigurable": true,
-      "displayType": "host",
-      "isOverridable": false,
-      "isObserved": true,
-      "serviceName": "HIVE",
-      "category": "Hive Metastore",
-      "index": 4
-    },
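
Note: both removals here pair with globals restored earlier in this revert. An editorial summary of the undone renames (not source code):

// Site property (refactor) -> global property (restored by revert)
var undoneRenames = {
  'ambari.hive.db.schema.name':           'hive_database_name',
  'ambari.mapred.child.java.opts.memory': 'mapred_child_java_opts_sz'
};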
 
   /**********************************************hbase-site***************************************/
     {

+ 0 - 1
ambari-web/app/models/service_config.js

@@ -145,7 +145,6 @@ App.ServiceConfigProperty = Ember.Object.extend({
   isReconfigurable: true, // by default a config property is reconfigurable
   isEditable: true, // by default a config property is editable
   isVisible: true,
-  isRequiredByAgent: true, // Setting it to true implies property will be stored in global configuration
   isSecureConfig: false,
   errorMessage: '',
   warnMessage: '',
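
Note: with isRequiredByAgent dropped from the model, a config object no longer records whether the agent consumes it; the decision moves back to the name-based _hosts? test in the step8 controller hunk at the top of this diff.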

+ 78 - 92
ambari-web/app/utils/config.js

@@ -42,16 +42,16 @@ App.config = Em.Object.create({
     "&quot;": '"',
     "&apos;": "'"
   },
-
+  
   /**
    * Since values end up in XML files (core-sit.xml, etc.), certain
    * XML sensitive characters should be escaped. If not we will have
-   * an invalid XML document, and services will fail to start.
-   *
+   * an invalid XML document, and services will fail to start. 
+   * 
    * Special characters in XML are defined at
    * http://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references#Predefined_entities_in_XML
    */
-  escapeXMLCharacters: function (value) {
+  escapeXMLCharacters: function(value) {
     var self = this;
     // To prevent double/triple replacing '&gt;' to '&gt;gt;' to '&gt;gt;gt', we need
     // to first unescape all XML chars, and then escape them again.
@@ -62,28 +62,28 @@ App.config = Em.Object.create({
       return self.xmlEscapeMap[s];
     });
   },
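
Note: the unescape-then-escape comment above implies the call is safe to apply repeatedly. A usage sketch of that assumed behavior (inferred from the comment, not verified against the implementation):

// Escaping twice should not double-encode:
var once  = App.config.escapeXMLCharacters('a > b'); // "a &gt; b"
var twice = App.config.escapeXMLCharacters(once);    // still "a &gt; b"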
-  preDefinedServiceConfigs: function () {
+  preDefinedServiceConfigs: function(){
     var configs = this.get('preDefinedGlobalProperties');
     var services = [];
-    $.extend(true, [], require('data/service_configs')).forEach(function (service) {
+    $.extend(true, [], require('data/service_configs')).forEach(function(service){
       service.configs = configs.filterProperty('serviceName', service.serviceName);
       services.push(service);
     });
     return services;
   }.property('preDefinedGlobalProperties'),
-  configMapping: function () {
-    if (App.get('isHadoop2Stack')) {
-      return $.extend(true, [], require('data/HDP2/config_mapping'));
-    }
-    return $.extend(true, [], require('data/config_mapping'));
+  configMapping: function() {
+      if (App.get('isHadoop2Stack')) {
+        return $.extend(true, [],require('data/HDP2/config_mapping'));
+      }
+    return $.extend(true, [],require('data/config_mapping'));
   }.property('App.isHadoop2Stack'),
-  preDefinedGlobalProperties: function () {
+  preDefinedGlobalProperties: function() {
     if (App.get('isHadoop2Stack')) {
       return $.extend(true, [], require('data/HDP2/global_properties').configProperties);
     }
     return $.extend(true, [], require('data/global_properties').configProperties);
   }.property('App.isHadoop2Stack'),
-  preDefinedSiteProperties: function () {
+  preDefinedSiteProperties: function() {
     if (App.get('isHadoop2Stack')) {
       return $.extend(true, [], require('data/HDP2/site_properties').configProperties);
     }
@@ -98,9 +98,9 @@ App.config = Em.Object.create({
   //categories which contain custom configs
   categoriesWithCustom: ['CapacityScheduler'],
   //configs with these filenames go to appropriate category not in Advanced
-  customFileNames: function () {
+  customFileNames: function() {
     if (App.supports.capacitySchedulerUi) {
-      if (App.get('isHadoop2Stack')) {
+      if(App.get('isHadoop2Stack')){
         return ['capacity-scheduler.xml'];
       }
       return ['capacity-scheduler.xml', 'mapred-queue-acls.xml'];
@@ -144,12 +144,12 @@ App.config = Em.Object.create({
 
   /**
    * Array of global "service/desired_tag/actual_tag" strings which
-   * indicate different configurations. We cache these so that
+   * indicate different configurations. We cache these so that 
    * we dont have to recalculate if two tags are difference.
    */
-  differentGlobalTagsCache: [],
-
-  identifyCategory: function (config) {
+  differentGlobalTagsCache:[],
+  
+  identifyCategory: function(config){
     var category = null;
     var serviceConfigMetaData = this.get('preDefinedServiceConfigs').findProperty('serviceName', config.serviceName);
     if (serviceConfigMetaData) {
@@ -167,7 +167,7 @@ App.config = Em.Object.create({
    * checkbox and digital which values with 'm' at the end
    * @param config
    */
-  handleSpecialProperties: function (config) {
+  handleSpecialProperties: function(config){
     if (config.displayType === 'int' && /\d+m$/.test(config.value)) {
       config.value = config.value.slice(0, config.value.length - 1);
       config.defaultValue = config.value;
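
Note: this is the read-side complement of the step8 write path at the top of the diff: step8 appends "m" to JVM size globals when persisting, and this strips the suffix again for "int" fields so the UI shows the bare number. A round-trip sketch:

// "1024" -(step8 save)-> "1024m" -(load)-> "1024"
var value = '1024m';
if (/\d+m$/.test(value)) {
  value = value.slice(0, value.length - 1); // drop trailing "m"
}
console.log(value); // "1024"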
@@ -183,12 +183,12 @@ App.config = Em.Object.create({
    * @param isAdvanced
    * @param advancedConfigs
    */
-  calculateConfigProperties: function (config, isAdvanced, advancedConfigs) {
+  calculateConfigProperties: function(config, isAdvanced, advancedConfigs){
     if (!isAdvanced || this.get('customFileNames').contains(config.filename)) {
       var categoryMetaData = this.identifyCategory(config);
       if (categoryMetaData != null) {
         config.category = categoryMetaData.get('name');
-        if (!isAdvanced) config.isUserProperty = true;
+        if(!isAdvanced) config.isUserProperty = true;
       }
     } else {
       config.category = config.category ? config.category : 'Advanced';
@@ -200,7 +200,7 @@ App.config = Em.Object.create({
   capacitySchedulerFilter: function () {
     var yarnRegex = /^yarn\.scheduler\.capacity\.root\.(?!unfunded)([a-z]([\_\-a-z0-9]{0,50}))\.(acl_administer_jobs|acl_submit_jobs|state|user-limit-factor|maximum-capacity|capacity)$/i;
     var self = this;
-    if (App.get('isHadoop2Stack')) {
+    if(App.get('isHadoop2Stack')){
       return function (_config) {
         return (yarnRegex.test(_config.name));
       }
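
Note: the HDP 2 branch of this filter matches per-queue CapacityScheduler keys under root while excluding the special "unfunded" queue. For example:

var yarnRegex = /^yarn\.scheduler\.capacity\.root\.(?!unfunded)([a-z]([\_\-a-z0-9]{0,50}))\.(acl_administer_jobs|acl_submit_jobs|state|user-limit-factor|maximum-capacity|capacity)$/i;
console.log(yarnRegex.test('yarn.scheduler.capacity.root.default.capacity'));  // true
console.log(yarnRegex.test('yarn.scheduler.capacity.root.unfunded.capacity')); // false
console.log(yarnRegex.test('yarn.scheduler.capacity.root.default.foo'));       // false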
@@ -228,6 +228,7 @@ App.config = Em.Object.create({
     var globalConfigs = [];
     var preDefinedConfigs = this.get('preDefinedGlobalProperties').concat(this.get('preDefinedSiteProperties'));
     var mappingConfigs = [];
+
     tags.forEach(function (_tag) {
       var isAdvanced = null;
       var properties = configGroups.filter(function (serviceConfigProperties) {
@@ -249,12 +250,20 @@ App.config = Em.Object.create({
         });
 
         if (configsPropertyDef) {
-          this.setServiceConfigUiAttributes(serviceConfigObj, configsPropertyDef);
+          serviceConfigObj.displayType = configsPropertyDef.displayType;
+          serviceConfigObj.isRequired = (configsPropertyDef.isRequired !== undefined) ? configsPropertyDef.isRequired : true;
+          serviceConfigObj.isReconfigurable = (configsPropertyDef.isReconfigurable !== undefined) ? configsPropertyDef.isReconfigurable : true;
+          serviceConfigObj.isVisible = (configsPropertyDef.isVisible !== undefined) ? configsPropertyDef.isVisible : true;
+          serviceConfigObj.unit = (configsPropertyDef.unit !== undefined) ? configsPropertyDef.unit : undefined;
+          serviceConfigObj.description = (configsPropertyDef.description !== undefined) ? configsPropertyDef.description : undefined;
+          serviceConfigObj.isOverridable = configsPropertyDef.isOverridable === undefined ? true : configsPropertyDef.isOverridable;
+          serviceConfigObj.serviceName = configsPropertyDef ? configsPropertyDef.serviceName : null;
+          serviceConfigObj.index = configsPropertyDef.index;
+          serviceConfigObj.isSecureConfig = configsPropertyDef.isSecureConfig === undefined ? false : configsPropertyDef.isSecureConfig;
+          serviceConfigObj.belongsToService = configsPropertyDef.belongsToService;
+          serviceConfigObj.category = configsPropertyDef.category;
         }
         if (_tag.siteName === 'global') {
-          if (configsPropertyDef.isRequiredByAgent === false) {
-            continue;
-          }
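
Note (an editorial observation, not from the source): the removed guard dereferenced configsPropertyDef one line before the if (configsPropertyDef) null check that follows, so a global with no predefined entry would appear to throw a TypeError in the refactored code. A null-safe form of the same check, for illustration only:

function shouldPersistGlobal(configsPropertyDef) {
  return !configsPropertyDef || configsPropertyDef.isRequiredByAgent !== false;
}
console.log(shouldPersistGlobal(undefined));                    // true
console.log(shouldPersistGlobal({ isRequiredByAgent: false })); // false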
           if (configsPropertyDef) {
             this.handleSpecialProperties(serviceConfigObj);
           } else {
@@ -284,55 +293,34 @@ App.config = Em.Object.create({
       mappingConfigs: mappingConfigs
     }
   },
-
-  /**
-   * @param serviceConfigObj : Object
-   * @param configsPropertyDef : Object
-   */
-  setServiceConfigUiAttributes: function (serviceConfigObj, configsPropertyDef) {
-    serviceConfigObj.displayType = configsPropertyDef.displayType;
-    serviceConfigObj.isRequired = (configsPropertyDef.isRequired !== undefined) ? configsPropertyDef.isRequired : true;
-    serviceConfigObj.isRequiredByAgent = (configsPropertyDef.isRequiredByAgent !== undefined) ? configsPropertyDef.isRequiredByAgent : true;
-    serviceConfigObj.isReconfigurable = (configsPropertyDef.isReconfigurable !== undefined) ? configsPropertyDef.isReconfigurable : true;
-    serviceConfigObj.isVisible = (configsPropertyDef.isVisible !== undefined) ? configsPropertyDef.isVisible : true;
-    serviceConfigObj.unit = (configsPropertyDef.unit !== undefined) ? configsPropertyDef.unit : undefined;
-    serviceConfigObj.description = (configsPropertyDef.description !== undefined) ? configsPropertyDef.description : undefined;
-    serviceConfigObj.isOverridable = configsPropertyDef.isOverridable === undefined ? true : configsPropertyDef.isOverridable;
-    serviceConfigObj.serviceName = configsPropertyDef ? configsPropertyDef.serviceName : null;
-    serviceConfigObj.index = configsPropertyDef.index;
-    serviceConfigObj.isSecureConfig = configsPropertyDef.isSecureConfig === undefined ? false : configsPropertyDef.isSecureConfig;
-    serviceConfigObj.belongsToService = configsPropertyDef.belongsToService;
-    serviceConfigObj.category = configsPropertyDef.category;
-  },
-
   /**
    * synchronize order of config properties with order, that on UI side
    * @param configSet
    * @return {Object}
    */
-  syncOrderWithPredefined: function (configSet) {
+  syncOrderWithPredefined: function(configSet){
     var globalConfigs = configSet.globalConfigs,
-      siteConfigs = configSet.configs,
-      globalStart = [],
-      siteStart = [];
+        siteConfigs = configSet.configs,
+        globalStart = [],
+        siteStart = [];
 
-    this.get('preDefinedGlobalProperties').mapProperty('name').forEach(function (name) {
+    this.get('preDefinedGlobalProperties').mapProperty('name').forEach(function(name){
       var _global = globalConfigs.findProperty('name', name);
-      if (_global) {
+      if(_global){
         globalStart.push(_global);
         globalConfigs = globalConfigs.without(_global);
       }
     }, this);
 
-    this.get('preDefinedSiteProperties').mapProperty('name').forEach(function (name) {
+    this.get('preDefinedSiteProperties').mapProperty('name').forEach(function(name){
       var _site = siteConfigs.findProperty('name', name);
-      if (_site) {
+      if(_site){
         siteStart.push(_site);
         siteConfigs = siteConfigs.without(_site);
       }
     }, this);
 
-    var alphabeticalSort = function (a, b) {
+    var alphabeticalSort = function(a, b){
       if (a.name < b.name) return -1;
       if (a.name > b.name) return 1;
       return 0;
@@ -423,7 +411,7 @@ App.config = Em.Object.create({
       if (_config) {
         if (this.get('configMapping').computed().someProperty('name', _config.name)) {
         } else if (!(configsToVerifying.someProperty('name', _config.name))) {
-          if (this.get('customFileNames').contains(_config.filename)) {
+          if(this.get('customFileNames').contains(_config.filename)){
             categoryMetaData = this.identifyCategory(_config);
             if (categoryMetaData != null) {
               configCategory = categoryMetaData.get('name');
@@ -455,7 +443,7 @@ App.config = Em.Object.create({
     var stored = configs.filter(function (_config) {
       return this.get('categoriesWithCustom').contains(_config.category);
     }, this);
-    if (App.supports.capacitySchedulerUi) {
+    if(App.supports.capacitySchedulerUi){
       var queueProperties = stored.filter(this.get('capacitySchedulerFilter'));
       if (queueProperties.length) {
         queueProperties.setEach('isQueue', true);
@@ -464,10 +452,8 @@ App.config = Em.Object.create({
   },
 
   miscConfigVisibleProperty: function (configs, serviceToShow) {
-    configs.forEach(function (item) {
-      item.set("isVisible", item.belongsToService.some(function (cur) {
-        return serviceToShow.contains(cur)
-      }));
+    configs.forEach(function(item) {
+      item.set("isVisible", item.belongsToService.some(function(cur){return serviceToShow.contains(cur)}));
     });
     return configs;
   },
@@ -517,10 +503,10 @@ App.config = Em.Object.create({
       // Use calculated default values for some configs
       var recommendedDefaults = {};
       if (!storedConfigs && service.defaultsProviders) {
-        service.defaultsProviders.forEach(function (defaultsProvider) {
+        service.defaultsProviders.forEach(function(defaultsProvider) {
           var defaults = defaultsProvider.getDefaults(localDB);
-          for (var name in defaults) {
-            recommendedDefaults[name] = defaults[name];
+          for(var name in defaults) {
+        	recommendedDefaults[name] = defaults[name];
             var config = configsByService.findProperty('name', name);
             if (config) {
               config.set('value', defaults[name]);
@@ -530,10 +516,10 @@ App.config = Em.Object.create({
         });
       }
       if (service.configsValidator) {
-        service.configsValidator.set('recommendedDefaults', recommendedDefaults);
-        var validators = service.configsValidator.get('configValidators');
-        for (var validatorName in validators) {
-          var c = configsByService.findProperty('name', validatorName);
+    	service.configsValidator.set('recommendedDefaults', recommendedDefaults);
+    	var validators = service.configsValidator.get('configValidators');
+    	for (var validatorName in validators) {
+        var c = configsByService.findProperty('name', validatorName);
           if (c) {
             c.set('serviceValidator', service.configsValidator);
           }
@@ -546,13 +532,13 @@ App.config = Em.Object.create({
     return renderedServiceConfigs;
   },
   /**
-   Takes care of the "dynamic defaults" for the HCFS configs.  Sets
-   some of the config defaults to previously user-entered data.
-   **/
+  Takes care of the "dynamic defaults" for the HCFS configs.  Sets
+  some of the config defaults to previously user-entered data.
+  **/
   tweakDynamicDefaults: function (localDB, serviceConfigProperty, config) {
     console.log("Step7: Tweaking Dynamic defaults");
     var firstHost = null;
-    for (var host in localDB.hosts) {
+    for(var host in localDB.hosts) {
       firstHost = host;
       break;
     }
@@ -606,7 +592,7 @@ App.config = Em.Object.create({
     serviceConfig.configCategories.filterProperty('isCustomView', true).forEach(function (category) {
       switch (category.name) {
         case 'CapacityScheduler':
-          if (App.supports.capacitySchedulerUi) {
+          if(App.supports.capacitySchedulerUi){
             category.set('customView', App.ServiceConfigCapacityScheduler);
           } else {
             category.set('isCustomView', false);
@@ -713,14 +699,14 @@ App.config = Em.Object.create({
     }
     return globalPropertyToServicesMap;
   },
-
+  
   loadGlobalPropertyToServicesMapSuccess: function (data) {
     globalPropertyToServicesMap = {};
-    if (data.items != null) {
-      data.items.forEach(function (service) {
-        service.configurations.forEach(function (config) {
-          if ("global.xml" === config.StackConfigurations.type) {
-            if (!(config.StackConfigurations.property_name in globalPropertyToServicesMap)) {
+    if(data.items!=null){
+      data.items.forEach(function(service){
+        service.configurations.forEach(function(config){
+          if("global.xml" === config.StackConfigurations.type){
+            if(!(config.StackConfigurations.property_name in globalPropertyToServicesMap)){
               globalPropertyToServicesMap[config.StackConfigurations.property_name] = [];
             }
             globalPropertyToServicesMap[config.StackConfigurations.property_name].push(service.StackServices.service_name);
@@ -729,7 +715,7 @@ App.config = Em.Object.create({
       });
     }
   },
-
+  
   /**
    * Hosts can override service configurations per property. This method GETs
    * the overridden configurations and sets only the changed properties into
@@ -888,19 +874,19 @@ App.config = Em.Object.create({
    * @param filename
    * @return {*}
    */
-  fileConfigsIntoTextarea: function (configs, filename) {
+  fileConfigsIntoTextarea: function(configs, filename){
     var fileConfigs = configs.filterProperty('filename', filename);
     var value = '';
     var defaultValue = '';
     var complexConfig = this.get('complexConfigs').findProperty('filename', filename);
-    if (complexConfig) {
-      fileConfigs.forEach(function (_config) {
+    if(complexConfig){
+      fileConfigs.forEach(function(_config){
         value += _config.name + '=' + _config.value + '\n';
         defaultValue += _config.name + '=' + _config.defaultValue + '\n';
       }, this);
       complexConfig.value = value;
       complexConfig.defaultValue = defaultValue;
-      configs = configs.filter(function (_config) {
+      configs = configs.filter(function(_config){
         return _config.filename !== filename;
       });
       configs.push(complexConfig);
@@ -915,7 +901,7 @@ App.config = Em.Object.create({
    * @param filename
    * @return {*}
    */
-  textareaIntoFileConfigs: function (configs, filename) {
+  textareaIntoFileConfigs: function(configs, filename){
     var complexConfigName = this.get('complexConfigs').findProperty('filename', filename).name;
     var configsTextarea = configs.findProperty('name', complexConfigName);
     if (configsTextarea) {
@@ -951,12 +937,12 @@ App.config = Em.Object.create({
    * @param property
    * @returns {*}
    */
-  trimProperty: function (property, isEmberObject) {
+  trimProperty: function(property, isEmberObject){
     var displayType = (isEmberObject) ? property.get('displayType') : property.displayType;
     var value = (isEmberObject) ? property.get('value') : property.value;
     var name = (isEmberObject) ? property.get('name') : property.name;
     var rez;
-    switch (displayType) {
+    switch (displayType){
       case 'directories':
       case 'directory':
         rez = value.trim().split(/\s+/g).join(',');
@@ -967,7 +953,7 @@ App.config = Em.Object.create({
       case 'password':
         break;
       case 'advanced':
-        if (name == 'javax.jdo.option.ConnectionURL' || name == 'oozie.service.JPAService.jdbc.url') {
+        if(name == 'javax.jdo.option.ConnectionURL' || name == 'oozie.service.JPAService.jdbc.url') {
           rez = value.trim();
         }
       default:
@@ -976,7 +962,7 @@ App.config = Em.Object.create({
     return ((rez == '') || (rez == undefined)) ? value : rez;
   },
 
-  OnNnHAHideSnn: function (ServiceConfig) {
+  OnNnHAHideSnn: function(ServiceConfig) {
     var configCategories = ServiceConfig.get('configCategories');
     var snCategory = configCategories.findProperty('name', 'SNameNode');
     var activeNn = App.HDFSService.find('HDFS').get('activeNameNode.hostName');
@@ -1140,7 +1126,7 @@ App.config = Em.Object.create({
 
   /**
    * Gets all the configuration-groups for a host.
-   *
+   * 
    * @param hostName
    *          (string) host name used to register
    * @return Array of App.ConfigGroups

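For orientation, the `trimProperty` helper shown in the hunk above normalizes values before they are persisted: a `directories` value, for instance, is trimmed, split on runs of whitespace, and re-joined with commas. A minimal standalone sketch of that branch, using made-up paths rather than values from any real stack:

    // Mirrors the 'directories' case of trimProperty above: whitespace-
    // separated paths collapse into one comma-separated list.
    var raw = ' /hadoop/hdfs/data   /mnt/hdfs/data\n/grid0/hdfs/data ';
    var trimmed = raw.trim().split(/\s+/g).join(',');
    console.log(trimmed); // "/hadoop/hdfs/data,/mnt/hdfs/data,/grid0/hdfs/data"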
+ 0 - 4
ambari-web/app/utils/helper.js

@@ -126,10 +126,6 @@ String.prototype.highlight = function (words, highlightTemplate) {
   return self;
 };
 
-Array.prototype.move = function(from, to) {
-  this.splice(to, 0, this.splice(from, 1)[0]);
-};
-
 Number.prototype.toDaysHoursMinutes = function () {
   var formatted = {},
     dateDiff = this,

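The `Array.prototype.move` helper deleted from helper.js relocates an element in place with two `splice` calls: the inner call removes the element at `from`, and the outer call re-inserts it at `to`. The same behavior as a plain function, with sample data:

    // Splice-based move, equivalent to the removed prototype helper;
    // mutates the array in place and returns it for convenience.
    function move(arr, from, to) {
      arr.splice(to, 0, arr.splice(from, 1)[0]);
      return arr;
    }

    console.log(move(['a', 'b', 'c', 'd'], 0, 2)); // ["b", "c", "a", "d"]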
+ 1 - 1
ambari-web/app/views/wizard/controls_view.js

@@ -251,7 +251,7 @@ App.ServiceConfigRadioButtons = Ember.View.extend({
   databaseName: function () {
     switch (this.get('serviceConfig.serviceName')) {
       case 'HIVE':
-        return this.get('categoryConfigsAll').findProperty('name', 'ambari.hive.db.schema.name').get('value');
+        return this.get('categoryConfigsAll').findProperty('name', 'hive_database_name').get('value');
       case 'OOZIE':
         return this.get('categoryConfigsAll').findProperty('name', 'oozie.db.schema.name').get('value');
       default:
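The `databaseName` change above switches the HIVE lookup key from the hive-site property `ambari.hive.db.schema.name` back to the global `hive_database_name`. `findProperty` here is Ember's enumerable extension that returns the first element whose named attribute matches; a small sketch with hypothetical config entries (assumes Ember's prototype extensions are loaded, as they are in ambari-web):

    // Hypothetical stand-ins for entries in categoryConfigsAll;
    // findProperty picks the first object whose `name` matches the key.
    var categoryConfigsAll = [
      Ember.Object.create({ name: 'hive_database', value: 'New MySQL Database' }),
      Ember.Object.create({ name: 'hive_database_name', value: 'hive' })
    ];
    var databaseName = categoryConfigsAll.findProperty('name', 'hive_database_name').get('value');
    console.log(databaseName); // "hive"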