Quellcode durchsuchen

AMBARI-5387. Stack definition does not provide global properties with empty values. (Jaimin Jettly, swagle via swagle)

Siddharth Wagle vor 11 Jahren
Ursprung
Commit
d3f45041f7
41 geänderte Dateien mit 168 neuen und 467 gelöschten Zeilen
  1. 2 4
      ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java
  2. 3 8
      ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java
  3. 0 15
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/global.xml
  4. 7 13
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml
  5. 9 2
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml
  6. 0 20
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml
  7. 3 3
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-policy.xml
  8. 10 10
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml
  9. 4 32
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml
  10. 3 3
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml
  11. 0 10
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml
  12. 6 24
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml
  13. 1 36
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml
  14. 0 16
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-site.xml
  15. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
  16. 0 5
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/global.xml
  17. 0 15
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/global.xml
  18. 7 7
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml
  19. 0 20
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml
  20. 10 16
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml
  21. 5 33
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/global.xml
  22. 3 3
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml
  23. 2 37
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/global.xml
  24. 0 16
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml
  25. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml
  26. 0 5
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/global.xml
  27. 1 21
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/mapred-site.xml
  28. 0 10
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/global.xml
  29. 0 5
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/global.xml
  30. 6 8
      ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/falcon-startup.properties.xml
  31. 4 22
      ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/configuration/hive-site.xml
  32. 0 16
      ambari-server/src/main/resources/stacks/HDP/2.1/services/OOZIE/configuration/oozie-site.xml
  33. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.1/services/WEBHCAT/configuration/webhcat-site.xml
  34. 0 10
      ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/global.xml
  35. 16 14
      ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
  36. 10 2
      ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java
  37. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
  38. 34 0
      ambari-web/app/data/HDP2/site_properties.js
  39. 16 0
      ambari-web/app/data/site_properties.js
  40. 1 1
      ambari-web/app/utils/helper.js
  41. 1 1
      ambari-web/app/views/common/configs/services_config.js

+ 2 - 4
ambari-server/src/main/java/org/apache/ambari/server/api/services/AmbariMetaInfo.java

@@ -724,13 +724,11 @@ public class AmbariMetaInfo {
       }
 
       // Populate services
-      List<ServiceInfo> services = stackExtensionHelper
-        .getAllApplicableServices(stack);
+      List<ServiceInfo> services = stackExtensionHelper.getAllApplicableServices(stack);
       stack.setServices(services);
 
       // Resolve hooks folder
-      String stackHooksToUse = stackExtensionHelper.
-              resolveHooksFolder(stack);
+      String stackHooksToUse = stackExtensionHelper.resolveHooksFolder(stack);
       stack.setStackHooksFolder(stackHooksToUse);
     }
 

+ 3 - 8
ambari-server/src/main/java/org/apache/ambari/server/api/util/StackExtensionHelper.java

@@ -594,12 +594,9 @@ public class StackExtensionHelper {
       List<PropertyInfo> list = new ArrayList<PropertyInfo>();
       
       for (PropertyInfo pi : cx.getProperties()) {
-        // maintain old behavior
-        if (null == pi.getValue() || pi.getValue().isEmpty())
-          continue;
-        
         pi.setFilename(propertyFile.getName());
         list.add(pi);
+
       }
       return list;
     } catch (Exception e) {
@@ -620,12 +617,10 @@ public class StackExtensionHelper {
     if (!serviceConfigFolder.exists() || !serviceConfigFolder.isDirectory())
       return;
     
-    File[] configFiles = serviceConfigFolder.listFiles
-            (AmbariMetaInfo.FILENAME_FILTER);
+    File[] configFiles = serviceConfigFolder.listFiles(AmbariMetaInfo.FILENAME_FILTER);
     if (configFiles != null) {
       for (File config : configFiles) {
-        if (config.getName().endsWith
-                (AmbariMetaInfo.SERVICE_CONFIG_FILE_NAME_POSTFIX)) {
+        if (config.getName().endsWith(AmbariMetaInfo.SERVICE_CONFIG_FILE_NAME_POSTFIX)) {
           serviceInfo.getProperties().addAll(getProperties(config));
         }
       }

+ 0 - 15
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/global.xml

@@ -21,16 +21,6 @@
 -->
 
 <configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
   <property>
     <name>hbase_log_dir</name>
     <value>/var/log/hbase</value>
@@ -85,11 +75,6 @@
     <name>hregion_blockmultiplier</name>
     <value>2</value>
     <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
   </property>
     <property>
     <name>client_scannercaching</name>

+ 7 - 13
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-site.xml

@@ -53,18 +53,18 @@
   </property>
   <property>
     <name>hbase.master.info.bindAddress</name>
-    <value></value>
+    <value>0.0.0.0</value>
     <description>The bind address for the HBase Master web UI
     </description>
   </property>
   <property>
     <name>hbase.master.info.port</name>
-    <value></value>
+    <value>60010</value>
     <description>The port for the HBase Master web UI.</description>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
-    <value></value>
+    <value>60030</value>
     <description>The port for the HBase RegionServer web UI.</description>
   </property>
   <property>
@@ -208,14 +208,14 @@
        values, included here for documentation purposes -->
   <property>
     <name>hbase.master.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HMaster server principal.
     </description>
   </property>
   <property>
     <name>hbase.master.kerberos.principal</name>
-    <value></value>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
     <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
     that should be used to run the HMaster process.  The principal name should
     be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
@@ -225,14 +225,14 @@
   </property>
   <property>
     <name>hbase.regionserver.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HRegionServer server principal.
     </description>
   </property>
   <property>
     <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
     <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
     that should be used to run the HRegionServer process.  The principal name
     should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
@@ -339,12 +339,6 @@
     </description>
   </property>
 
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disbale skipping the checksum check</description>
-  </property>
-  
   <property>
     <name>hbase.zookeeper.useMulti</name>
     <value>true</value>

+ 9 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/core-site.xml

@@ -154,7 +154,7 @@
  </property>
 <property>
   <name>hadoop.security.authorization</name>
-  <value></value>
+  <value>false</value>
   <description>
      Enable authorization for different protocols.
   </description>
@@ -162,7 +162,14 @@
 
   <property>
     <name>hadoop.security.auth_to_local</name>
-    <value></value>
+    <value>
+      RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+      RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+      RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+      RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+      RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+      DEFAULT
+    </value>
 <description>The mapping from kerberos principal names to local OS user names.
   So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
   "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.

+ 0 - 20
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml

@@ -21,31 +21,16 @@
 -->
 
 <configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
   <property>
     <name>dfs_name_dir</name>
     <value>/hadoop/hdfs/namenode</value>
     <description>NameNode Directories.</description>
   </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode.</description>
-  </property>
   <property>
     <name>fs_checkpoint_dir</name>
     <value>/hadoop/hdfs/namesecondary</value>
     <description>Secondary NameNode checkpoint dir.</description>
   </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
   <property>
     <name>dfs_data_dir</name>
     <value>/hadoop/hdfs/data</value>
@@ -162,11 +147,6 @@
     <value>EXAMPLE.COM</value>
     <description>Kerberos realm.</description>
   </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>

+ 3 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-policy.xml

@@ -103,7 +103,7 @@
 
  <property>
     <name>security.admin.operations.protocol.acl</name>
-    <value></value>
+    <value>*</value>
     <description>ACL for AdminOperationsProtocol. Used for admin commands.
     The ACL is a comma-separated list of user and group names. The user and
     group list is separated by a blank. For e.g. "alice,bob users,wheel".
@@ -112,7 +112,7 @@
 
   <property>
     <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
+    <value>*</value>
     <description>ACL for RefreshUserMappingsProtocol. Used to refresh
     users mappings. The ACL is a comma-separated list of user and
     group names. The user and group list is separated by a blank. For
@@ -122,7 +122,7 @@
 
 <property>
     <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
+    <value>*</value>
     <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
     dfsadmin and mradmin commands to refresh the security policy in-effect.
     The ACL is a comma-separated list of user and group names. The user and

+ 10 - 10
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hdfs-site.xml

@@ -273,7 +273,7 @@
 
   <property>
     <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
+    <value>nn/_HOST@EXAMPLE.COM</value>
     <description>
       Kerberos principal name for the NameNode
     </description>
@@ -281,7 +281,7 @@
 
   <property>
     <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
+    <value>nn/_HOST@EXAMPLE.COM</value>
     <description>
       Kerberos principal name for the secondary NameNode.
     </description>
@@ -293,14 +293,14 @@
   -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>The Kerberos principal for the host that the NameNode runs on.</description>
 
   </property>
 
   <property>
     <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
 
   </property>
@@ -320,7 +320,7 @@
 
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>
       The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
@@ -330,7 +330,7 @@
 
   <property>
     <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
     <description>
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
@@ -339,7 +339,7 @@
 
   <property>
     <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
+    <value>dn/_HOST@EXAMPLE.COM</value>
     <description>
       The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
@@ -347,7 +347,7 @@
 
   <property>
     <name>dfs.namenode.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
     <description>
       Combined keytab file containing the namenode service and host principals.
     </description>
@@ -355,7 +355,7 @@
 
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
     <description>
       Combined keytab file containing the namenode service and host principals.
     </description>
@@ -363,7 +363,7 @@
 
   <property>
     <name>dfs.datanode.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/dn.service.keytab</value>
     <description>
       The filename of the keytab file for the DataNode.
     </description>

+ 4 - 32
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml

@@ -21,46 +21,18 @@
 -->
 
 <configuration>
-  <property>
-    <name>hivemetastore_host</name>
-    <value></value>
-    <description>Hive Metastore host.</description>
-  </property>
   <property>
     <name>hive_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_host</name>
-    <value></value>
-    <description></description>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
   </property>
   <property>
     <name>hive_ambari_database</name>
     <value>MySQL</value>
     <description>Database type.</description>
   </property>  
-  <property>
-    <name>hive_ambari_host</name>
-    <value></value>
-    <description>Database hostname.</description>
-  </property>
   <property>
     <name>hive_database_name</name>
     <value></value>

+ 3 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-site.xml

@@ -56,21 +56,21 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.sasl.enabled</name>
-    <value></value>
+    <value>false</value>
     <description>If true, the metastore thrift interface will be secured with SASL.
       Clients must authenticate with Kerberos.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
     <description>The path to the Kerberos Keytab file containing the metastore
       thrift server's service principal.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.principal</name>
-    <value></value>
+    <value>hive/_HOST@EXAMPLE.COM</value>
     <description>The service principal for the metastore thrift server. The special
       string _HOST will be replaced automatically with the correct host name.</description>
   </property>

+ 0 - 10
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml

@@ -21,16 +21,6 @@
 -->
 
 <configuration>
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>JobTracker Host.</description>
-  </property>
-  <property>
-    <name>tasktracker_hosts</name>
-    <value></value>
-    <description>TaskTracker hosts.</description>
-  </property>
   <property>
     <name>mapred_local_dir</name>
     <value>/hadoop/mapred</value>

+ 6 - 24
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-site.xml

@@ -249,11 +249,6 @@
 
 
   <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
 
   <property>
     <name>mapred.task.timeout</name>
@@ -431,13 +426,6 @@
     <description>The completed job history files are stored at this single well known location.</description>
   </property>
 
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
   <property>
     <name>mapred.jobtracker.maxtasks.per.job</name>
     <value>-1</value>
@@ -471,7 +459,7 @@
 
   <property>
     <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value></value>
+    <value>jt/_HOST@EXAMPLE.COM</value>
     <description>
       JT user name key.
     </description>
@@ -479,7 +467,7 @@
 
   <property>
     <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value></value>
+    <value>tt/_HOST@EXAMPLE.COM</value>
     <description>
       tt user name key. "_HOST" is replaced by the host name of the task tracker.
     </description>
@@ -498,7 +486,7 @@
 
   <property>
     <name>mapreduce.jobtracker.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/jt.service.keytab</value>
     <description>
       The keytab for the jobtracker principal.
     </description>
@@ -507,16 +495,10 @@
 
   <property>
     <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/tt.service.keytab</value>
     <description>The filename of the keytab for the task tracker</description>
   </property>
 
-  <property>
-    <name>mapred.task.tracker.http.address</name>
-    <value></value>
-    <description>Http address for task tracker.</description>
-  </property>
-
   <property>
     <name>mapreduce.jobtracker.staging.root.dir</name>
     <value>/user</value>
@@ -558,7 +540,7 @@
   <property>
     <name>mapreduce.jobhistory.kerberos.principal</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>jhs/_HOST@EXAMPLE.COM</value>
     <description>Job history user name key. (must map to same user as JT
       user)</description>
   </property>
@@ -566,7 +548,7 @@
   <property>
     <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-    <value></value>
+    <value>/etc/security/keytabs/jhs.service.keytab</value>
     <description>The keytab for the job history server principal.</description>
   </property>
 

+ 1 - 36
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml

@@ -26,14 +26,9 @@
     <value>oozie</value>
     <description>Oozie User.</description>
   </property>
-  <property>
-    <name>oozieserver_host</name>
-    <value></value>
-    <description>Oozie Server Host.</description>
-  </property>
   <property>
     <name>oozie_database</name>
-    <value></value>
+    <value>New Derby Database</value>
     <description>Oozie Server Database.</description>
   </property>
   <property>
@@ -41,36 +36,6 @@
     <value>Derby</value>
     <description>Oozie Derby Database.</description>
   </property>
-  <property>
-    <name>oozie_existing_mysql_database</name>
-    <value>MySQL</value>
-    <description>Oozie MySQL Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL Host.</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_database</name>
-    <value>Oracle</value>
-    <description>Oracle Database</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_host</name>
-    <value></value>
-    <description>Database Host.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database default.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_host</name>
-    <value></value>
-    <description>Host on which databse will be created.</description>
-  </property>
   <property>
     <name>oozie_database_name</name>
     <value>oozie</value>

+ 0 - 16
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-site.xml

@@ -110,14 +110,6 @@
     </description>
   </property>
 
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
   <property>
     <name>oozie.authentication.type</name>
     <value>simple</value>
@@ -127,14 +119,6 @@
     </description>
   </property>
 
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted NameNode for Oozie service.
-    </description>
-  </property>
-
   <property>
     <name>oozie.service.WorkflowAppService.system.libpath</name>
     <value>/user/${user.name}/share/lib</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml

@@ -86,7 +86,7 @@ limitations under the License.
 
   <property>
     <name>templeton.hive.properties</name>
-    <value></value>
+    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
     <description>Properties to set when running hive.</description>
   </property>
 

+ 0 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/global.xml

@@ -26,11 +26,6 @@
     <value>zookeeper</value>
     <description>ZooKeeper User.</description>
   </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
   <property>
     <name>zk_data_dir</name>
     <value>/hadoop/zookeeper</value>

+ 0 - 15
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/global.xml

@@ -21,16 +21,6 @@
 -->
 
 <configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
   <property>
     <name>hbase_log_dir</name>
     <value>/var/log/hbase</value>
@@ -85,11 +75,6 @@
     <name>hregion_blockmultiplier</name>
     <value>2</value>
     <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
   </property>
     <property>
     <name>client_scannercaching</name>

+ 7 - 7
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-site.xml

@@ -59,18 +59,18 @@
   </property>
   <property>
     <name>hbase.master.info.bindAddress</name>
-    <value></value>
+    <value>0.0.0.0</value>
     <description>The bind address for the HBase Master web UI
     </description>
   </property>
   <property>
     <name>hbase.master.info.port</name>
-    <value></value>
+    <value>60010</value>
     <description>The port for the HBase Master web UI.</description>
   </property>
   <property>
     <name>hbase.regionserver.info.port</name>
-    <value></value>
+    <value>60030</value>
     <description>The port for the HBase RegionServer web UI.</description>
   </property>
   <property>
@@ -222,14 +222,14 @@
        values, included here for documentation purposes -->
   <property>
     <name>hbase.master.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HMaster server principal.
     </description>
   </property>
   <property>
     <name>hbase.master.kerberos.principal</name>
-    <value></value>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
     <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
     that should be used to run the HMaster process.  The principal name should
     be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
@@ -239,14 +239,14 @@
   </property>
   <property>
     <name>hbase.regionserver.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/hbase.service.keytab</value>
     <description>Full path to the kerberos keytab file to use for logging in
     the configured HRegionServer server principal.
     </description>
   </property>
   <property>
     <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
+    <value>hbase/_HOST@EXAMPLE.COM</value>
     <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
     that should be used to run the HRegionServer process.  The principal name
     should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the

+ 0 - 20
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml

@@ -21,31 +21,16 @@
 -->
 
 <configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
   <property>
     <name>dfs_namenode_name_dir</name>
     <value>/hadoop/hdfs/namenode</value>
     <description>NameNode Directories.</description>
   </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode.</description>
-  </property>
   <property>
     <name>dfs_namenode_checkpoint_dir</name>
     <value>/hadoop/hdfs/namesecondary</value>
     <description>Secondary NameNode checkpoint dir.</description>
   </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
   <property>
     <name>dfs_datanode_data_dir</name>
     <value>/hadoop/hdfs/data</value>
@@ -152,11 +137,6 @@
     <value>EXAMPLE.COM</value>
     <description>Kerberos realm.</description>
   </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
   <property>
     <name>keytab_path</name>
     <value>/etc/security/keytabs</value>

+ 10 - 16
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hdfs-site.xml

@@ -291,7 +291,7 @@
 
   <property>
     <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
+    <value>nn/_HOST@EXAMPLE.COM</value>
     <description>
       Kerberos principal name for the NameNode
     </description>
@@ -299,7 +299,7 @@
 
   <property>
     <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
+    <value>nn/_HOST@EXAMPLE.COM</value>
     <description>
       Kerberos principal name for the secondary NameNode.
     </description>
@@ -311,14 +311,14 @@
   -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>The Kerberos principal for the host that the NameNode runs on.</description>
 
   </property>
 
   <property>
     <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>The Kerberos principal for the hostthat the secondary NameNode runs on.</description>
 
   </property>
@@ -332,7 +332,7 @@
 
   <property>
     <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>
       The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
       The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
@@ -342,7 +342,7 @@
 
   <property>
     <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
     <description>
       The Kerberos keytab file with the credentials for the
       HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
@@ -351,7 +351,7 @@
 
   <property>
     <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
+    <value>dn/_HOST@EXAMPLE.COM</value>
     <description>
       The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
@@ -359,7 +359,7 @@
 
   <property>
     <name>dfs.namenode.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
     <description>
       Combined keytab file containing the namenode service and host principals.
     </description>
@@ -367,7 +367,7 @@
 
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
     <description>
       Combined keytab file containing the namenode service and host principals.
     </description>
@@ -375,7 +375,7 @@
 
   <property>
     <name>dfs.datanode.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/dn.service.keytab</value>
     <description>
       The filename of the keytab file for the DataNode.
     </description>
@@ -466,12 +466,6 @@
     </description>
   </property>
 
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disbale skipping the checksum check</description>
-  </property>
-
   <property>
     <name>dfs.domain.socket.path</name>
     <value>/var/lib/hadoop-hdfs/dn_socket</value>

+ 5 - 33
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/global.xml

@@ -21,50 +21,22 @@
 -->
 
 <configuration>
-  <property>
-    <name>hivemetastore_host</name>
-    <value></value>
-    <description>Hive Metastore host.</description>
-  </property>
   <property>
     <name>hive_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_host</name>
-    <value></value>
-    <description></description>
+    <value>New MySQL Database</value>
+    <description>
+      Property that determines whether the HIVE DB is managed by Ambari.
+    </description>
   </property>
   <property>
     <name>hive_ambari_database</name>
     <value>MySQL</value>
     <description>Database type.</description>
-  </property>  
-  <property>
-    <name>hive_ambari_host</name>
-    <value></value>
-    <description>Database hostname.</description>
   </property>
   <property>
     <name>hive_database_name</name>
     <value></value>
-    <description>Database hname</description>
+    <description>Database name.</description>
   </property>    
   <property>
     <name>hive_metastore_user_name</name>

+ 3 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-site.xml

@@ -57,21 +57,21 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.sasl.enabled</name>
-    <value></value>
+    <value>false</value>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.principal</name>
-    <value></value>
+    <value>hive/_HOST@EXAMPLE.COM</value>
     <description>The service principal for the metastore thrift server. The special
     string _HOST will be replaced automatically with the correct host name.</description>
   </property>

+ 2 - 37
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/global.xml

@@ -26,50 +26,15 @@
     <value>oozie</value>
     <description>Oozie User.</description>
   </property>
-  <property>
-    <name>oozieserver_host</name>
-    <value></value>
-    <description>Oozie Server Host.</description>
-  </property>
   <property>
     <name>oozie_database</name>
-    <value></value>
+    <value>New Derby Database</value>
     <description>Oozie Server Database.</description>
   </property>
   <property>
     <name>oozie_derby_database</name>
     <value>Derby</value>
-    <description>Oozie Derby Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_database</name>
-    <value>MySQL</value>
-    <description>Oozie MySQL Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL Host.</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_database</name>
-    <value>Oracle</value>
-    <description>Oracle Database</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_host</name>
-    <value></value>
-    <description>Database Host.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database default.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_host</name>
-    <value></value>
-    <description>Host on which databse will be created.</description>
+    <description>Oozie Derby Database</description>
   </property>
   <property>
     <name>oozie_database_name</name>

+ 0 - 16
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-site.xml

@@ -110,14 +110,6 @@
     </description>
   </property>
 
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
   <property>
     <name>oozie.authentication.type</name>
     <value>simple</value>
@@ -127,14 +119,6 @@
     </description>
   </property>
 
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted NameNode for Oozie service.
-    </description>
-  </property>
-
   <property>
     <name>oozie.service.WorkflowAppService.system.libpath</name>
     <value>/user/${user.name}/share/lib</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml

@@ -98,7 +98,7 @@ limitations under the License.
 
   <property>
     <name>templeton.hive.properties</name>
-    <value></value>
+    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
     <description>Properties to set when running hive.</description>
   </property>
 

+ 0 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/global.xml

@@ -21,11 +21,6 @@
 -->
 
 <configuration>
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
   <property>
     <name>mapred_log_dir_prefix</name>
     <value>/var/log/hadoop-mapreduce</value>

+ 1 - 21
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration-mapred/mapred-site.xml

@@ -119,14 +119,6 @@
     </description>
   </property>
 
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
   <property>
     <name>mapreduce.output.fileoutputformat.compress.type</name>
     <value>BLOCK</value>
@@ -180,8 +172,7 @@
 
   <property>
     <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
+    <value>/etc/security/keytabs/jhs.service.keytab</value>
     <description>The keytab for the job history server principal.</description>
   </property>
 
@@ -285,17 +276,6 @@
     <description>MR App Master process log level.</description>
   </property>
 
-  <property>
-    <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
-    <description>
-      User added environment variables for the MR App Master
-      processes. Example :
-      1) A=foo  This will set the env variable A to foo
-      2) B=$B:c This is inherit tasktracker's B env variable.
-    </description>
-  </property>
-
   <property>
     <name>mapreduce.admin.map.child.java.opts</name>
     <value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>

+ 0 - 10
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/configuration/global.xml

@@ -21,16 +21,6 @@
 -->
 
 <configuration>
-  <property>
-    <name>rm_host</name>
-    <value></value>
-    <description>ResourceManager.</description>
-  </property>
-  <property>
-    <name>nm_hosts</name>
-    <value></value>
-    <description>List of NodeManager Hosts.</description>
-  </property>
   <property>
     <name>yarn_log_dir_prefix</name>
     <value>/var/log/hadoop-yarn</value>

+ 0 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/global.xml

@@ -26,11 +26,6 @@
     <value>zookeeper</value>
     <description>ZooKeeper User.</description>
   </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
   <property>
     <name>zk_data_dir</name>
     <value>/hadoop/zookeeper</value>

+ 6 - 8
ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/configuration/falcon-startup.properties.xml

@@ -96,8 +96,6 @@
     <value>file:///hadoop/falcon/store</value>
     <description>Location to store user entity configurations</description>
   </property>
-  <property>
-  </property>
   <property>
     <name>*.system.lib.location</name>
     <value>${falcon.home}/server/webapp/${falcon.app.type}/WEB-INF/lib</value>
@@ -141,7 +139,7 @@
   <!--properties without default values-->
   <property>
     <name>*.falcon.http.authentication.cookie.domain</name>
-    <value></value>
+    <value>EXAMPLE.COM</value>
     <description></description>
   </property>
   <property>
@@ -183,27 +181,27 @@
   <!--kerberos params, must be set during security enabling-->
   <property>
     <name>*.falcon.service.authentication.kerberos.principal</name>
-    <value></value>
+    <value>falcon/_HOST@EXAMPLE.COM</value>
     <description></description>
   </property>
   <property>
     <name>*.falcon.service.authentication.kerberos.keytab</name>
-    <value></value>
+    <value>/etc/security/keytabs/falcon.service.keytab</value>
     <description></description>
   </property>
   <property>
     <name>*.dfs.namenode.kerberos.principal</name>
-    <value></value>
+    <value>nn/_HOST@EXAMPLE.COM</value>
     <description>name node principal to talk to config store</description>
   </property>
   <property>
     <name>*.falcon.http.authentication.kerberos.principal</name>
-    <value></value>
+    <value>HTTP/_HOST@EXAMPLE.COM</value>
     <description>Indicates the Kerberos principal to be used for HTTP endpoint</description>
   </property>
   <property>
     <name>*.falcon.http.authentication.kerberos.keytab</name>
-    <value></value>
+    <value>/etc/security/keytabs/spnego.service.keytab</value>
     <description>Location of the keytab file with the credentials for the HTTP principal</description>
   </property>
 </configuration>

+ 4 - 22
ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/configuration/hive-site.xml

@@ -57,21 +57,21 @@ limitations under the License.
 
   <property>
     <name>hive.metastore.sasl.enabled</name>
-    <value></value>
+    <value>false</value>
     <description>If true, the metastore thrift interface will be secured with SASL.
      Clients must authenticate with Kerberos.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
+    <value>/etc/security/keytabs/hive.service.keytab</value>
     <description>The path to the Kerberos Keytab file containing the metastore
      thrift server's service principal.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.principal</name>
-    <value></value>
+    <value>hive/_HOST@EXAMPLE.COM</value>
     <description>The service principal for the metastore thrift server. The special
     string _HOST will be replaced automatically with the correct host name.</description>
   </property>
@@ -384,7 +384,7 @@ limitations under the License.
   <property>
     <name>hive.server2.tez.default.queues</name>
     <value>default</value>
-    <description></description>
+    <description>A comma-separated list of queues configured for the cluster.</description>
   </property>
 
   <property>
@@ -405,18 +405,6 @@ limitations under the License.
     <description>Select the class to do transaction management. The default DummyTxnManager does no transactions and retains the legacy behavior.</description>
   </property>
 
-  <property>
-    <name>hive.txn.driver</name>
-    <value></value>
-    <description>Gives the jdbc driver to use to connect to the metastore</description>
-  </property>
-
-  <property>
-    <name>hive.txn.connection.string</name>
-    <value></value>
-    <description>Gives the connection string to pass the jdbc driver</description>
-  </property>
-
   <property>
     <name>hive.txn.timeout</name>
     <value>300</value>
@@ -471,10 +459,4 @@ limitations under the License.
     <description>Number of aborted transactions involving a particular table or partition before major compaction is initiated.</description>
   </property>
 
-  <property>
-    <name>hive.users.in.admin.role</name>
-    <value></value>
-    <description>If user is specified as value of this config, that user has superuser privileges in DB</description>
-  </property>
-
 </configuration>

+ 0 - 16
ambari-server/src/main/resources/stacks/HDP/2.1/services/OOZIE/configuration/oozie-site.xml

@@ -110,14 +110,6 @@
     </description>
   </property>
 
-  <property>
-    <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted job tracker for Oozie service.
-    </description>
-  </property>
-
   <property>
     <name>oozie.authentication.type</name>
     <value>simple</value>
@@ -127,14 +119,6 @@
     </description>
   </property>
 
-  <property>
-    <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-    <value></value>
-    <description>
-      Whitelisted NameNode for Oozie service.
-    </description>
-  </property>
-
   <property>
     <name>oozie.service.WorkflowAppService.system.libpath</name>
     <value>/user/${user.name}/share/lib</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1/services/WEBHCAT/configuration/webhcat-site.xml

@@ -98,7 +98,7 @@ limitations under the License.
 
   <property>
     <name>templeton.hive.properties</name>
-    <value></value>
+    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
     <description>Properties to set when running hive.</description>
   </property>
 

+ 0 - 10
ambari-server/src/main/resources/stacks/HDP/2.1/services/YARN/configuration/global.xml

@@ -21,16 +21,6 @@
 -->
 
 <configuration>
-  <property>
-    <name>rm_host</name>
-    <value></value>
-    <description>ResourceManager.</description>
-  </property>
-  <property>
-    <name>nm_hosts</name>
-    <value></value>
-    <description>List of NodeManager Hosts.</description>
-  </property>
   <property>
     <name>yarn_log_dir_prefix</name>
     <value>/var/log/hadoop-yarn</value>

+ 16 - 14
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java

@@ -491,23 +491,22 @@ public class AmbariMetaInfoTest {
     PropertyInfo deleteProperty2 = null;
     PropertyInfo redefinedProperty1 = null;
     PropertyInfo redefinedProperty2 = null;
+    PropertyInfo redefinedProperty3 = null;
     PropertyInfo inheritedProperty = null;
     PropertyInfo newProperty = null;
     PropertyInfo originalProperty = null;
 
     for (PropertyInfo propertyInfo : redefinedService.getProperties()) {
-      if (propertyInfo.getName().equals("yarn.resourcemanager" +
-        ".resource-tracker.address")) {
+      if (propertyInfo.getName().equals("yarn.resourcemanager.resource-tracker.address")) {
         deleteProperty1 = propertyInfo;
-      } else if (propertyInfo.getName().equals("yarn.resourcemanager" +
-        ".scheduler.address")) {
+      } else if (propertyInfo.getName().equals("yarn.resourcemanager.scheduler.address")) {
         deleteProperty2 = propertyInfo;
-      } else if (propertyInfo.getName().equals("yarn.resourcemanager" +
-        ".address")) {
+      } else if (propertyInfo.getName().equals("yarn.resourcemanager.address")) {
         redefinedProperty1 = propertyInfo;
-      } else if (propertyInfo.getName().equals("yarn.resourcemanager.admin" +
-        ".address")) {
+      } else if (propertyInfo.getName().equals("yarn.resourcemanager.admin.address")) {
         redefinedProperty2 = propertyInfo;
+      } else if (propertyInfo.getName().equals("yarn.nodemanager.health-checker.interval-ms")) {
+        redefinedProperty3 = propertyInfo;
       } else if (propertyInfo.getName().equals("yarn.nodemanager.address")) {
         inheritedProperty = propertyInfo;
       } else if (propertyInfo.getName().equals("new-yarn-property")) {
@@ -524,9 +523,11 @@ public class AmbariMetaInfoTest {
     Assert.assertNotNull("yarn.nodemanager.address expected to be inherited " +
       "from parent", inheritedProperty);
     Assert.assertEquals("localhost:100009", redefinedProperty1.getValue());
-    // Parent property value will result in property being present in the
-    // child stack
-    Assert.assertEquals("localhost:8141", redefinedProperty2.getValue());
+    // Parent property value will result in property being present in the child stack
+    Assert.assertNotNull(redefinedProperty3);
+    Assert.assertEquals("135000", redefinedProperty3.getValue());
+    // Child can override parent property to empty value
+    Assert.assertEquals("", redefinedProperty2.getValue());
     // New property
     Assert.assertNotNull(newProperty);
     Assert.assertEquals("some-value", newProperty.getValue());
@@ -557,8 +558,8 @@ public class AmbariMetaInfoTest {
     method.setAccessible(true);
     StackExtensionHelper helper = new StackExtensionHelper(metaInfo.getStackRoot());
     helper.fillInfo();
-    Map<String, List<StackInfo>> stacks = (Map<String, List<StackInfo>>)
-      method.invoke(helper, allStacks);
+    Map<String, List<StackInfo>> stacks =
+      (Map<String, List<StackInfo>>) method.invoke(helper, allStacks);
 
     Assert.assertNotNull(stacks.get("2.0.99"));
     // Verify order
@@ -621,7 +622,8 @@ public class AmbariMetaInfoTest {
   @Test
   public void testPropertyCount() throws Exception {
     Set<PropertyInfo> properties = metaInfo.getProperties(STACK_NAME_HDP, STACK_VERSION_HDP_02, SERVICE_NAME_HDFS);
-    Assert.assertEquals(81, properties.size());
+    // 3 empty properties
+    Assert.assertEquals(84, properties.size());
   }
 
   @Test

+ 10 - 2
ambari-server/src/test/java/org/apache/ambari/server/api/util/StackExtensionHelperTest.java

@@ -22,6 +22,7 @@ import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.state.*;
 
 import java.io.File;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -66,7 +67,7 @@ public class StackExtensionHelperTest {
         assertEquals("MASTER", components.get(0).getCategory());
         List<PropertyInfo> properties = serviceInfo.getProperties();
         // Check some property
-        assertEquals(35, properties.size());
+        assertEquals(38, properties.size());
         boolean found = false;
         for (PropertyInfo property : properties) {
           if (property.getName().equals("javax.jdo.option.ConnectionDriverName")) {
@@ -156,7 +157,14 @@ public class StackExtensionHelperTest {
         assertEquals(50, serviceScriptDefinition.getTimeout());
         // Check some property
         List<PropertyInfo> properties = serviceInfo.getProperties();
-        assertEquals(38, properties.size());
+        List<PropertyInfo> emptyValueProperties = new ArrayList<PropertyInfo>();
+        for (PropertyInfo propertyInfo : properties) {
+          if (propertyInfo.getValue().isEmpty()) {
+            emptyValueProperties.add(propertyInfo);
+          }
+        }
+        assertEquals(28, emptyValueProperties.size());
+        assertEquals(66, properties.size());
         boolean found = false;
         for (PropertyInfo property : properties) {
           if (property.getName().equals("hbase.cluster.distributed")) {

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -151,7 +151,7 @@ public class AmbariManagementControllerTest {
   private static final int STACK_VERSIONS_CNT = 12;
   private static final int REPOS_CNT = 3;
   private static final int STACKS_CNT = 1;
-  private static final int STACK_PROPERTIES_CNT = 81;
+  private static final int STACK_PROPERTIES_CNT = 84;
   private static final int STACK_COMPONENTS_CNT = 3;
   private static final int OS_CNT = 2;
 

+ 34 - 0
ambari-web/app/data/HDP2/site_properties.js

@@ -470,6 +470,14 @@ module.exports =
       "category": "Hive Metastore",
       "index": 4
     },
+    {
+      "id": "site property",
+      "name": "hive.server2.tez.default.queues",
+      "displayName": "hive.server2.tez.default.queues",
+      "isRequired": false,
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
   /**********************************************tez-site*****************************************/
     {
       "id": "site property",
@@ -690,6 +698,22 @@ module.exports =
       "serviceName": "HBASE",
       "index": 5
     },
+    {
+      "id": "site property",
+      "name": "hbase.coprocessor.region.classes",
+      "displayName": "hbase.coprocessor.region.classes",
+      "category": "Advanced",
+      "isRequired": false,
+      "serviceName": "HBASE"
+    },
+    {
+      "id": "site property",
+      "name": "hbase.coprocessor.master.classes",
+      "displayName": "hbase.coprocessor.master.classes",
+      "category": "Advanced",
+      "isRequired": false,
+      "serviceName": "HBASE"
+    },
     {
       "id": "site property",
       "name": "hbase.zookeeper.quorum",
@@ -1466,6 +1490,16 @@ module.exports =
       "serviceName": "FALCON",
       "filename": "falcon-startup.properties.xml"
     },
+    {
+      "id": "site property",
+      "name": "*.falcon.http.authentication.blacklisted.users",
+      "displayName": "*.falcon.http.authentication.blacklisted.users",
+      "isRequired": false,
+      "category": "FalconStartupSite",
+      "serviceName": "FALCON",
+      "filename": "falcon-startup.properties.xml"
+    },
+
   /**********************************************webhcat-site***************************************/
     {
       "id": "site property",

+ 16 - 0
ambari-web/app/data/site_properties.js

@@ -665,6 +665,22 @@ module.exports =
       "serviceName": "HBASE",
       "index": 5
     },
+    {
+      "id": "site property",
+      "name": "hbase.coprocessor.region.classes",
+      "displayName": "hbase.coprocessor.region.classes",
+      "category": "Advanced",
+      "isRequired": false,
+      "serviceName": "HBASE"
+    },
+    {
+      "id": "site property",
+      "name": "hbase.coprocessor.master.classes",
+      "displayName": "hbase.coprocessor.master.classes",
+      "category": "Advanced",
+      "isRequired": false,
+      "serviceName": "HBASE"
+    },
     {
       "id": "site property",
       "name": "dfs.client.read.shortcircuit",

+ 1 - 1
ambari-web/app/utils/helper.js

@@ -604,7 +604,7 @@ App.registerBoundHelper('formatWordBreak', Em.View.extend({
    * @type {string}
    */
   result: function() {
-    return this.get('content').replace(/\./g, '.<wbr />');
+    return this.get('content') && this.get('content').replace(/\./g, '.<wbr />');
   }.property('content')
 }));
 

+ 1 - 1
ambari-web/app/views/common/configs/services_config.js

@@ -350,7 +350,7 @@ App.ServiceConfigsByCategoryView = Ember.View.extend({
        });
      }
 
-     if (filter != null) {
+     if (filter != null && typeof searchString === "string") {
        return searchString.toLowerCase().indexOf(filter) > -1;
      } else {
        return true;