
AMBARI-11563. Blueprints does not filter out conditional properties from cluster configuration. (rnettleton)

Bob Nettleton, 10 years ago
commit 517bdeb4a9

+ 261 - 8
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java

@@ -19,6 +19,7 @@
 package org.apache.ambari.server.controller.internal;
 
 
+import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.topology.Cardinality;
 import org.apache.ambari.server.topology.ClusterTopology;
 import org.apache.ambari.server.topology.Configuration;
@@ -110,7 +111,26 @@ public class BlueprintConfigurationProcessor {
    * This will initially be used to filter out the Ranger Passwords, but
    * could be extended in the future for more generic purposes.
    */
-  private static final PropertyFilter[] propertyFilters = { new PasswordPropertyFilter() };
+  private static final PropertyFilter[] exportPropertyFilters = { new PasswordPropertyFilter() };
+
+  /**
+   * Statically-defined list of filters to apply on cluster config
+   * property updates.
+   *
+   * This will initially be used to filter out properties that do not
+   * need to be set, due to a given dependency property not having
+   * an expected value.
+   *
+   * The UI uses the Recommendations/StackAdvisor APIs to accomplish this, but
+   * Blueprints will use filters in the short term, and hopefully move to a more
+   * unified approach in the next release.
+   *
+   * This filter approach will also be used to remove properties in a given component
+   * that are not valid in a High-Availability deployment (example: HDFS NameNode HA).
+   */
+  private static final PropertyFilter[] clusterUpdatePropertyFilters =
+    { new DependencyEqualsFilter("hbase.security.authorization", "hbase-site", "true"),
+      new DependencyNotEqualsFilter("hive.server2.authentication", "hive-site", "NONE") };
 
   /**
    * Configuration properties to be updated
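
As a minimal standalone sketch of the semantics of these cluster-update filters (types and wiring are simplified here for illustration; the real processor resolves dependencies through the stack metadata shown later in this diff), the DependencyNotEqualsFilter registered above means that any property the stack marks as depending on hive.server2.authentication is dropped whenever that dependency is set to "NONE":

// Hypothetical, self-contained illustration of the filter semantics;
// not the actual BlueprintConfigurationProcessor wiring.
import java.util.HashMap;
import java.util.Map;

public class FilterSemanticsSketch {
  public static void main(String[] args) {
    Map<String, String> hiveSite = new HashMap<String, String>();
    hiveSite.put("hive.server2.authentication", "NONE");
    hiveSite.put("hive.server2.authentication.kerberos.keytab", "/etc/security/hive.keytab");

    // DependencyNotEqualsFilter("hive.server2.authentication", "hive-site", "NONE"):
    // dependent properties are included only while the dependency is NOT equal to "NONE".
    boolean includeDependents = !"NONE".equals(hiveSite.get("hive.server2.authentication"));
    if (!includeDependents) {
      hiveSite.remove("hive.server2.authentication.kerberos.keytab");
    }
    System.out.println(hiveSite); // prints {hive.server2.authentication=NONE}
  }
}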
@@ -167,6 +187,10 @@ public class BlueprintConfigurationProcessor {
     Map<String, Map<String, String>> clusterProps = clusterConfig.getFullProperties();
     Map<String, HostGroupInfo> groupInfoMap = clusterTopology.getHostGroupInfo();
 
+    // filter out any properties that should not be included, based on the dependencies
+    // specified in the stacks, and the filters defined in this class
+    doFilterPriorToClusterUpdate(clusterProps);
+
     for (Map<String, Map<String, PropertyUpdater>> updaterMap : createCollectionOfUpdaters()) {
       for (Map.Entry<String, Map<String, PropertyUpdater>> entry : updaterMap.entrySet()) {
         String type = entry.getKey();
@@ -265,14 +289,34 @@ public class BlueprintConfigurationProcessor {
    *
    * @param properties config properties to process for filtering
    */
-  private static void doFilterPriorToExport(Map<String, Map<String, String>> properties) {
+  private void doFilterPriorToExport(Map<String, Map<String, String>> properties) {
     for (String configType : properties.keySet()) {
       Map<String, String> configPropertiesPerType =
         properties.get(configType);
 
       Set<String> propertiesToExclude = new HashSet<String>();
       for (String propertyName : configPropertiesPerType.keySet()) {
-        if (shouldPropertyBeExcluded(propertyName, configPropertiesPerType.get(propertyName))) {
+        if (shouldPropertyBeExcludedForBlueprintExport(propertyName, configPropertiesPerType.get(propertyName), configType, clusterTopology)) {
+          propertiesToExclude.add(propertyName);
+        }
+      }
+
+      if (!propertiesToExclude.isEmpty()) {
+        for (String propertyName : propertiesToExclude) {
+          configPropertiesPerType.remove(propertyName);
+        }
+      }
+    }
+  }
+
+  private void doFilterPriorToClusterUpdate(Map<String, Map<String, String>> properties) {
+    for (String configType : properties.keySet()) {
+      Map<String, String> configPropertiesPerType =
+        properties.get(configType);
+
+      Set<String> propertiesToExclude = new HashSet<String>();
+      for (String propertyName : configPropertiesPerType.keySet()) {
+        if (shouldPropertyBeExcludedForClusterUpdate(propertyName, configPropertiesPerType.get(propertyName), configType, this.clusterTopology)) {
           propertiesToExclude.add(propertyName);
         }
       }
@@ -647,12 +691,37 @@ public class BlueprintConfigurationProcessor {
    *
    * @param propertyName config property name
    * @param propertyValue config property value
+   * @param propertyType config type that contains this property
+   * @param topology cluster topology instance
    * @return true if the property should be excluded
    *         false if the property should not be excluded
    */
-  private static boolean shouldPropertyBeExcluded(String propertyName, String propertyValue) {
-    for(PropertyFilter filter : propertyFilters) {
-      if (!filter.isPropertyIncluded(propertyName, propertyValue)) {
+  private static boolean shouldPropertyBeExcludedForBlueprintExport(String propertyName, String propertyValue, String propertyType, ClusterTopology topology) {
+    for(PropertyFilter filter : exportPropertyFilters) {
+      if (!filter.isPropertyIncluded(propertyName, propertyValue, propertyType, topology)) {
+        return true;
+      }
+    }
+
+    // if no filters require that the property be excluded,
+    // then allow it to be included in the property collection
+    return false;
+  }
+
+  /**
+   * Convenience method to iterate over the cluster update filters, and determine if a given property
+   * should be excluded from a collection.
+   *
+   * @param propertyName name of property to examine
+   * @param propertyValue value of the current property
+   * @param propertyType configuration type that contains this property
+   * @param topology the cluster topology instance
+   * @return true if the given property should be excluded
+   *         false if the given property should be included
+   */
+  private static boolean shouldPropertyBeExcludedForClusterUpdate(String propertyName, String propertyValue, String propertyType, ClusterTopology topology) {
+    for(PropertyFilter filter : clusterUpdatePropertyFilters) {
+      if (!filter.isPropertyIncluded(propertyName, propertyValue, propertyType, topology)) {
         return true;
       }
     }
@@ -662,6 +731,8 @@ public class BlueprintConfigurationProcessor {
     return false;
   }
 
+
+
   /**
    * Update single host topology configuration properties for blueprint export.
    *
@@ -1997,10 +2068,12 @@ public class BlueprintConfigurationProcessor {
      *
      * @param propertyName property name
      * @param propertyValue property value
+     * @param configType config type that contains this property
+     * @param topology cluster topology instance
      * @return true if the property should be included
      *         false if the property should not be included
      */
-    boolean isPropertyIncluded(String propertyName, String propertyValue);
+    boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology);
   }
 
   /**
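
The widened isPropertyIncluded signature gives every filter access to the config type and the cluster topology, not just the property name and value. A hypothetical additional filter (illustrative only, not part of this commit; assumed to be nested in BlueprintConfigurationProcessor alongside the filters above) could use the new arguments like this:

  // Hypothetical filter using the widened interface; the PropertyFilter
  // interface and ClusterTopology type are the ones shown in this diff.
  private static class SecondaryNameNodePropertyFilter implements PropertyFilter {
    @Override
    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
      // Example condition: when NameNode HA is enabled, drop SecondaryNameNode
      // settings from hdfs-site, since HA deployments do not use that component.
      if ("hdfs-site".equals(configType) && topology.isNameNodeHAEnabled()) {
        return !propertyName.startsWith("dfs.namenode.secondary.");
      }
      return true;
    }
  }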
@@ -2027,14 +2100,194 @@ public class BlueprintConfigurationProcessor {
      *
      * @param propertyName property name
      * @param propertyValue property value
+     * @param configType config type that contains this property
+     * @param topology cluster topology instance
      *
      * @return true if the property should be included
      *         false if the property should not be included
      */
     @Override
-    public boolean isPropertyIncluded(String propertyName, String propertyValue) {
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
       return !PASSWORD_NAME_REGEX.matcher(propertyName).matches();
     }
   }
 
+
+  /**
+   * Filter implementation that determines if a property should be included in
+   * a collection by inspecting the configuration dependencies included in the
+   * stack definitions for a given property.
+   *
+   * The DependencyFilter is initialized with a given property that is listed
+   * as a dependency of some properties in the stacks. If the dependency is found,
+   * it must match a given condition (implemented in concrete subclasses) in
+   * order to be included in a collection.
+   */
+  private static abstract class DependencyFilter implements PropertyFilter {
+
+    private final String dependsOnPropertyName;
+
+    private final String dependsOnConfigType;
+
+    DependencyFilter(String dependsOnPropertyName, String dependsOnConfigType) {
+      this.dependsOnPropertyName = dependsOnPropertyName;
+      this.dependsOnConfigType = dependsOnConfigType;
+    }
+
+
+    /**
+     * Inspects stack dependencies to determine if a given property
+     * should be included in a collection.
+     *
+     * @param propertyName property name
+     * @param propertyValue property value
+     * @param configType config type that contains this property
+     * @param topology cluster topology instance
+     *
+     * @return true if the property should be included
+     *         false if the property should not be included
+     */
+    @Override
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
+      Stack stack = topology.getBlueprint().getStack();
+      Configuration configuration = topology.getConfiguration();
+
+      final String serviceName = stack.getServiceForConfigType(configType);
+      Map<String, Stack.ConfigProperty> typeProperties =
+        stack.getConfigurationPropertiesWithMetadata(serviceName, configType);
+
+      Stack.ConfigProperty configProperty = typeProperties.get(propertyName);
+      if (configProperty != null) {
+        Set<PropertyDependencyInfo> dependencyInfos = configProperty.getDependsOnProperties();
+        if (dependencyInfos != null) {
+          // iterate over the dependencies specified for this property in the stack
+          for (PropertyDependencyInfo propertyDependencyInfo : dependencyInfos) {
+            if (propertyDependencyInfo.getName().equals(dependsOnPropertyName) && (propertyDependencyInfo.getType().equals(dependsOnConfigType))) {
+              // this property depends upon one of the registered dependency properties
+              Map<String, Map<String, String>> clusterConfig = configuration.getFullProperties();
+              Map<String, String> configByType = clusterConfig.get(dependsOnConfigType);
+              return isConditionSatisfied(dependsOnPropertyName, configByType.get(dependsOnPropertyName), dependsOnConfigType);
+            }
+          }
+        }
+      }
+
+      // always include properties by default, unless a defined
+      // filter is found and the condition specified by the filter
+      // is not satisfied
+      return true;
+    }
+
+    /**
+     * Abstract method used to determine if the value of a given dependency property
+     * meets a given condition.
+     *
+     * @param propertyName name of property
+     * @param propertyValue value of property
+     * @param propertyType configuration type that contains this property
+     * @return  true if the condition is satisfied for this property
+     *          false if the condition is not satisfied for this property
+     */
+    public abstract boolean isConditionSatisfied(String propertyName, String propertyValue, String propertyType);
+
+  }
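
Because isConditionSatisfied is the only abstract hook, new dependency conditions only require small subclasses. As a sketch (a hypothetical class, not part of this commit), a regex-based variant in the style of the two concrete filters below might look like:

  // Hypothetical subclass of the DependencyFilter above; includes dependent
  // properties only when the dependency's value matches a regular expression.
  private static class DependencyMatchesFilter extends DependencyFilter {

    private final java.util.regex.Pattern pattern;

    DependencyMatchesFilter(String dependsOnPropertyName, String dependsOnConfigType, String regex) {
      super(dependsOnPropertyName, dependsOnConfigType);
      this.pattern = java.util.regex.Pattern.compile(regex);
    }

    @Override
    public boolean isConditionSatisfied(String propertyName, String propertyValue, String propertyType) {
      return propertyValue != null && pattern.matcher(propertyValue).matches();
    }
  }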
+
+  /**
+   * DependencyFilter subclass that requires that the specified
+   * dependency have a specific value in order for properties that
+   * depend on it to be included in a collection.
+   */
+  private static class DependencyEqualsFilter extends DependencyFilter {
+
+    private final String value;
+
+    DependencyEqualsFilter(String dependsOnPropertyName, String dependsOnConfigType, String value) {
+      super(dependsOnPropertyName, dependsOnConfigType);
+
+      this.value = value;
+    }
+
+    /**
+     *
+     * @param propertyName name of property
+     * @param propertyValue value of property
+     * @param propertyType configuration type that contains this property
+     * @return true if the property is equal to the expected value
+     *         false if the property does not equal the expected value
+     */
+    @Override
+    public boolean isConditionSatisfied(String propertyName, String propertyValue, String propertyType) {
+      return value.equals(propertyValue);
+    }
+  }
+
+  /**
+   * DependencyFilter subclass that requires that the specified
+   * dependency not have the specified value in order for properties that
+   * depend on it to be included in a collection.
+   */
+  private static class DependencyNotEqualsFilter extends DependencyFilter {
+
+    private final String value;
+
+    DependencyNotEqualsFilter(String dependsOnPropertyName, String dependsOnConfigType, String value) {
+      super(dependsOnPropertyName, dependsOnConfigType);
+
+      this.value = value;
+    }
+
+    /**
+     *
+     * @param propertyName name of property
+     * @param propertyValue value of property
+     * @param propertyType configuration type that contains this property
+     * @return true if the property is not equal to the expected value
+     *         false if the property is equal to the expected value
+     *
+     */
+    @Override
+    public boolean isConditionSatisfied(String propertyName, String propertyValue, String propertyType) {
+      return !value.equals(propertyValue);
+    }
+  }
+
+  /**
+   * Filter implementation that scans for HDFS NameNode properties that should be
+   * removed/ignored when HDFS NameNode HA is enabled.
+   */
+  private static class HDFSNameNodeHAFilter implements PropertyFilter {
+
+    /**
+     * Set of HDFS Property names that are only valid in a non-HA scenario.
+     * In an HA setup, the property names include the names of the nameservice and
+     * namenode.
+     */
+    private final Set<String> setOfHDFSPropertyNamesNonHA =
+      Collections.unmodifiableSet( new HashSet<String>(Arrays.asList("dfs.namenode.http-address", "dfs.namenode.https-address", "dfs.namenode.rpc-address")));
+
+
+    /**
+     *
+     * @param propertyName property name
+     * @param propertyValue property value
+     * @param configType config type that contains this property
+     * @param topology cluster topology instance
+     *
+     * @return true if the property should be included
+     *         false if the property should not be included
+     */
+    @Override
+    public boolean isPropertyIncluded(String propertyName, String propertyValue, String configType, ClusterTopology topology) {
+      if (topology.isNameNodeHAEnabled()) {
+        if (setOfHDFSPropertyNamesNonHA.contains(propertyName)) {
+          return false;
+        }
+      }
+
+      return true;
+    }
+  }
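
For context, when NameNode HA is enabled these unsuffixed property names are replaced by per-nameservice, per-NameNode variants (for example, dfs.namenode.rpc-address becomes dfs.namenode.rpc-address.<nameservice>.<namenode-id>, such as dfs.namenode.rpc-address.mycluster.nn1), which is why the base names above are only valid in non-HA clusters. A rough usage sketch, where topology is an assumed ClusterTopology with HA enabled:

  // Sketch of the filter's effect under an assumed HA-enabled topology.
  PropertyFilter haFilter = new HDFSNameNodeHAFilter();
  // excluded: the unsuffixed, non-HA address
  boolean keepBase = haFilter.isPropertyIncluded("dfs.namenode.rpc-address", "c6401.ambari.apache.org:8020", "hdfs-site", topology);              // false
  // included: the HA-suffixed variant is not in the filter's exclusion set
  boolean keepHA = haFilter.isPropertyIncluded("dfs.namenode.rpc-address.mycluster.nn1", "c6401.ambari.apache.org:8020", "hdfs-site", topology);  // true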
+
+
+
 }

+ 13 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/Stack.java

@@ -38,6 +38,7 @@ import org.apache.ambari.server.orm.entities.StackEntity;
 import org.apache.ambari.server.state.AutoDeployInfo;
 import org.apache.ambari.server.state.ComponentInfo;
 import org.apache.ambari.server.state.DependencyInfo;
+import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.topology.Cardinality;
 import org.apache.ambari.server.topology.Configuration;
@@ -313,6 +314,10 @@ public class Stack {
     return configMap;
   }
 
+  public Map<String, ConfigProperty> getConfigurationPropertiesWithMetadata(String service, String type) {
+    return serviceConfigurations.get(service).get(type);
+  }
+
   /**
    * Get all required config properties for the specified service.
    *
@@ -721,13 +726,16 @@ public class Stack {
     private Map<String, String> attributes;
     private Set<PropertyInfo.PropertyType> propertyTypes;
     private String type;
+    private Set<PropertyDependencyInfo> dependsOnProperties =
+      Collections.emptySet();
 
-    private ConfigProperty(StackConfigurationResponse config) {
+    ConfigProperty(StackConfigurationResponse config) {
       this.name = config.getPropertyName();
       this.value = config.getPropertyValue();
       this.attributes = config.getPropertyAttributes();
       this.propertyTypes = config.getPropertyType();
       this.type = normalizeType(config.getType());
+      this.dependsOnProperties = config.getDependsOnProperties();
     }
 
     public ConfigProperty(String type, String name, String value) {
@@ -768,6 +776,10 @@ public class Stack {
       this.attributes = attributes;
     }
 
+    Set<PropertyDependencyInfo> getDependsOnProperties() {
+      return this.dependsOnProperties;
+    }
+
     private String normalizeType(String type) {
       //strip .xml from type
       if (type.endsWith(".xml")) {
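
Together with the existing getServiceForConfigType, the new getConfigurationPropertiesWithMetadata accessor lets callers walk from a config type to the per-property dependency metadata. A minimal usage sketch (stack is an assumed Stack instance; getDependsOnProperties is package-private, so this would run from within the same package):

  // Sketch: look up the declared stack dependencies of a single property.
  String configType = "hive-site";
  String service = stack.getServiceForConfigType(configType);
  Map<String, Stack.ConfigProperty> props =
    stack.getConfigurationPropertiesWithMetadata(service, configType);
  Stack.ConfigProperty keytab = props.get("hive.server2.authentication.kerberos.keytab");
  if (keytab != null) {
    for (PropertyDependencyInfo dep : keytab.getDependsOnProperties()) {
      System.out.println(dep.getType() + "/" + dep.getName());  // e.g. "hive-site/hive.server2.authentication"
    }
  }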

+ 280 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessorTest.java

@@ -39,6 +39,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.ambari.server.state.PropertyDependencyInfo;
 import org.apache.ambari.server.state.ServiceInfo;
 import org.apache.ambari.server.topology.AmbariContext;
 import org.apache.ambari.server.topology.Blueprint;
@@ -80,6 +81,7 @@ public class BlueprintConfigurationProcessorTest {
     expect(stack.getVersion()).andReturn("1").anyTimes();
     // return false for all components since for this test we don't care about the value
     expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
+    expect(stack.getConfigurationPropertiesWithMetadata(anyObject(String.class), anyObject(String.class))).andReturn(Collections.<String, Stack.ConfigProperty>emptyMap()).anyTimes();
 
     expect(serviceInfo.getRequiredProperties()).andReturn(
         Collections.<String, org.apache.ambari.server.state.PropertyInfo>emptyMap()).anyTimes();
@@ -3381,6 +3383,283 @@ public class BlueprintConfigurationProcessorTest {
       webHCatSiteProperties.get("templeton.hive.properties"));
   }
 
+  @Test
+  public void testHiveConfigClusterUpdatePropertiesFilterAuthenticationOff() throws Exception {
+    // reset the stack mock, since we need more than the default behavior for this test
+    reset(stack);
+
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hiveSiteProperties = new HashMap<String, String>();
+    properties.put("hive-site", hiveSiteProperties);
+
+    // set up properties for Hive to simulate the case of Hive Authentication being off
+    hiveSiteProperties.put("hive.server2.authentication", "NONE");
+    hiveSiteProperties.put("hive.server2.authentication.kerberos.keytab", " ");
+    hiveSiteProperties.put("hive.server2.authentication.kerberos.principal", " ");
+
+    Map<String, Stack.ConfigProperty> mapOfMetadata =
+      new HashMap<String, Stack.ConfigProperty>();
+
+    // simulate the stack dependencies for these Hive properties, which depend upon
+    // hive.server2.authentication being enabled
+    Stack.ConfigProperty configProperty1 =
+      new Stack.ConfigProperty("hive-site", "hive.server2.authentication.kerberos.keytab", " ") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hive-site", "hive.server2.authentication");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
+
+    Stack.ConfigProperty configProperty2 =
+      new Stack.ConfigProperty("hive-site", "hive.server2.authentication.kerberos.principal", " ") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hive-site", "hive.server2.authentication");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
+
+    mapOfMetadata.put("hive.server2.authentication.kerberos.keytab", configProperty1);
+    mapOfMetadata.put("hive.server2.authentication.kerberos.principal", configProperty2);
+
+    // defaults from init() method that we need
+    expect(stack.getName()).andReturn("testStack").anyTimes();
+    expect(stack.getVersion()).andReturn("1").anyTimes();
+    expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
+
+    // customized stack calls for this test only
+    expect(stack.getServiceForConfigType("hive-site")).andReturn("HIVE").atLeastOnce();
+    expect(stack.getConfigurationPropertiesWithMetadata("HIVE", "hive-site")).andReturn(mapOfMetadata).atLeastOnce();
+
+    Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    List<String> hosts = new ArrayList<String>();
+    hosts.add("some-hose");
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, hosts);
+
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
+
+    // call top-level cluster config update method
+    updater.doUpdateForClusterCreate();
+
+    assertFalse("hive.server2.authentication.kerberos.keytab should have been filtered out of configuration",
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.keytab"));
+    assertFalse("hive.server2.authentication.kerberos.principal should have been filtered out of configuration",
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.principal"));
+  }
+
+  @Test
+  public void testHiveConfigClusterUpdatePropertiesFilterAuthenticationOn() throws Exception {
+    // reset the stack mock, since we need more than the default behavior for this test
+    reset(stack);
+
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hiveSiteProperties = new HashMap<String, String>();
+    properties.put("hive-site", hiveSiteProperties);
+
+    // set up properties for Hive to simulate the case of Hive Authentication being on,
+    // and set to KERBEROS
+    hiveSiteProperties.put("hive.server2.authentication", "KERBEROS");
+    hiveSiteProperties.put("hive.server2.authentication.kerberos.keytab", " ");
+    hiveSiteProperties.put("hive.server2.authentication.kerberos.principal", " ");
+
+    Map<String, Stack.ConfigProperty> mapOfMetadata =
+      new HashMap<String, Stack.ConfigProperty>();
+
+    // simulate the stack dependencies for these Hive properties, which depend upon
+    // hive.server2.authentication being enabled
+    Stack.ConfigProperty configProperty1 =
+      new Stack.ConfigProperty("hive-site", "hive.server2.authentication.kerberos.keytab", " ") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hive-site", "hive.server2.authentication");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
+
+    Stack.ConfigProperty configProperty2 =
+      new Stack.ConfigProperty("hive-site", "hive.server2.authentication.kerberos.principal", " ") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hive-site", "hive.server2.authentication");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
+
+    mapOfMetadata.put("hive.server2.authentication.kerberos.keytab", configProperty1);
+    mapOfMetadata.put("hive.server2.authentication.kerberos.principal", configProperty2);
+
+    // defaults from init() method that we need
+    expect(stack.getName()).andReturn("testStack").anyTimes();
+    expect(stack.getVersion()).andReturn("1").anyTimes();
+    expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
+
+    // customized stack calls for this test only
+    expect(stack.getServiceForConfigType("hive-site")).andReturn("HIVE").atLeastOnce();
+    expect(stack.getConfigurationPropertiesWithMetadata("HIVE", "hive-site")).andReturn(mapOfMetadata).atLeastOnce();
+
+    Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    List<String> hosts = new ArrayList<String>();
+    hosts.add("some-hose");
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, hosts);
+
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
+
+    // call top-level cluster config update method
+    updater.doUpdateForClusterCreate();
+
+    assertTrue("hive.server2.authentication.kerberos.keytab should have been included in configuration",
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.keytab"));
+    assertTrue("hive.server2.authentication.kerberos.principal should have been included in configuration",
+      hiveSiteProperties.containsKey("hive.server2.authentication.kerberos.principal"));
+  }
+
+  @Test
+  public void testHBaseConfigClusterUpdatePropertiesFilterAuthorizationOff() throws Exception {
+    // reset the stack mock, since we need more than the default behavior for this test
+    reset(stack);
+
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hbaseSiteProperties = new HashMap<String, String>();
+    properties.put("hbase-site", hbaseSiteProperties);
+
+    // set up properties for HBase to simulate the case of authorization being off
+    hbaseSiteProperties.put("hbase.security.authorization", "false");
+    hbaseSiteProperties.put("hbase.coprocessor.regionserver.classes", " ");
+
+    Map<String, Stack.ConfigProperty> mapOfMetadata =
+      new HashMap<String, Stack.ConfigProperty>();
+
+    // simulate the stack dependencies for these HBase properties, which depend upon
+    // hbase.security.authorization being enabled
+    Stack.ConfigProperty configProperty1 =
+      new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.regionserver.classes", " ") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
+
+    mapOfMetadata.put("hbase.coprocessor.regionserver.classes", configProperty1);
+
+    // defaults from init() method that we need
+    expect(stack.getName()).andReturn("testStack").anyTimes();
+    expect(stack.getVersion()).andReturn("1").anyTimes();
+    expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
+
+    // customized stack calls for this test only
+    expect(stack.getServiceForConfigType("hbase-site")).andReturn("HBASE").atLeastOnce();
+    expect(stack.getConfigurationPropertiesWithMetadata("HBASE", "hbase-site")).andReturn(mapOfMetadata).atLeastOnce();
+
+    Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    List<String> hosts = new ArrayList<String>();
+    hosts.add("some-hose");
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, hosts);
+
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
+
+    // call top-level cluster config update method
+    updater.doUpdateForClusterCreate();
+
+    assertFalse("hbase.coprocessor.regionserver.classes should have been filtered out of configuration",
+      hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
+
+  }
+
+  @Test
+  public void testHBaseConfigClusterUpdatePropertiesFilterAuthorizationOn() throws Exception {
+    // reset the stack mock, since we need more than the default behavior for this test
+    reset(stack);
+
+    final String expectedHostGroupName = "host_group_1";
+
+    Map<String, Map<String, String>> properties = new HashMap<String, Map<String, String>>();
+    Map<String, String> hbaseSiteProperties = new HashMap<String, String>();
+    properties.put("hbase-site", hbaseSiteProperties);
+
+    // set up properties for HBase to simulate the case of authorization being on
+    hbaseSiteProperties.put("hbase.security.authorization", "true");
+    hbaseSiteProperties.put("hbase.coprocessor.regionserver.classes", " ");
+
+    Map<String, Stack.ConfigProperty> mapOfMetadata =
+      new HashMap<String, Stack.ConfigProperty>();
+
+    // simulate the stack dependencies for these HBase properties, which depend upon
+    // hbase.security.authorization being enabled
+    Stack.ConfigProperty configProperty1 =
+      new Stack.ConfigProperty("hbase-site", "hbase.coprocessor.regionserver.classes", " ") {
+        @Override
+        Set<PropertyDependencyInfo> getDependsOnProperties() {
+          PropertyDependencyInfo dependencyInfo = new PropertyDependencyInfo("hbase-site", "hbase.security.authorization");
+          return Collections.singleton(dependencyInfo);
+        }
+      };
+
+    mapOfMetadata.put("hbase.coprocessor.regionserver.classes", configProperty1);
+
+    // defaults from init() method that we need
+    expect(stack.getName()).andReturn("testStack").anyTimes();
+    expect(stack.getVersion()).andReturn("1").anyTimes();
+    expect(stack.isMasterComponent((String) anyObject())).andReturn(false).anyTimes();
+
+    // customized stack calls for this test only
+    expect(stack.getServiceForConfigType("hbase-site")).andReturn("HBASE").atLeastOnce();
+    expect(stack.getConfigurationPropertiesWithMetadata("HBASE", "hbase-site")).andReturn(mapOfMetadata).atLeastOnce();
+
+    Configuration clusterConfig = new Configuration(properties, Collections.<String, Map<String, Map<String, String>>>emptyMap());
+
+    Collection<String> hgComponents = new HashSet<String>();
+    hgComponents.add("NAMENODE");
+    List<String> hosts = new ArrayList<String>();
+    hosts.add("some-hose");
+    TestHostGroup group1 = new TestHostGroup(expectedHostGroupName, hgComponents, hosts);
+
+
+    Collection<TestHostGroup> hostGroups = new HashSet<TestHostGroup>();
+    hostGroups.add(group1);
+
+    ClusterTopology topology = createClusterTopology(bp, clusterConfig, hostGroups);
+    BlueprintConfigurationProcessor updater = new BlueprintConfigurationProcessor(topology);
+
+    // call top-level cluster config update method
+    updater.doUpdateForClusterCreate();
+
+    assertTrue("hbase.coprocessor.regionserver.classes should have been included in configuration",
+      hbaseSiteProperties.containsKey("hbase.coprocessor.regionserver.classes"));
+
+  }
+
   @Test
   public void testHiveConfigClusterUpdateDefaultValue() throws Exception {
     final String expectedHostGroupName = "host_group_1";
@@ -3670,6 +3949,7 @@ public class BlueprintConfigurationProcessorTest {
 
     assertEquals("instance.volumes should not be modified by cluster update when NameNode HA is enabled.",
         "hdfs://" + expectedNameService + "/accumulo/test/instance/volumes", accumuloSiteProperties.get("instance.volumes"));
+
   }
 
   @Test

+ 32 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/StackTest.java

@@ -27,19 +27,23 @@ import org.apache.ambari.server.controller.StackServiceComponentRequest;
 import org.apache.ambari.server.controller.StackServiceComponentResponse;
 import org.apache.ambari.server.controller.StackServiceRequest;
 import org.apache.ambari.server.controller.StackServiceResponse;
-import org.apache.ambari.server.state.DependencyInfo;
+import org.apache.ambari.server.state.*;
+import org.apache.ambari.server.state.PropertyInfo;
 import org.apache.ambari.server.topology.Configuration;
 import org.easymock.Capture;
 import org.easymock.EasyMock;
+import org.easymock.EasyMockSupport;
 import org.junit.Test;
 
 import java.util.Collections;
+import java.util.HashSet;
 import java.util.Set;
 
 import static org.easymock.EasyMock.capture;
 import static org.easymock.EasyMock.expect;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
 import static org.powermock.api.easymock.PowerMock.createNiceMock;
 import static org.powermock.api.easymock.PowerMock.replay;
 
@@ -114,4 +118,31 @@ public class StackTest {
     assertNull(stackComponentRequest.getComponentName());
   }
 
+  @Test
+  public void testConfigPropertyReadsInDependencies() throws Exception {
+    EasyMockSupport mockSupport = new EasyMockSupport();
+
+    Set<PropertyDependencyInfo> setOfDependencyInfo = new HashSet<PropertyDependencyInfo>();
+
+    StackConfigurationResponse mockResponse = mockSupport.createMock(StackConfigurationResponse.class);
+    expect(mockResponse.getPropertyName()).andReturn("test-property-one");
+    expect(mockResponse.getPropertyValue()).andReturn("test-value-one");
+    expect(mockResponse.getPropertyAttributes()).andReturn(Collections.<String, String>emptyMap());
+    expect(mockResponse.getPropertyType()).andReturn(Collections.<PropertyInfo.PropertyType>emptySet());
+    expect(mockResponse.getType()).andReturn("test-type-one");
+    expect(mockResponse.getDependsOnProperties()).andReturn(setOfDependencyInfo);
+
+    mockSupport.replayAll();
+
+    Stack.ConfigProperty configProperty =
+      new Stack.ConfigProperty(mockResponse);
+
+    assertSame("DependencyInfo was not properly parsed from the stack response object",
+          setOfDependencyInfo, configProperty.getDependsOnProperties());
+
+
+    mockSupport.verifyAll();
+
+  }
+
 }