AMBARI-16994: Ambari Server Upgrade should always update stack_features and stack_tools config properties (jluniya)

Jayush Luniya, 9 years ago
commit 95279139f4

+ 14 - 249
ambari-common/src/main/python/resource_management/libraries/functions/stack_features.py

@@ -20,242 +20,7 @@ limitations under the License.
 
 # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import ambari_simplejson as json
-
-_DEFAULT_STACK_FEATURES = {
-  "stack_features": [
-    {
-      "name": "snappy",
-      "description": "Snappy compressor/decompressor support",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "lzo",
-      "description": "LZO libraries support",
-      "min_version": "2.2.1.0"
-    },
-    {
-      "name": "express_upgrade",
-      "description": "Express upgrade support",
-      "min_version": "2.1.0.0"
-    },
-    {
-      "name": "rolling_upgrade",
-      "description": "Rolling upgrade support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "config_versioning",
-      "description": "Configurable versions support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "datanode_non_root",
-      "description": "DataNode running as non-root support (AMBARI-7615)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "remove_ranger_hdfs_plugin_env",
-      "description": "HDFS removes Ranger env files (AMBARI-14299)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger",
-      "description": "Ranger Service support",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_tagsync_component",
-      "description": "Ranger Tagsync component support (AMBARI-14383)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "phoenix",
-      "description": "Phoenix Service support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "nfs",
-      "description": "NFS support",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "tez_for_spark",
-      "description": "Tez dependency for Spark",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "timeline_state_store",
-      "description": "Yarn application timeline-service supports state store property (AMBARI-11442)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "copy_tarball_to_hdfs",
-      "description": "Copy tarball to HDFS support (AMBARI-12113)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "spark_16plus",
-      "description": "Spark 1.6+",
-      "min_version": "2.4.0.0"
-    },
-    {
-      "name": "spark_thriftserver",
-      "description": "Spark Thrift Server",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "storm_kerberos",
-      "description": "Storm Kerberos support (AMBARI-7570)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "storm_ams",
-      "description": "Storm AMS integration (AMBARI-10710)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "create_kafka_broker_id",
-      "description": "Ambari should create Kafka Broker Id (AMBARI-12678)",
-      "min_version": "2.2.0.0",
-      "max_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_listeners",
-      "description": "Kafka listeners (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "kafka_kerberos",
-      "description": "Kafka Kerberos support (AMBARI-10984)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "pig_on_tez",
-      "description": "Pig on Tez support (AMBARI-7863)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_usersync_non_root",
-      "description": "Ranger Usersync as non-root user (AMBARI-10416)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "ranger_audit_db_support",
-      "description": "Ranger Audit to DB support",
-      "min_version": "2.2.0.0",
-      "max_version": "2.5.0.0"
-    },
-    {
-      "name": "accumulo_kerberos_user_auth",
-      "description": "Accumulo Kerberos User Auth (AMBARI-10163)",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "knox_versioned_data_dir",
-      "description": "Use versioned data dir for Knox (AMBARI-13164)",
-      "min_version": "2.3.2.0"
-    },
-    {
-      "name": "knox_sso_topology",
-      "description": "Knox SSO Topology support (AMBARI-13975)",
-      "min_version": "2.3.8.0"
-    },
-    {
-      "name": "atlas_rolling_upgrade",
-      "description": "Rolling upgrade support for Atlas",
-      "min_version": "2.3.0.0"
-    },
-    {
-      "name": "oozie_admin_user",
-      "description": "Oozie install user as an Oozie admin user (AMBARI-7976)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_create_hive_tez_configs",
-      "description": "Oozie create configs for Ambari Hive and Tez deployments (AMBARI-8074)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_setup_shared_lib",
-      "description": "Oozie setup tools used to shared Oozie lib to HDFS (AMBARI-7240)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "oozie_host_kerberos",
-      "description": "Oozie in secured clusters uses _HOST in Kerberos principal (AMBARI-9775)",
-      "min_version": "2.0.0.0",
-      "max_version": "2.2.0.0"
-    },
-    {
-      "name": "hive_metastore_upgrade_schema",
-      "description": "Hive metastore upgrade schema support (AMBARI-11176)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server_interactive",
-      "description": "Hive server interactive support (AMBARI-15573)",
-      "min_version": "2.5.0.0"
-     },
-    {
-      "name": "hive_webhcat_specific_configs",
-      "description": "Hive webhcat specific configurations support (AMBARI-12364)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_purge_table",
-      "description": "Hive purge table support (AMBARI-12260)",
-      "min_version": "2.3.0.0"
-     },
-    {
-      "name": "hive_server2_kerberized_env",
-      "description": "Hive server2 working on kerberized environment (AMBARI-13749)",
-      "min_version": "2.2.3.0",
-      "max_version": "2.2.5.0"
-     },
-    {
-      "name": "hive_env_heapsize",
-      "description": "Hive heapsize property defined in hive-env (AMBARI-12801)",
-      "min_version": "2.2.0.0"
-    },
-    {
-      "name": "ranger_kms_hsm_support",
-      "description": "Ranger KMS HSM support (AMBARI-15752)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_log4j_support",
-      "description": "Ranger supporting log-4j properties (AMBARI-15681)",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_kerberos_support",
-      "description": "Ranger Kerberos support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "ranger_usersync_password_jceks",
-      "description": "Saving Ranger Usersync credentials in jceks",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "logsearch_support",
-      "description": "LogSearch Service support",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "hbase_home_directory",
-      "description": "Hbase home directory in HDFS needed for HBASE backup",
-      "min_version": "2.5.0.0"
-    },
-    {
-      "name": "spark_livy",
-      "description": "Livy as slave component of spark",
-      "min_version": "2.5.0.0"
-    }
-  ]
-}
+from resource_management.core.exceptions import Fail
 
 def check_stack_feature(stack_feature, stack_version):
   """
@@ -268,24 +33,24 @@ def check_stack_feature(stack_feature, stack_version):
   from resource_management.libraries.functions.default import default
   from resource_management.libraries.functions.version import compare_versions
   stack_features_config = default("/configurations/cluster-env/stack_features", None)
-  data = _DEFAULT_STACK_FEATURES
 
   if not stack_version:
     return False
 
   if stack_features_config:
     data = json.loads(stack_features_config)
-  
-  for feature in data["stack_features"]:
-    if feature["name"] == stack_feature:
-      if "min_version" in feature:
-        min_version = feature["min_version"]
-        if compare_versions(stack_version, min_version, format = True) < 0:
-          return False
-      if "max_version" in feature:
-        max_version = feature["max_version"]
-        if compare_versions(stack_version, max_version, format = True) >= 0:
-          return False
-      return True
+    for feature in data["stack_features"]:
+      if feature["name"] == stack_feature:
+        if "min_version" in feature:
+          min_version = feature["min_version"]
+          if compare_versions(stack_version, min_version, format = True) < 0:
+            return False
+        if "max_version" in feature:
+          max_version = feature["max_version"]
+          if compare_versions(stack_version, max_version, format = True) >= 0:
+            return False
+        return True
+  else:
+    raise Fail("Stack features not defined by stack")
         
   return False
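A note on the behavioral change above: check_stack_feature() no longer falls back to the hard-coded _DEFAULT_STACK_FEATURES table; it now requires /configurations/cluster-env/stack_features to be present and raises Fail otherwise. The following standalone sketch shows the min/max range check the function performs (feature_in_range and as_key are hypothetical helpers, and a naive dotted-version comparison stands in for resource_management's compare_versions):

import json

def as_key(version):
    # Naive stand-in for compare_versions(); good enough for plain dotted versions.
    return [int(part) for part in version.split(".")]

def feature_in_range(feature_name, stack_version, stack_features_json):
    # Same lookup the patched check_stack_feature() performs, minus Ambari plumbing.
    data = json.loads(stack_features_json)
    for feature in data["stack_features"]:
        if feature["name"] != feature_name:
            continue
        if "min_version" in feature and as_key(stack_version) < as_key(feature["min_version"]):
            return False
        # max_version is an exclusive upper bound (note the >= in the real code).
        if "max_version" in feature and as_key(stack_version) >= as_key(feature["max_version"]):
            return False
        return True
    return False  # unknown feature name

config = '{"stack_features": [{"name": "rolling_upgrade", "min_version": "2.2.0.0"}]}'
print(feature_in_range("rolling_upgrade", "2.3.0.0", config))  # True
print(feature_in_range("rolling_upgrade", "2.1.0.0", config))  # False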

+ 4 - 9
ambari-common/src/main/python/resource_management/libraries/functions/stack_tools.py

@@ -24,19 +24,14 @@ __all__ = ["get_stack_tool", "get_stack_tool_name", "get_stack_tool_path",
 # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
 import ambari_simplejson as json
 
+from resource_management.core.exceptions import Fail
 from resource_management.core.logger import Logger
 from resource_management.core.utils import pad
 
+
 STACK_SELECTOR_NAME = "stack_selector"
 CONF_SELECTOR_NAME = "conf_selector"
 
-# Format
-# SELECTOR_NAME : ( "tool-name", "tool-path", "tool-package" )
-_DEFAULT_STACK_TOOLS = {
-  STACK_SELECTOR_NAME: ("hdp-select", "/usr/bin/hdp-select", "hdp-select"),
-  CONF_SELECTOR_NAME: ("conf-select", "/usr/bin/conf-select", "conf-select")
-}
-
 def get_stack_tool(name):
   """
   Give a tool selector name get the stack-specific tool name, tool path, tool package
@@ -44,12 +39,12 @@ def get_stack_tool(name):
   :return: tool_name, tool_path, tool_package
   """
   from resource_management.libraries.functions.default import default
+  stack_tools = None
   stack_tools_config = default("/configurations/cluster-env/stack_tools", None)
-  stack_tools = _DEFAULT_STACK_TOOLS
   if stack_tools_config:
     stack_tools = json.loads(stack_tools_config)
 
-  if name is None or name.lower() not in stack_tools:
+  if not stack_tools or not name or name.lower() not in stack_tools:
     Logger.warning("Cannot find config for {0} stack tool in {1}".format(str(name), str(stack_tools)))
     return (None, None, None)
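The same pattern applies here: with the _DEFAULT_STACK_TOOLS fallback gone, a missing stack_tools config now logs a warning and yields (None, None, None) instead of silently defaulting to hdp-select/conf-select. A hedged, self-contained sketch of the new lookup behavior (lookup_tool is a hypothetical stand-in for get_stack_tool; the JSON shape mirrors the deleted defaults):

import json

def lookup_tool(name, stack_tools_config):
    # No baked-in default any more: absent config means (None, None, None).
    stack_tools = json.loads(stack_tools_config) if stack_tools_config else None
    if not stack_tools or not name or name.lower() not in stack_tools:
        return (None, None, None)
    return tuple(stack_tools[name.lower()])

config = json.dumps({
    "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
    "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"],
})
print(lookup_tool("stack_selector", config))  # ('hdp-select', '/usr/bin/hdp-select', 'hdp-select')
print(lookup_tool("stack_selector", None))    # (None, None, None)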
 

+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/state/ConfigHelper.java

@@ -105,6 +105,8 @@ public class ConfigHelper {
   public static final String CLUSTER_ENV_RETRY_ENABLED = "command_retry_enabled";
   public static final String CLUSTER_ENV_RETRY_COMMANDS = "commands_to_retry";
   public static final String CLUSTER_ENV_RETRY_MAX_TIME_IN_SEC = "command_retry_max_time_in_sec";
+  public static final String CLUSTER_ENV_STACK_FEATURES_PROPERTY = "stack_features";
+  public static final String CLUSTER_ENV_STACK_TOOLS_PROPERTY = "stack_tools";
 
   public static final String HTTP_ONLY = "HTTP_ONLY";
   public static final String HTTPS_ONLY = "HTTPS_ONLY";

+ 55 - 1
ambari-server/src/main/java/org/apache/ambari/server/upgrade/FinalUpgradeCatalog.java

@@ -21,15 +21,33 @@ package org.apache.ambari.server.upgrade;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 import org.apache.ambari.server.AmbariException;
+import org.apache.ambari.server.api.services.AmbariMetaInfo;
+import org.apache.ambari.server.controller.AmbariManagementController;
+import org.apache.ambari.server.state.Cluster;
+import org.apache.ambari.server.state.Clusters;
+import org.apache.ambari.server.state.StackId;
+import org.apache.ambari.server.state.StackInfo;
+import org.apache.ambari.server.state.PropertyInfo;
+import org.apache.ambari.server.state.ConfigHelper;
 import org.apache.ambari.server.utils.VersionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.List;
 
 /**
  * Final upgrade catalog which simply updates database version (in case if no db changes between releases)
  */
 public class FinalUpgradeCatalog extends AbstractUpgradeCatalog {
 
+  /**
+   * Logger.
+   */
+  private static final Logger LOG = LoggerFactory.getLogger(FinalUpgradeCatalog.class);
+
   @Inject
   public FinalUpgradeCatalog(Injector injector) {
     super(injector);
@@ -47,7 +65,43 @@ public class FinalUpgradeCatalog extends AbstractUpgradeCatalog {
 
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
-    //noop
+    updateClusterEnv();
+  }
+
+  /**
+   * Updates {@code cluster-env} in the following ways:
+   * <ul>
+   * <li>Adds/Updates {@link ConfigHelper#CLUSTER_ENV_STACK_FEATURES_PROPERTY} from stack</li>
+   * <li>Adds/Updates {@link ConfigHelper#CLUSTER_ENV_STACK_TOOLS_PROPERTY} from stack</li>
+   * </ul>
+   *
+   * Note: Config properties stack_features and stack_tools should always be updated to latest values as defined
+   * in the stack on an Ambari upgrade.
+   *
+   * @throws Exception
+   */
+  protected void updateClusterEnv() throws AmbariException {
+
+    AmbariManagementController ambariManagementController = injector.getInstance(
+        AmbariManagementController.class);
+    AmbariMetaInfo ambariMetaInfo = injector.getInstance(AmbariMetaInfo.class);
+
+    LOG.info("Updating stack_features and stack_tools config properties.");
+    Clusters clusters = ambariManagementController.getClusters();
+    Map<String, Cluster> clusterMap = getCheckedClusterMap(clusters);
+    for (final Cluster cluster : clusterMap.values()) {
+      Map<String, String> propertyMap = new HashMap<>();
+      StackId stackId = cluster.getCurrentStackVersion();
+      StackInfo stackInfo = ambariMetaInfo.getStack(stackId.getStackName(), stackId.getStackVersion());
+      List<PropertyInfo> properties = stackInfo.getProperties();
+      for(PropertyInfo property : properties) {
+        if(property.getName().equals(ConfigHelper.CLUSTER_ENV_STACK_FEATURES_PROPERTY) ||
+            property.getName().equals(ConfigHelper.CLUSTER_ENV_STACK_TOOLS_PROPERTY)) {
+          propertyMap.put(property.getName(), property.getValue());
+        }
+      }
+      updateConfigurationPropertiesForCluster(cluster, ConfigHelper.CLUSTER_ENV, propertyMap, true, true);
+    }
   }
 
   @Override

+ 13 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/configuration/cluster-env.xml

@@ -181,6 +181,7 @@ gpgcheck=0</value>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
+  <!-- Define stack_tools property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_tools</name>
     <value/>
@@ -189,10 +190,14 @@ gpgcheck=0</value>
     <value-attributes>
       <property-file-name>stack_tools.json</property-file-name>
       <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
     </value-attributes>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
+  <!-- Define stack_features property in the base stack. DO NOT override this property for each stack version -->
   <property>
     <name>stack_features</name>
     <value/>
@@ -201,6 +206,9 @@ gpgcheck=0</value>
     <value-attributes>
       <property-file-name>stack_features.json</property-file-name>
       <property-file-type>json</property-file-type>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
     </value-attributes>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
@@ -209,6 +217,11 @@ gpgcheck=0</value>
     <name>stack_root</name>
     <value>/usr/hdp</value>
     <description>Stack root folder</description>
+    <value-attributes>
+      <read-only>true</read-only>
+      <overridable>false</overridable>
+      <visible>false</visible>
+    </value-attributes>
     <on-ambari-upgrade add="false" change="true" delete="true"/>
     <on-stack-upgrade add="true" change="true" delete="false"/>
   </property>
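With these value-attributes, stack_tools and stack_features become read-only, non-overridable, hidden properties whose values are loaded from stack_tools.json and stack_features.json in the base stack. Those JSON files are not part of this diff; based on the Python defaults removed from stack_features.py and stack_tools.py above, their plausible shape is as follows (a sketch, not the actual file contents):

import json

stack_tools = {
    "stack_selector": ["hdp-select", "/usr/bin/hdp-select", "hdp-select"],
    "conf_selector": ["conf-select", "/usr/bin/conf-select", "conf-select"],
}
stack_features = {
    "stack_features": [
        # Open-ended range: available from min_version onward.
        {"name": "rolling_upgrade", "description": "Rolling upgrade support",
         "min_version": "2.2.0.0"},
        # Bounded range: max_version is an exclusive upper bound.
        {"name": "snappy", "description": "Snappy compressor/decompressor support",
         "min_version": "2.0.0.0", "max_version": "2.2.0.0"},
    ]
}
print(json.dumps(stack_tools))     # the value cluster-env/stack_tools would carry
print(json.dumps(stack_features))  # the value cluster-env/stack_features would carry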

+ 6 - 2
ambari-server/src/test/python/TestVersionSelectUtil.py

@@ -39,7 +39,8 @@ class TestVersionSelectUtil(TestCase):
   @patch('__builtin__.open')
   @patch("resource_management.core.shell.call")
   @patch('os.path.exists')
-  def test_get_component_version(self, os_path_exists_mock, call_mock, open_mock):
+  @patch("resource_management.libraries.functions.stack_tools.get_stack_tool")
+  def test_get_component_version(self, get_stack_tool_mock, os_path_exists_mock, call_mock, open_mock):
     stack_expected_version = "2.2.1.0-2175"
 
     # Mock classes for reading from a file
@@ -69,11 +70,14 @@ class TestVersionSelectUtil(TestCase):
       def read(self):
         return super(MagicFile3, self).read("hadoop-hdfs-datanode")
 
+    get_stack_tool_mock.side_effect = [("hdp-select", "/usr/bin/hdp-select", "hdp-select"),
+                                       ("hdp-select", "/usr/bin/hdp-select", "hdp-select"),
+                                       ("hdp-select", "/usr/bin/hdp-select", "hdp-select"),
+                                       ("hdp-select", "/usr/bin/hdp-select", "hdp-select")]
     os_path_exists_mock.side_effect = [False, True, True, True]
     open_mock.side_effect = [MagicFile1(), MagicFile2(), MagicFile3()]
     call_mock.side_effect = [(0, "value will come from MagicFile"), ] * 3
 
-
     # Missing stack name
     version = self.module.get_component_version(None, "hadoop-hdfs-datanode")
     self.assertEquals(version, None)

+ 11 - 0
ambari-server/src/test/python/custom_actions/test_ru_set_all.py

@@ -80,6 +80,9 @@ class TestRUSetAll(RMFTestCase):
     with open(json_file_path, "r") as json_file:
       json_payload = json.load(json_file)
 
+    json_payload["configurations"]["cluster-env"]["stack_tools"] = self.get_stack_tools()
+    json_payload["configurations"]["cluster-env"]["stack_features"] = self.get_stack_features()
+
     config_dict = ConfigDictionary(json_payload)
 
     family_mock.return_value = True
@@ -114,6 +117,8 @@ class TestRUSetAll(RMFTestCase):
 
     json_payload['hostLevelParams']['stack_version'] = "2.3"
     json_payload['commandParams']['version'] = "2.3.0.0-1234"
+    json_payload["configurations"]["cluster-env"]["stack_tools"] = self.get_stack_tools()
+    json_payload["configurations"]["cluster-env"]["stack_features"] = self.get_stack_features()
 
     config_dict = ConfigDictionary(json_payload)
 
@@ -169,6 +174,8 @@ class TestRUSetAll(RMFTestCase):
       json_payload['commandParams']['target_stack'] = "HDP-2.3"
       json_payload['commandParams']['upgrade_direction'] = "downgrade"
       json_payload['hostLevelParams']['stack_version'] = "2.2"
+      json_payload["configurations"]["cluster-env"]["stack_tools"] = self.get_stack_tools()
+      json_payload["configurations"]["cluster-env"]["stack_features"] = self.get_stack_features()
 
       config_dict = ConfigDictionary(json_payload)
 
@@ -205,6 +212,8 @@ class TestRUSetAll(RMFTestCase):
       json_payload['commandParams']['target_stack'] = "HDP-2.3"
       json_payload['commandParams']['upgrade_direction'] = "downgrade"
       json_payload['hostLevelParams']['stack_version'] = "2.3"
+      json_payload["configurations"]["cluster-env"]["stack_tools"] = self.get_stack_tools()
+      json_payload["configurations"]["cluster-env"]["stack_features"] = self.get_stack_features()
 
       # reset config
       config_dict = ConfigDictionary(json_payload)
@@ -231,6 +240,8 @@ class TestRUSetAll(RMFTestCase):
       json_payload['commandParams']['target_stack'] = "HDP-2.2"
       json_payload['commandParams']['upgrade_direction'] = "downgrade"
       json_payload['hostLevelParams']['stack_version'] = "2.2"
+      json_payload["configurations"]["cluster-env"]["stack_tools"] = self.get_stack_tools()
+      json_payload["configurations"]["cluster-env"]["stack_features"] = self.get_stack_features()
 
       # reset config
       config_dict = ConfigDictionary(json_payload)

+ 1 - 0
ambari-server/src/test/python/stacks/2.2/common/test_conf_select.py

@@ -54,6 +54,7 @@ class TestConfSelect(RMFTestCase):
 
   @patch("resource_management.core.shell.call")
   @patch("resource_management.libraries.functions.conf_select._valid", new = MagicMock(return_value=True))
+  @patch("resource_management.libraries.functions.stack_tools.get_stack_tool_path", new = MagicMock(return_value="/usr/bin/conf-select"))
   def test_create_seeds_configuration_directories(self, shell_call_mock):
     """
     Tests that conf-select seeds new directories

+ 21 - 0
ambari-server/src/test/python/stacks/utils/RMFTestCase.py

@@ -100,6 +100,9 @@ class RMFTestCase(TestCase):
     else:
       raise RuntimeError("Please specify either config_file_path or config_dict parameter")
 
+    self.config_dict["configurations"]["cluster-env"]["stack_tools"] = RMFTestCase.get_stack_tools()
+    self.config_dict["configurations"]["cluster-env"]["stack_features"] = RMFTestCase.get_stack_features()
+
     if config_overrides:
       for key, value in config_overrides.iteritems():
         self.config_dict[key] = value
@@ -163,6 +166,24 @@ class RMFTestCase(TestCase):
   def _getCommonServicesFolder():
     return os.path.join(RMFTestCase.get_src_folder(), PATH_TO_COMMON_SERVICES)
 
+  @staticmethod
+  def get_stack_tools():
+    """
+    Read stack_tools config property from resources/stacks/HDP/2.0.6/properties/stack_tools.json
+    """
+    stack_tools_file = os.path.join(RMFTestCase.get_src_folder(), PATH_TO_STACKS, "2.0.6", "properties", "stack_tools.json")
+    with open(stack_tools_file, "r") as f:
+      return f.read()
+
+  @staticmethod
+  def get_stack_features():
+    """
+    Read stack_features config property from resources/stacks/HDP/2.0.6/properties/stack_features.json
+    """
+    stack_features_file = os.path.join(RMFTestCase.get_src_folder(), PATH_TO_STACKS, "2.0.6", "properties", "stack_features.json")
+    with open(stack_features_file, "r") as f:
+      return f.read()
+
   @staticmethod
   def _getStackTestsFolder():
     return os.path.join(RMFTestCase.get_src_folder(), PATH_TO_STACK_TESTS)