
AMBARI-11594 - Hadoop Home Directory Should Be Chosen Correctly During An Upgrade (jonathanhurley)

Jonathan Hurley 10 years ago
parent
commit
51caa8a042
17 changed files with 259 additions and 207 deletions
  1. ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py (+6 -130)
  2. ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py (+152 -0)
  3. ambari-server/src/main/java/org/apache/ambari/server/Role.java (+12 -11)
  4. ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java (+27 -21)
  5. ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py (+2 -4)
  6. ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py (+2 -1)
  7. ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py (+6 -6)
  8. ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py (+2 -1)
  9. ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py (+3 -6)
  10. ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py (+3 -2)
  11. ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py (+3 -2)
  12. ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py (+3 -3)
  13. ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py (+2 -3)
  14. ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py (+4 -3)
  15. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py (+7 -3)
  16. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py (+13 -5)
  17. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py (+12 -6)

+ 6 - 130
ambari-common/src/main/python/resource_management/libraries/functions/conf_select.py

@@ -21,64 +21,12 @@ limitations under the License.
 __all__ = ["select", "create", "get_hadoop_conf_dir", "get_hadoop_dir"]
 
 import version
+import hdp_select
+
 from resource_management.core import shell
-from resource_management.core.exceptions import Fail
-from resource_management.core.logger import Logger
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.get_hdp_version import get_hdp_version
 from resource_management.libraries.script.script import Script
 
-# a mapping of Ambari server role to hdp-select component name for all
-# non-clients
-SERVER_ROLE_DIRECTORY_MAP = {
-  'ACCUMULO_MASTER' : 'accumulo-master',
-  'ACCUMULO_MONITOR' : 'accumulo-monitor',
-  'ACCUMULO_GC' : 'accumulo-gc',
-  'ACCUMULO_TRACER' : 'accumulo-tracer',
-  'ACCUMULO_TSERVER' : 'accumulo-tablet',
-  'ATLAS_SERVER' : 'atlas-server',
-  'FLUME_HANDLER' : 'flume-server',
-  'FALCON_SERVER' : 'falcon-server',
-  'NAMENODE' : 'hadoop-hdfs-namenode',
-  'DATANODE' : 'hadoop-hdfs-datanode',
-  'SECONDARY_NAMENODE' : 'hadoop-hdfs-secondarynamenode',
-  'NFS_GATEWAY' : 'hadoop-hdfs-nfs3',
-  'JOURNALNODE' : 'hadoop-hdfs-journalnode',
-  'HBASE_MASTER' : 'hbase-master',
-  'HBASE_REGIONSERVER' : 'hbase-regionserver',
-  'HIVE_METASTORE' : 'hive-metastore',
-  'HIVE_SERVER' : 'hive-server2',
-  'WEBHCAT_SERVER' : 'hive-webhcat',
-  'KAFKA_BROKER' : 'kafka-broker',
-  'KNOX_GATEWAY' : 'knox-server',
-  'OOZIE_SERVER' : 'oozie-server',
-  'RANGER_ADMIN' : 'ranger-admin',
-  'RANGER_USERSYNC' : 'ranger-usersync',
-  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
-  'NIMBUS' : 'storm-nimbus',
-  'SUPERVISOR' : 'storm-supervisor',
-  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
-  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
-  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
-  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
-  'ZOOKEEPER_SERVER' : 'zookeeper-server'
-}
-
-# mapping of service check to hdp-select component
-SERVICE_CHECK_DIRECTORY_MAP = {
-  "HDFS_SERVICE_CHECK" : "hadoop-client",
-  "TEZ_SERVICE_CHECK" : "hadoop-client",
-  "PIG_SERVICE_CHECK" : "hadoop-client"
-}
-
 TEMPLATE = "conf-select {0} --package {1} --stack-version {2} --conf-version 0"
-HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
-HADOOP_DIR_DEFAULTS = {
-  "libexec": "/usr/lib/hadoop/libexec",
-  "sbin": "/usr/lib/hadoop/sbin",
-  "bin": "/usr/bin",
-  "lib": "/usr/lib/hadoop/lib"
-}
 
 def _valid(stack_name, package, ver):
   if stack_name != "HDP":
@@ -89,21 +37,6 @@ def _valid(stack_name, package, ver):
 
   return True
 
-def _get_upgrade_stack():
-  """
-  Gets the stack name and stack version if an upgrade is currently in progress.
-  :return:  the stack name and stack version as a tuple, or None if an
-  upgrade is not in progress.
-  """
-  from resource_management.libraries.functions.default import default
-  direction = default("/commandParams/upgrade_direction", None)
-  stack_name = default("/hostLevelParams/stack_name", None)
-  stack_version = default("/commandParams/version", None)
-
-  if direction and stack_name and stack_version:
-    return (stack_name, stack_version)
-
-  return None
 
 def create(stack_name, package, version):
   """
@@ -118,6 +51,7 @@ def create(stack_name, package, version):
 
   shell.call(TEMPLATE.format("create-conf-dir", package, version), logoutput=False, quiet=True)
 
+
 def select(stack_name, package, version, try_create=True):
   """
   Selects a config version for the specified package.  Currently only works if the version is
@@ -136,6 +70,7 @@ def select(stack_name, package, version, try_create=True):
 
   shell.call(TEMPLATE.format("set-conf-dir", package, version), logoutput=False, quiet=False)
 
+
 def get_hadoop_conf_dir(force_latest_on_upgrade=False):
   """
   Gets the shared hadoop conf directory using:
@@ -157,7 +92,7 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
   if Script.is_hdp_stack_greater_or_equal("2.2"):
     hadoop_conf_dir = "/usr/hdp/current/hadoop-client/conf"
 
-    stack_info = _get_upgrade_stack()
+    stack_info = hdp_select._get_upgrade_stack()
 
     # if upgrading to >= HDP 2.3
     if stack_info is not None and Script.is_hdp_stack_greater_or_equal("2.3"):
@@ -170,7 +105,7 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
       # determine if hdp-select has been run and if not, then use the current
       # hdp version until this component is upgraded
       if not force_latest_on_upgrade:
-        current_hdp_version = get_role_component_current_hdp_version()
+        current_hdp_version = hdp_select.get_role_component_current_hdp_version()
         if current_hdp_version is not None and stack_version != current_hdp_version:
           stack_version = current_hdp_version
 
@@ -179,65 +114,6 @@ def get_hadoop_conf_dir(force_latest_on_upgrade=False):
 
   return hadoop_conf_dir
 
-def get_hadoop_dir(target):
-  """
-  Return the hadoop shared directory in the following override order
-  1. Use default for 2.1 and lower
-  2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
-  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
-  However, if the upgrade has not yet invoked hdp-select, return the current
-  version of the component.
-  :target: the target directory
-  """
-
-  if not target in HADOOP_DIR_DEFAULTS:
-    raise Fail("Target {0} not defined".format(target))
-
-  hadoop_dir = HADOOP_DIR_DEFAULTS[target]
-
-  if Script.is_hdp_stack_greater_or_equal("2.2"):
-    hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
-
-    stack_info = _get_upgrade_stack()
-
-    if stack_info is not None:
-      stack_version = stack_info[1]
-
-      # determine if hdp-select has been run and if not, then use the current
-      # hdp version until this component is upgraded
-      current_hdp_version = get_role_component_current_hdp_version()
-      if current_hdp_version is not None and stack_version != current_hdp_version:
-        stack_version = current_hdp_version
-
-      hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
-
-  return hadoop_dir
-
-
-def get_role_component_current_hdp_version():
-  """
-  Gets the current HDP version of the component that this role command is for.
-  :return:  the current HDP version of the specified component or None
-  """
-  hdp_select_component = None
-  role = default("/role", "")
-  role_command =  default("/roleCommand", "")
-
-  if role in SERVER_ROLE_DIRECTORY_MAP:
-    hdp_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
-  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
-    hdp_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]
-
-  if hdp_select_component is None:
-    return None
 
-  current_hdp_version = get_hdp_version(hdp_select_component)
 
-  if current_hdp_version is None:
-    Logger.warning("Unable to determine hdp-select version for {0}".format(
-      hdp_select_component))
-  else:
-    Logger.info("{0} is currently at version {1}".format(
-      hdp_select_component, current_hdp_version))
 
-  return current_hdp_version

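Note: the maps and helpers removed above (SERVER_ROLE_DIRECTORY_MAP, SERVICE_CHECK_DIRECTORY_MAP, _get_upgrade_stack, get_hadoop_dir, get_role_component_current_hdp_version) are not deleted outright; they move into the new hdp_select module shown next. After this commit, conf_select keeps only the configuration-directory logic, and params scripts pull binary, home, and lib locations from hdp_select. A minimal sketch of the resulting import pattern, illustrative only and mirroring the params_linux.py changes later in this commit:

    from resource_management.libraries.functions import conf_select
    from resource_management.libraries.functions import hdp_select

    # configuration directories still come from conf_select ...
    hadoop_conf_dir = conf_select.get_hadoop_conf_dir()

    # ... while binary, home, and lib directories now come from hdp_select
    hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
    hadoop_home = hdp_select.get_hadoop_dir("home")
    hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
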
+ 152 - 0
ambari-common/src/main/python/resource_management/libraries/functions/hdp_select.py

@@ -20,11 +20,74 @@ limitations under the License.
 
 import sys
 from resource_management.core.logger import Logger
+from resource_management.core.exceptions import Fail
 from resource_management.core.resources.system import Execute
+from resource_management.libraries.functions.default import default
+from resource_management.libraries.functions.get_hdp_version import get_hdp_version
+from resource_management.libraries.script.script import Script
 
 # hdp-select set oozie-server 2.2.0.0-1234
 TEMPLATE = "hdp-select set {0} {1}"
 
+# a mapping of Ambari server role to hdp-select component name for all
+# non-clients
+SERVER_ROLE_DIRECTORY_MAP = {
+  'ACCUMULO_MASTER' : 'accumulo-master',
+  'ACCUMULO_MONITOR' : 'accumulo-monitor',
+  'ACCUMULO_GC' : 'accumulo-gc',
+  'ACCUMULO_TRACER' : 'accumulo-tracer',
+  'ACCUMULO_TSERVER' : 'accumulo-tablet',
+  'ATLAS_SERVER' : 'atlas-server',
+  'FLUME_HANDLER' : 'flume-server',
+  'FALCON_SERVER' : 'falcon-server',
+  'NAMENODE' : 'hadoop-hdfs-namenode',
+  'DATANODE' : 'hadoop-hdfs-datanode',
+  'SECONDARY_NAMENODE' : 'hadoop-hdfs-secondarynamenode',
+  'NFS_GATEWAY' : 'hadoop-hdfs-nfs3',
+  'JOURNALNODE' : 'hadoop-hdfs-journalnode',
+  'HBASE_MASTER' : 'hbase-master',
+  'HBASE_REGIONSERVER' : 'hbase-regionserver',
+  'HIVE_METASTORE' : 'hive-metastore',
+  'HIVE_SERVER' : 'hive-server2',
+  'WEBHCAT_SERVER' : 'hive-webhcat',
+  'KAFKA_BROKER' : 'kafka-broker',
+  'KNOX_GATEWAY' : 'knox-server',
+  'OOZIE_SERVER' : 'oozie-server',
+  'RANGER_ADMIN' : 'ranger-admin',
+  'RANGER_USERSYNC' : 'ranger-usersync',
+  'SPARK_JOBHISTORYSERVER' : 'spark-historyserver',
+  'NIMBUS' : 'storm-nimbus',
+  'SUPERVISOR' : 'storm-supervisor',
+  'HISTORYSERVER' : 'hadoop-mapreduce-historyserver',
+  'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver',
+  'NODEMANAGER' : 'hadoop-yarn-nodemanager',
+  'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager',
+  'ZOOKEEPER_SERVER' : 'zookeeper-server'
+}
+
+# mapping of service check to hdp-select component
+SERVICE_CHECK_DIRECTORY_MAP = {
+  "HDFS_SERVICE_CHECK" : "hadoop-client",
+  "TEZ_SERVICE_CHECK" : "hadoop-client",
+  "PIG_SERVICE_CHECK" : "hadoop-client"
+}
+
+# /usr/hdp/current/hadoop-client/[bin|sbin|libexec|lib]
+# /usr/hdp/2.3.0.0-1234/hadoop/[bin|sbin|libexec|lib]
+HADOOP_DIR_TEMPLATE = "/usr/hdp/{0}/{1}/{2}"
+
+# /usr/hdp/current/hadoop-client
+# /usr/hdp/2.3.0.0-1234/hadoop
+HADOOP_HOME_DIR_TEMPLATE = "/usr/hdp/{0}/{1}"
+
+HADOOP_DIR_DEFAULTS = {
+  "home": "/usr/lib/hadoop",
+  "libexec": "/usr/lib/hadoop/libexec",
+  "sbin": "/usr/lib/hadoop/sbin",
+  "bin": "/usr/bin",
+  "lib": "/usr/lib/hadoop/lib"
+}
+
 def select(component, version):
   """
   Executes hdp-select on the specific component and version. Some global
@@ -51,3 +114,92 @@ def select(component, version):
       module = modules.get(moduleName)
       reload(module)
       Logger.info("After hdp-select {0}, reloaded module {1}".format(component, moduleName))
+
+
+def get_role_component_current_hdp_version():
+  """
+  Gets the current HDP version of the component that this role command is for.
+  :return:  the current HDP version of the specified component or None
+  """
+  hdp_select_component = None
+  role = default("/role", "")
+  role_command =  default("/roleCommand", "")
+
+  if role in SERVER_ROLE_DIRECTORY_MAP:
+    hdp_select_component = SERVER_ROLE_DIRECTORY_MAP[role]
+  elif role_command == "SERVICE_CHECK" and role in SERVICE_CHECK_DIRECTORY_MAP:
+    hdp_select_component = SERVICE_CHECK_DIRECTORY_MAP[role]
+
+  if hdp_select_component is None:
+    return None
+
+  current_hdp_version = get_hdp_version(hdp_select_component)
+
+  if current_hdp_version is None:
+    Logger.warning("Unable to determine hdp-select version for {0}".format(
+      hdp_select_component))
+  else:
+    Logger.info("{0} is currently at version {1}".format(
+      hdp_select_component, current_hdp_version))
+
+  return current_hdp_version
+
+
+def get_hadoop_dir(target):
+  """
+  Return the hadoop shared directory in the following override order
+  1. Use default for 2.1 and lower
+  2. If 2.2 and higher, use /usr/hdp/current/hadoop-client/{target}
+  3. If 2.2 and higher AND for an upgrade, use /usr/hdp/<version>/hadoop/{target}.
+  However, if the upgrade has not yet invoked hdp-select, return the current
+  version of the component.
+  :target: the target directory
+  """
+
+  if not target in HADOOP_DIR_DEFAULTS:
+    raise Fail("Target {0} not defined".format(target))
+
+  hadoop_dir = HADOOP_DIR_DEFAULTS[target]
+
+  if Script.is_hdp_stack_greater_or_equal("2.2"):
+    # home uses a different template
+    if target == "home":
+      hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format("current", "hadoop-client")
+    else:
+      hadoop_dir = HADOOP_DIR_TEMPLATE.format("current", "hadoop-client", target)
+
+    stack_info = _get_upgrade_stack()
+
+    if stack_info is not None:
+      stack_version = stack_info[1]
+
+      # determine if hdp-select has been run and if not, then use the current
+      # hdp version until this component is upgraded
+      current_hdp_version = get_role_component_current_hdp_version()
+      if current_hdp_version is not None and stack_version != current_hdp_version:
+        stack_version = current_hdp_version
+
+      if target == "home":
+        # home uses a different template
+        hadoop_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_version, "hadoop")
+      else:
+        hadoop_dir = HADOOP_DIR_TEMPLATE.format(stack_version, "hadoop", target)
+
+  return hadoop_dir
+
+
+def _get_upgrade_stack():
+  """
+  Gets the stack name and stack version if an upgrade is currently in progress.
+  :return:  the stack name and stack version as a tuple, or None if an
+  upgrade is not in progress.
+  """
+  from resource_management.libraries.functions.default import default
+  direction = default("/commandParams/upgrade_direction", None)
+  stack_name = default("/hostLevelParams/stack_name", None)
+  stack_version = default("/commandParams/version", None)
+
+  if direction and stack_name and stack_version:
+    return (stack_name, stack_version)
+
+  return None

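For orientation, the override order that the new get_hadoop_dir implements for the "home" target can be summarized with a small standalone sketch; the helper name and its inputs are invented for illustration, and 2.3.0.0-1234 is just the example version taken from the comments in the diff above:

    # hypothetical helper, not part of the commit: mirrors the resolution
    # order of hdp_select.get_hadoop_dir("home")
    def resolve_hadoop_home(stack_is_22_or_newer, upgrade_stack_version, component_hdp_version):
        if not stack_is_22_or_newer:
            return "/usr/lib/hadoop"                 # HADOOP_DIR_DEFAULTS["home"]
        if upgrade_stack_version is None:
            return "/usr/hdp/current/hadoop-client"  # HADOOP_HOME_DIR_TEMPLATE with "current"
        # mid-upgrade: prefer the version hdp-select currently reports for this
        # component, falling back to the upgrade's target version
        version = component_hdp_version or upgrade_stack_version
        return "/usr/hdp/{0}/hadoop".format(version)

    resolve_hadoop_home(False, None, None)           # /usr/lib/hadoop
    resolve_hadoop_home(True, None, None)            # /usr/hdp/current/hadoop-client
    resolve_hadoop_home(True, "2.3.0.0-1234", None)  # /usr/hdp/2.3.0.0-1234/hadoop
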
+ 12 - 11
ambari-server/src/main/java/org/apache/ambari/server/Role.java

@@ -28,9 +28,9 @@ import java.util.concurrent.ConcurrentHashMap;
  * similar mechanisms to an enum.
  */
 public class Role {
-  
+
   private static final Map<String, Role> roles = new ConcurrentHashMap<String, Role>();
-  
+
   /**
    * @param name the role name
    * @return a Role instance, never <code>null</code>
@@ -42,17 +42,17 @@ public class Role {
 
     Role role = new Role(name);
     roles.put(name, role);
-    return role;    
+    return role;
   }
-  
+
   /**
    * @return a collection of all defined Role instances
    */
   public static Collection<Role> values() {
     return Collections.unmodifiableCollection(roles.values());
-  }  
-  
-  public static final Role AMBARI_SERVER_ACTION = valueOf("AMBARI_SERVER_ACTION"); 
+  }
+
+  public static final Role AMBARI_SERVER_ACTION = valueOf("AMBARI_SERVER_ACTION");
   public static final Role DATANODE = valueOf("DATANODE");
   public static final Role FLUME_HANDLER = valueOf("FLUME_HANDLER");
   public static final Role FLUME_SERVICE_CHECK = valueOf("FLUME_SERVICE_CHECK");
@@ -112,13 +112,14 @@ public class Role {
   public static final Role METRICS_COLLECTOR = valueOf("METRICS_COLLECTOR");
   public static final Role METRICS_MONITOR = valueOf("METRICS_MONITOR");
   public static final Role AMS_SERVICE_CHECK = valueOf("AMBARI_METRICS_SERVICE_CHECK");
+  public static final Role ACCUMULO_CLIENT = valueOf("ACCUMULO_CLIENT");
 
   private String name = null;
-  
+
   private Role(String roleName) {
     name = roleName;
   }
-  
+
   /**
    * @return the name given to the role
    */
@@ -130,7 +131,7 @@ public class Role {
   public String toString() {
     return name;
   }
-  
+
   @Override
   public int hashCode() {
     return name.hashCode();
@@ -141,7 +142,7 @@ public class Role {
     if (null == o || !Role.class.equals(o.getClass())) {
       return false;
     }
-    
+
     return this == o || name.equals(((Role) o).name);
   }
 

+ 27 - 21
ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java

@@ -18,12 +18,17 @@
 
 package org.apache.ambari.server.metadata;
 
-import com.google.inject.Singleton;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.state.Service;
 
-import java.util.*;
+import com.google.inject.Singleton;
 
 /**
  * Contains metadata about actions supported by services
@@ -36,13 +41,13 @@ public class ActionMetadata {
       new HashMap<String, String>();
   private final List<String> defaultHostComponentCommands = new ArrayList<String>();
   public final static String SERVICE_CHECK_POSTFIX = "_SERVICE_CHECK";
-  
+
   private static final Map<String, String> SERVICE_CHECKS;
   static {
       Map<String, String> serviceChecks = new HashMap<String, String>();
-      
+
       serviceChecks.put(Service.Type.ZOOKEEPER.toString(), "ZOOKEEPER_QUORUM_SERVICE_CHECK");
-      
+
       SERVICE_CHECKS = Collections.unmodifiableMap(serviceChecks);
   }
 
@@ -59,19 +64,20 @@ public class ActionMetadata {
   }
 
   private void fillServiceClients() {
-    serviceClients.put("hdfs"       , Role.HDFS_CLIENT.toString());
-    serviceClients.put("glusterfs"  , Role.GLUSTERFS_CLIENT.toString());
-    serviceClients.put("hbase"      , Role.HBASE_CLIENT.toString());
-    serviceClients.put("mapreduce"  , Role.MAPREDUCE_CLIENT.toString());
-    serviceClients.put("zookeeper"  , Role.ZOOKEEPER_CLIENT.toString());
-    serviceClients.put("hive"       , Role.HIVE_CLIENT.toString());
-    serviceClients.put("hcat"       , Role.HCAT.toString());
-    serviceClients.put("oozie"      , Role.OOZIE_CLIENT.toString());
-    serviceClients.put("pig"        , Role.PIG.toString());
-    serviceClients.put("mahout"     , Role.MAHOUT.toString());
-    serviceClients.put("sqoop"      , Role.SQOOP.toString());
-    serviceClients.put("yarn"       , Role.YARN_CLIENT.toString());
-    serviceClients.put("kerberos"   , Role.KERBEROS_CLIENT.toString());
+    serviceClients.put("hdfs", Role.HDFS_CLIENT.toString());
+    serviceClients.put("glusterfs", Role.GLUSTERFS_CLIENT.toString());
+    serviceClients.put("hbase", Role.HBASE_CLIENT.toString());
+    serviceClients.put("mapreduce", Role.MAPREDUCE_CLIENT.toString());
+    serviceClients.put("zookeeper", Role.ZOOKEEPER_CLIENT.toString());
+    serviceClients.put("hive", Role.HIVE_CLIENT.toString());
+    serviceClients.put("hcat", Role.HCAT.toString());
+    serviceClients.put("oozie", Role.OOZIE_CLIENT.toString());
+    serviceClients.put("pig", Role.PIG.toString());
+    serviceClients.put("mahout", Role.MAHOUT.toString());
+    serviceClients.put("sqoop", Role.SQOOP.toString());
+    serviceClients.put("yarn", Role.YARN_CLIENT.toString());
+    serviceClients.put("kerberos", Role.KERBEROS_CLIENT.toString());
+    serviceClients.put("accumulo", Role.ACCUMULO_CLIENT.toString());
   }
 
   public List<String> getActions(String serviceName) {
@@ -90,14 +96,14 @@ public class ActionMetadata {
   public String getServiceCheckAction(String serviceName) {
     return serviceCheckActions.get(serviceName.toLowerCase());
   }
-  
+
   public void addServiceCheckAction(String serviceName) {
     String actionName = serviceName + SERVICE_CHECK_POSTFIX;
-    
+
     if(SERVICE_CHECKS.containsKey(serviceName)) {
       actionName = SERVICE_CHECKS.get(serviceName);
     }
-    
+
     serviceCheckActions.put(serviceName.toLowerCase(), actionName);
     serviceActions.put(serviceName.toLowerCase(), Arrays.asList(actionName));
   }

+ 2 - 4
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/params_linux.py

@@ -19,15 +19,13 @@ limitations under the License.
 import status_params
 
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
-from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
-
-
 config = Script.get_config()
 
 stack_name = default("/hostLevelParams/stack_name", None)
@@ -40,7 +38,7 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 etc_prefix_dir = "/etc/falcon"
 
 # hadoop params
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
 
 if Script.is_hdp_stack_greater_or_equal("2.2"):
 

+ 2 - 1
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/params_linux.py

@@ -26,6 +26,7 @@ from ambari_commons.constants import AMBARI_SUDO_BINARY
 
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
@@ -51,7 +52,7 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 # hadoop default parameters
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
 region_mover = "/usr/lib/hbase/bin/region_mover.rb"

+ 6 - 6
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/params_linux.py

@@ -26,6 +26,7 @@ import re
 from ambari_commons.os_check import OSCheck
 
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
@@ -64,18 +65,17 @@ secure_dn_ports_are_in_use = False
 
 # hadoop default parameters
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = conf_select.get_hadoop_dir("libexec")
-hadoop_bin = conf_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
-hadoop_home = "/usr/lib/hadoop"
+hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
+hadoop_bin = hdp_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_home = hdp_select.get_hadoop_dir("home")
 hadoop_secure_dn_user = hdfs_user
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_lib_home = conf_select.get_hadoop_dir("lib")
+hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
 
 # hadoop parameters for 2.2+
 if Script.is_hdp_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-  hadoop_home = "/usr/hdp/current/hadoop-client"
 
   if not security_enabled:
     hadoop_secure_dn_user = '""'

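This HDFS hunk is the one most directly tied to the JIRA title: hadoop_home is no longer hardcoded to /usr/lib/hadoop and then overridden to /usr/hdp/current/hadoop-client on 2.2+, but resolved through hdp_select, so a component that is mid-upgrade keeps pointing at the HDP version hdp-select has actually selected for it. An illustrative before/after, assuming a host upgrading to 2.3.0.0-1234 whose DataNode is still on a hypothetical 2.2.0.0-2041:

    # before: the "current" symlink was used unconditionally on HDP 2.2+,
    # even while an upgrade was in flight
    hadoop_home = "/usr/hdp/current/hadoop-client"

    # after: hdp_select.get_hadoop_dir("home") keeps the component on its own
    # version until hdp-select switches it (hypothetical versioned path)
    hadoop_home = "/usr/hdp/2.2.0.0-2041/hadoop"
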
+ 2 - 1
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/status_params.py

@@ -21,6 +21,7 @@ limitations under the License.
 from ambari_commons import OSCheck
 
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
@@ -71,7 +72,7 @@ else:
 
   # default configuration directories
   hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-  hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+  hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
   webhcat_conf_dir = '/etc/hive-webhcat/conf'
   hive_etc_dir_prefix = "/etc/hive"
   hive_conf_dir = "/etc/hive/conf"

+ 3 - 6
ambari-server/src/main/resources/common-services/MAHOUT/1.0.0.2.3/package/scripts/params.py

@@ -20,14 +20,13 @@ Ambari Agent
 """
 from resource_management import *
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
-
-
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -47,8 +46,8 @@ mahout_conf_dir = "/usr/hdp/current/mahout-client/conf"
 mahout_user = config['configurations']['mahout-env']['mahout_user']
 
 #hadoop params
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
-hadoop_home = '/usr/hdp/current/hadoop-client'
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_home = hdp_select.get_hadoop_dir("home")
 
 # the configuration direction for HDFS/YARN/MapR is the hadoop config
 # directory, which is symlinked by hadoop-client only
@@ -69,8 +68,6 @@ java64_home = config['hostLevelParams']['java_home']
 
 log4j_props = config['configurations']['mahout-log4j']['content']
 
-
-
 hdfs_site = config['configurations']['hdfs-site']
 default_fs = config['configurations']['core-site']['fs.defaultFS']
 

+ 3 - 2
ambari-server/src/main/resources/common-services/OOZIE/4.0.0.2.0/package/scripts/params_linux.py

@@ -21,6 +21,7 @@ from resource_management import *
 from ambari_commons.constants import AMBARI_SUDO_BINARY
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
@@ -48,8 +49,8 @@ stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
-hadoop_lib_home = conf_select.get_hadoop_dir("lib")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
+hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
 
 #hadoop params
 if Script.is_hdp_stack_greater_or_equal("2.2"):

+ 3 - 2
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params_linux.py

@@ -22,6 +22,7 @@ Ambari Agent
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
@@ -40,7 +41,7 @@ version = default("/commandParams/version", None)
 
 # hadoop default parameters
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
 pig_conf_dir = "/etc/pig/conf"
 hadoop_home = '/usr'
 pig_bin_dir = ""
@@ -48,7 +49,7 @@ pig_bin_dir = ""
 # hadoop parameters for 2.2+
 if Script.is_hdp_stack_greater_or_equal("2.2"):
   pig_conf_dir = "/usr/hdp/current/pig-client/conf"
-  hadoop_home = '/usr/hdp/current/hadoop-client'
+  hadoop_home = hdp_select.get_hadoop_dir("home")
   pig_bin_dir = '/usr/hdp/current/pig-client/bin'
 
 hdfs_user = config['configurations']['hadoop-env']['hdfs_user']

+ 3 - 3
ambari-server/src/main/resources/common-services/SPARK/1.2.0.2.2/package/scripts/params.py

@@ -26,6 +26,7 @@ from setup_spark import *
 from resource_management import *
 import resource_management.libraries.functions
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
@@ -33,7 +34,6 @@ from resource_management.libraries.functions import get_kinit_path
 
 from resource_management.libraries.script.script import Script
 
-
 # a map of the Ambari role to the component name
 # for use with /usr/hdp/current/<component>
 SERVER_ROLE_DIRECTORY_MAP = {
@@ -63,10 +63,10 @@ version = default("/commandParams/version", None)
 
 spark_conf = '/etc/spark/conf'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
 
 if Script.is_hdp_stack_greater_or_equal("2.2"):
-  hadoop_home = "/usr/hdp/current/hadoop-client"
+  hadoop_home = hdp_select.get_hadoop_dir("home")
   spark_conf = format("/usr/hdp/current/{component_directory}/conf")
   spark_log_dir = config['configurations']['spark-env']['spark_log_dir']
   spark_pid_dir = status_params.spark_pid_dir

+ 2 - 3
ambari-server/src/main/resources/common-services/TEZ/0.4.0.2.1/package/scripts/params_linux.py

@@ -21,13 +21,12 @@ import os
 
 from resource_management.libraries.resources import HdfsResource
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions.version import format_hdp_stack_version
 from resource_management.libraries.functions.default import default
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.script.script import Script
 
-
-
 # server configurations
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -43,7 +42,7 @@ version = default("/commandParams/version", None)
 
 # default hadoop parameters
 hadoop_home = '/usr'
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 tez_etc_dir = "/etc/tez"
 config_dir = "/etc/tez/conf"

+ 4 - 3
ambari-server/src/main/resources/common-services/YARN/2.1.0.2.0/package/scripts/params_linux.py

@@ -23,6 +23,7 @@ import os
 from resource_management.libraries.script.script import Script
 from resource_management.libraries.resources.hdfs_resource import HdfsResource
 from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
 from resource_management.libraries.functions import format
 from resource_management.libraries.functions import get_kinit_path
 from resource_management.libraries.functions.version import format_hdp_stack_version
@@ -63,9 +64,9 @@ version = default("/commandParams/version", None)
 hostname = config['hostname']
 
 # hadoop default parameters
-hadoop_libexec_dir = conf_select.get_hadoop_dir("libexec")
-hadoop_bin = conf_select.get_hadoop_dir("sbin")
-hadoop_bin_dir = conf_select.get_hadoop_dir("bin")
+hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
+hadoop_bin = hdp_select.get_hadoop_dir("sbin")
+hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
 hadoop_yarn_home = '/usr/lib/hadoop-yarn'
 hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"

+ 7 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -18,9 +18,13 @@ limitations under the License.
 """
 
 from ambari_commons.constants import AMBARI_SUDO_BINARY
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
-from resource_management import *
+from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_hdp_stack_version
+
 from resource_management.core.system import System
 from ambari_commons.os_check import OSCheck
 
@@ -32,7 +36,7 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 
 # default hadoop params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = conf_select.get_hadoop_dir("libexec")
+hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 

+ 13 - 5
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py

@@ -18,11 +18,20 @@ limitations under the License.
 """
 
 import collections
+import re
+
 import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
+
+from resource_management.libraries.script import Script
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format
 from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
+from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions.version import format_hdp_stack_version
+from resource_management.libraries.functions.version import compare_versions
 from ambari_commons.os_check import OSCheck
-from resource_management import *
+
 
 config = Script.get_config()
 tmp_dir = Script.get_tmp_dir()
@@ -72,18 +81,17 @@ def is_secure_port(port):
 
 # hadoop default params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_home = "/usr/lib/hadoop"
+hadoop_home = hdp_select.get_hadoop_dir("home")
 hadoop_secure_dn_user = hdfs_user
 hadoop_dir = "/etc/hadoop"
 versioned_hdp_root = '/usr/hdp/current'
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
-hadoop_libexec_dir = conf_select.get_hadoop_dir("libexec")
+hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
 
 # HDP 2.2+ params
 if Script.is_hdp_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-  hadoop_home = "/usr/hdp/current/hadoop-client"
 
   # not supported in HDP 2.2+
   hadoop_conf_empty_dir = None

+ 12 - 6
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -17,11 +17,17 @@ limitations under the License.
 
 """
 
-from resource_management.libraries.functions import conf_select, default, format_jvm_option, format
+import os
+
+from resource_management.libraries.functions import conf_select
+from resource_management.libraries.functions import hdp_select
+from resource_management.libraries.functions import default
+from resource_management.libraries.functions import format_jvm_option
+from resource_management.libraries.functions import format
 from resource_management.libraries.functions.version import format_hdp_stack_version, compare_versions
 from ambari_commons.os_check import OSCheck
 from resource_management.libraries.script.script import Script
-import os
+
 
 config = Script.get_config()
 
@@ -31,9 +37,9 @@ hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
 # hadoop default params
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 
-hadoop_libexec_dir = conf_select.get_hadoop_dir("libexec")
-hadoop_lib_home = conf_select.get_hadoop_dir("lib")
-hadoop_bin = conf_select.get_hadoop_dir("sbin")
+hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
+hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
+hadoop_bin = hdp_select.get_hadoop_dir("sbin")
 hadoop_home = '/usr'
 create_lib_snappy_symlinks = True
 hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
@@ -42,7 +48,7 @@ default_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
 # HDP 2.2+ params
 if Script.is_hdp_stack_greater_or_equal("2.2"):
   mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
-  hadoop_home = '/usr/hdp/current/hadoop-client'
+  hadoop_home = hdp_select.get_hadoop_dir("home")
   create_lib_snappy_symlinks = False
   
 current_service = config['serviceName']