
AMBARI-6488. Move global to env in stack definitions (aonishuk)

Andrew Onishuk, 11 years ago
commit b0ae1fdde4
100 changed files with 1138 additions and 1210 deletions
  1. + 0 - 5  ambari-agent/src/main/python/resource_management/libraries/functions/default.py
  2. + 33 - 26  ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
  3. + 1 - 2  ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java
  4. + 1 - 1  ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py
  5. + 15 - 14  ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py
  6. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/shared_initialization.py
  7. + 30 - 31  ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
  8. + 0 - 6  ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py
  9. + 17 - 17  ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
  10. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/ganglia-env.xml
  11. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml
  12. + 10 - 10  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py
  13. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py
  14. + 58 - 36  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-env.xml
  15. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml
  16. + 4 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py
  17. + 18 - 17  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py
  18. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py
  19. + 0 - 87  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml
  20. + 105 - 36  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml
  21. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml
  22. + 18 - 19  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
  23. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py
  24. + 40 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml
  25. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml
  26. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py
  27. + 19 - 18  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py
  28. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py
  29. + 0 - 78  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2
  30. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml
  31. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml
  32. + 9 - 9  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py
  33. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py
  34. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml
  35. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml
  36. + 10 - 10  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py
  37. + 61 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml
  38. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml
  39. + 4 - 3  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py
  40. + 14 - 13  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py
  41. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py
  42. + 0 - 88  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2
  43. + 9 - 4  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml
  44. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml
  45. + 6 - 5  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py
  46. + 7 - 2  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py
  47. + 0 - 36  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig-env.sh.j2
  48. + 24 - 25  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/configuration/sqoop-env.xml
  49. + 3 - 0  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/metainfo.xml
  50. + 6 - 5  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py
  51. + 5 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop.py
  52. + 0 - 36  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/templates/sqoop-env.sh.j2
  53. + 54 - 0  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-env.xml
  54. + 30 - 0  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml
  55. + 1 - 0  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml
  56. + 11 - 11  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py
  57. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/status_params.py
  58. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat.py
  59. + 0 - 63  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/templates/webhcat-env.sh.j2
  60. + 19 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/zookeeper-env.xml
  61. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/metainfo.xml
  62. + 14 - 13  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py
  63. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/status_params.py
  64. + 7 - 1  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper.py
  65. + 0 - 44  ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
  66. + 15 - 14  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
  67. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py
  68. + 20 - 20  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py
  69. + 19 - 19  ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
  70. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-env.xml
  71. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metainfo.xml
  72. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py
  73. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/ganglia-env.xml
  74. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml
  75. + 11 - 11  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py
  76. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py
  77. + 58 - 37  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-env.xml
  78. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml
  79. + 5 - 2  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py
  80. + 18 - 17  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py
  81. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py
  82. + 0 - 87  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml
  83. + 103 - 36  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
  84. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml
  85. + 1 - 0  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/namenode.py
  86. + 17 - 17  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
  87. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py
  88. + 42 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml
  89. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml
  90. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py
  91. + 20 - 19  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py
  92. + 2 - 2  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py
  93. + 0 - 79  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2
  94. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml
  95. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml
  96. + 11 - 11  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py
  97. + 69 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml
  98. + 1 - 1  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml
  99. + 3 - 2  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py
  100. + 14 - 12  ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py

+ 0 - 5
ambari-agent/src/main/python/resource_management/libraries/functions/default.py

@@ -25,13 +25,8 @@ from resource_management.libraries.script import Script
 from resource_management.libraries.script.config_dictionary import UnknownConfiguration
 from resource_management.core.logger import Logger
 
-default_subdict='/configurations/global'
-
 def default(name, default_value):
   subdicts = filter(None, name.split('/'))
-  
-  if not name.startswith('/'):
-    subdicts = filter(None, default_subdict.split('/')) + subdicts
 
   curr_dict = Script.get_config()
   for x in subdicts:
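
With the implicit /configurations/global prefix removed, default() now resolves only absolute paths into the command JSON. A minimal before/after sketch of what call sites look like (property names taken from the params.py hunks below):

from resource_management.libraries.functions.default import default

# Before AMBARI-6488: a bare key was resolved under /configurations/global.
jtnode_heapsize = default("jtnode_heapsize", "1024m")

# After: callers spell out the full path to the *-env type that owns the key.
jtnode_heapsize = default("/configurations/mapred-env/jtnode_heapsize", "1024m")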

+ 33 - 26
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java

@@ -240,37 +240,44 @@ public class HeartbeatMonitor implements Runnable {
 
     Map<String, Map<String, String>> configurations = new TreeMap<String, Map<String, String>>();
 
-    // get the cluster config for type 'global'
+    // get the cluster config for type '*-env'
     // apply config group overrides
 
-    Config clusterConfig = cluster.getDesiredConfigByType(GLOBAL);
-    if (clusterConfig != null) {
-      // cluster config for 'global'
-      Map<String, String> props = new HashMap<String, String>(clusterConfig.getProperties());
-
-      // Apply global properties for this host from all config groups
-      Map<String, Map<String, String>> allConfigTags = configHelper
-              .getEffectiveDesiredTags(cluster, hostname);
-
-      Map<String, Map<String, String>> configTags = new HashMap<String,
-              Map<String, String>>();
-
-      for (Map.Entry<String, Map<String, String>> entry : allConfigTags.entrySet()) {
-        if (entry.getKey().equals(GLOBAL)) {
-          configTags.put(GLOBAL, entry.getValue());
+    //Config clusterConfig = cluster.getDesiredConfigByType(GLOBAL);
+    Collection<Config> clusterConfigs = cluster.getAllConfigs();
+    
+    for(Config clusterConfig: clusterConfigs) {
+      if(!clusterConfig.getType().endsWith("-env"))
+        continue;
+    
+      if (clusterConfig != null) {
+        // cluster config for 'global'
+        Map<String, String> props = new HashMap<String, String>(clusterConfig.getProperties());
+  
+        // Apply global properties for this host from all config groups
+        Map<String, Map<String, String>> allConfigTags = configHelper
+                .getEffectiveDesiredTags(cluster, hostname);
+  
+        Map<String, Map<String, String>> configTags = new HashMap<String,
+                Map<String, String>>();
+  
+        for (Map.Entry<String, Map<String, String>> entry : allConfigTags.entrySet()) {
+          if (entry.getKey().equals(clusterConfig.getType())) {
+            configTags.put(clusterConfig.getType(), entry.getValue());
+          }
         }
-      }
-
-      Map<String, Map<String, String>> properties = configHelper
-              .getEffectiveConfigProperties(cluster, configTags);
-
-      if (!properties.isEmpty()) {
-        for (Map<String, String> propertyMap : properties.values()) {
-          props.putAll(propertyMap);
+  
+        Map<String, Map<String, String>> properties = configHelper
+                .getEffectiveConfigProperties(cluster, configTags);
+  
+        if (!properties.isEmpty()) {
+          for (Map<String, String> propertyMap : properties.values()) {
+            props.putAll(propertyMap);
+          }
         }
+  
+        configurations.put(clusterConfig.getType(), props);
       }
-
-      configurations.put(GLOBAL, props);
     }
 
     StatusCommand statusCmd = new StatusCommand();
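
Net effect of this hunk: the status command now carries one properties map per *-env config type instead of a single 'global' map, with host config-group overrides merged on top. A rough sketch of that selection-and-merge logic, written in Python for brevity (the dict shapes are illustrative, not the actual Java types):

def env_configurations(cluster_configs, effective_overrides):
    # cluster_configs / effective_overrides: {config_type: {key: value}}
    configurations = {}
    for config_type, props in cluster_configs.items():
        if not config_type.endswith("-env"):
            continue  # only *-env types are shipped with status commands
        merged = dict(props)                                      # cluster-level values first...
        merged.update(effective_overrides.get(config_type, {}))  # ...then config-group overrides win
        configurations[config_type] = merged
    return configurations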

+ 1 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterResourceProvider.java

@@ -602,7 +602,6 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
       clusterProperties.put(CLUSTER_DESIRED_CONFIGS_PROPERTY_ID +
           "/properties/" + entry.getKey(), entry.getValue());
     }
-
     getManagementController().updateClusters(
         Collections.singleton(getRequest(clusterProperties)), null);
   }
@@ -861,7 +860,7 @@ public class ClusterResourceProvider extends BaseBlueprintProcessor {
     propertyUpdaters.put("hive.metastore.uris", new SingleHostPropertyUpdater("HIVE_SERVER"));
     propertyUpdaters.put("hive_ambari_host", new SingleHostPropertyUpdater("HIVE_SERVER"));
     propertyUpdaters.put("javax.jdo.option.ConnectionURL",
-        new DBPropertyUpdater("MYSQL_SERVER", "global", "hive_database"));
+        new DBPropertyUpdater("MYSQL_SERVER", "hive-env", "hive_database"));
 
     // OOZIE_SERVER
     propertyUpdaters.put("oozie.base.url", new SingleHostPropertyUpdater("OOZIE_SERVER"));

+ 1 - 1
ambari-server/src/main/resources/custom_actions/ambari_hdfs_rebalancer.py

@@ -37,7 +37,7 @@ class HdfsRebalance(Script):
 
     if security_enabled:
       kinit_path_local = functions.get_kinit_path(
-        [default('kinit_path_local', None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+        ["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
       principal = config['commandParams']['principal']
       keytab = config['commandParams']['keytab']
       Execute(format("{kinit_path_local}  -kt {keytab} {principal}"))
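
Alongside the config move, the default('kinit_path_local', None) override is dropped here, so kinit is located from the fixed search list alone. A hedged sketch of the lookup this relies on; find_kinit below is illustrative, not the actual get_kinit_path implementation:

import os

def find_kinit(search_dirs=("/usr/bin", "/usr/kerberos/bin", "/usr/sbin")):
    # Return the first candidate directory that holds an executable kinit.
    for d in search_dirs:
        candidate = os.path.join(d, "kinit")
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return "kinit"  # assumption: fall back to PATH resolution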

+ 15 - 14
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py

@@ -31,31 +31,32 @@ java_home = config['hostLevelParams']['java_home']
 #hadoop params
 hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
 if System.get_instance().os_family == "suse":
   jsvc_path = "/usr/lib/bigtop-utils"
 else:
   jsvc_path = "/usr/libexec/bigtop-utils"
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
 
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = default("ttnode_heapsize","1024m")
+jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("/configurations/mapred-env/jtnode_heapsize","1024m")
+ttnode_heapsize = default("/configurations/mapred-env/ttnode_heapsize","1024m")
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapred_pid_dir_prefix = default("/configurations/hadoop-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
 
 
 #users and groups
-hdfs_user = config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['hadoop-env']['user_group']

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -38,7 +38,7 @@ def setup_hadoop_env():
   
   File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
        owner=tc_owner,
-       content=Template('hadoop-env.sh.j2')
+       content=InlineTemplate(params.hadoop_env_sh_template)
   )
 
 def setup_config():
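
This one-line swap is the core of the template migration: Template('hadoop-env.sh.j2') rendered a .j2 file shipped inside the stack, while InlineTemplate renders the script text stored in the hadoop-env config type (params.hadoop_env_sh_template, read from the new 'content' property), making the whole file editable through Ambari configs. A minimal sketch, assuming the resource_management API these scripts already import:

from resource_management import File, InlineTemplate

def setup_hadoop_env_sketch(params, owner="root"):
    # Renders the user-editable hadoop-env content from the cluster config
    # instead of a template file baked into the stack definition.
    File("/etc/hadoop/conf/hadoop-env.sh",
         owner=owner,
         content=InlineTemplate(params.hadoop_env_sh_template))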

+ 30 - 31
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py

@@ -47,43 +47,42 @@ if System.get_instance().os_family == "suse":
   jsvc_path = "/usr/lib/bigtop-utils"
 else:
   jsvc_path = "/usr/libexec/bigtop-utils"
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("/configurations/mapred-env/jtnode_heapsize","1024m")
 ttnode_heapsize = "1024m"
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/hadoop-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+mapred_log_dir_prefix = "/var/log/hadoop-mapreduce"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 
 #users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-zk_user = config['configurations']['global']['zk_user']
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+zk_user = config['configurations']['zookeeper-env']['zk_user']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
 smoke_user_group =  "users"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 
@@ -123,7 +122,7 @@ if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-ignore_groupsusers_create = default("ignore_groupsusers_create", False)
+ignore_groupsusers_create = default("/configurations/hadoop-env/ignore_groupsusers_create", False)
 
 
 #repo params
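
The users-and-groups hunk above gives the clearest picture of where former 'global' keys ended up. An illustrative, non-exhaustive mapping distilled from it:

GLOBAL_KEY_NEW_HOME = {
    "hdfs_user": "hadoop-env",
    "user_group": "hadoop-env",
    "proxyuser_group": "hadoop-env",
    "smokeuser": "hadoop-env",
    "mapred_user": "mapred-env",
    "hbase_user": "hbase-env",
    "hive_user": "hive-env",
    "hcat_user": "hive-env",
    "oozie_user": "oozie-env",
    "zk_user": "zookeeper-env",
    "nagios_user": "nagios-env",
    "nagios_group": "nagios-env",
    "gmetad_user": "ganglia-env",
    "gmond_user": "ganglia-env",
}

def lookup(config, key):
    # Old: config['configurations']['global'][key]
    # New: the key lives under its service's *-env config type.
    return config["configurations"][GLOBAL_KEY_NEW_HOME[key]][key]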

+ 0 - 6
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/shared_initialization.py

@@ -87,12 +87,6 @@ def setup_users():
          ignore_failures = params.ignore_groupsusers_create
     )
 
-  if params.has_resourcemanager:
-    User(params.yarn_user,
-         gid = params.user_group,
-         ignore_failures = params.ignore_groupsusers_create
-    )
-
   if params.has_ganglia_server:
     Group(params.gmetad_user,
          ignore_failures = params.ignore_groupsusers_create

+ 17 - 17
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py

@@ -28,9 +28,9 @@ _authentication = config['configurations']['core-site']['hadoop.security.authent
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
 #users and groups
-hdfs_user = config['configurations']['global']['hdfs_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 #hosts
 hostname = config["hostname"]
@@ -70,14 +70,14 @@ if has_ganglia_server:
 hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
 hadoop_lib_home = "/usr/lib/hadoop/lib"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hadoop_home = "/usr"
 hadoop_bin = "/usr/lib/hadoop/bin"
 
 task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
 limits_conf_dir = "/etc/security/limits.d"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 #db params
 server_db_name = config['hostLevelParams']['db_name']
@@ -93,8 +93,8 @@ ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
 ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
 ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
 
-if 'rca_enabled' in config['configurations']['global']:
-  rca_enabled =  config['configurations']['global']['rca_enabled']
+if 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
 else:
   rca_enabled = False
 rca_disabled_prefix = "###"
@@ -110,21 +110,21 @@ if System.get_instance().os_family == "suse":
 else:
   jsvc_path = "/usr/libexec/bigtop-utils"
 
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
 
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = default("ttnode_heapsize","1024m")
+jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("/configurations/mapred-env/jtnode_heapsize","1024m")
+ttnode_heapsize = default("/configurations/mapred-env/ttnode_heapsize","1024m")
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = "/var/run/hadoop-mapreduce"
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix",hdfs_log_dir_prefix)
+mapred_log_dir_prefix = hdfs_log_dir_prefix
 
 #taskcontroller.cfg
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/configuration/ganglia-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>ganglia_conf_dir</name>
     <value>/etc/ganglia/hdp</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/metainfo.xml

@@ -96,7 +96,7 @@
         </osSpecific>
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>ganglia-env</config-type>
       </configuration-dependencies>
       <monitoringService>true</monitoringService>
     </service>

+ 10 - 10
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/params.py

@@ -22,21 +22,21 @@ import os
 
 config = Script.get_config()
 
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = default("/configurations/global/ganglia_conf_dir","/etc/ganglia/hdp")
+user_group = config['configurations']['hadoop-env']["user_group"]
+ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir","/etc/ganglia/hdp")
 ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]
 ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
 
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
 webserver_group = "apache"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-rrdcached_timeout = default("/configurations/global/rrdcached_timeout", 3600)
-rrdcached_flush_timeout = default("/configurations/global/rrdcached_flush_timeout", 7200)
-rrdcached_delay = default("/configurations/global/rrdcached_delay", 1800)
-rrdcached_write_threads = default("/configurations/global/rrdcached_write_threads", 4)
+rrdcached_base_dir = config['configurations']['ganglia-env']["rrdcached_base_dir"]
+rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout", 3600)
+rrdcached_flush_timeout = default("/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
+rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
+rrdcached_write_threads = default("/configurations/ganglia-env/rrdcached_write_threads", 4)
 
 ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/GANGLIA/package/scripts/status_params.py

@@ -22,4 +22,4 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']
+pid_dir = config['configurations']['ganglia-env']['ganglia_runtime_dir']

+ 58 - 36
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/templates/hbase-env.sh.j2 → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/configuration/hbase-env.xml

@@ -1,39 +1,57 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>HBase RegionServer Heap Size.</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>HBase Master Heap Size</description>
+  </property>
+  <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <description>HBase User Name.</description>
+  </property>
+  
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hbase-env.sh content</description>
+    <value>
 # Set environment variables here.
 
 # The java implementation to use. Java 1.6 required.
@@ -98,3 +116,7 @@ export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_c
 export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
 export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
 {% endif %}
+    </value>
+  </property>
+
+</configuration>
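
The {{...}} tokens inside the content value are Jinja2 expressions; when a script writes hbase-env.sh, InlineTemplate resolves them against variables from params.py. A standalone illustration using plain Jinja2, which InlineTemplate wraps (the rendered path is an assumption based on the params.py defaults shown below):

from jinja2 import Template

snippet = ('export HBASE_OPTS="$HBASE_OPTS '
           '-Djava.security.auth.login.config={{client_jaas_config_file}}"')
print(Template(snippet).render(
    client_jaas_config_file="/etc/hbase/conf/hbase_client_jaas.conf"))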

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/metainfo.xml

@@ -112,9 +112,9 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>hbase-policy</config-type>
         <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
         <config-type>hbase-log4j</config-type>
       </configuration-dependencies>
 

+ 4 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/hbase.py

@@ -73,7 +73,10 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
       group = params.user_group
     )
   
-  hbase_TemplateConfig( 'hbase-env.sh')     
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner=params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template)
+  )     
        
   hbase_TemplateConfig( params.metric_prop_file_name,
     tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'

+ 18 - 17
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/params.py

@@ -34,37 +34,38 @@ hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = config['commandParams']['mark_draining_only']
 
 hbase_user = status_params.hbase_user
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 # this is "hadoop-metrics2-hbase.properties" for 2.x stacks
 metric_prop_file_name = "hadoop-metrics.properties"
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
 
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
 
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
 regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
 
 pid_dir = status_params.pid_dir
 tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{hbase_conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{hbase_conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{hbase_conf_dir}/hbase_regionserver_jaas.conf"))
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
 
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
 ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
 
-rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
+rs_hosts = config['clusterHostInfo']['slave_hosts'] #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
 
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+smoke_test_user = config['configurations']['hadoop-env']['smokeuser']
+smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
 
 if security_enabled:
@@ -74,9 +75,9 @@ if security_enabled:
 
 master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
 regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hadoop-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:
   kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
 else:
@@ -94,9 +95,9 @@ hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/scripts/status_params.py

@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['hbase_pid_dir']
-hbase_user = config['configurations']['global']['hbase_user']
+pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+hbase_user = config['configurations']['hbase-env']['hbase_user']

+ 0 - 87
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/global.xml

@@ -1,87 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <value>false</value>
-    <description>Whether to ignores failures on users and group creation</description>
-  </property>
-  
-</configuration>

+ 105 - 36
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/templates/hadoop-env.sh.j2 → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml

@@ -1,39 +1,104 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures on user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Proxy user group.</description>
+  </property>
+  
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
 # Set Hadoop-specific environment variables here.
 
 # The only required environment variable is JAVA_HOME.  All others are
@@ -140,3 +205,7 @@ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
 #Mostly required for hadoop 2.0
 export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
+</configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/metainfo.xml

@@ -126,8 +126,8 @@
 
       <configuration-dependencies>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
         <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
         <config-type>hadoop-policy</config-type>
         <config-type>hdfs-log4j</config-type>
       </configuration-dependencies>

+ 18 - 19
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py

@@ -31,15 +31,15 @@ else:
 #security params
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
 
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
@@ -83,20 +83,19 @@ if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
 #users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = status_params.hdfs_user
 
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
 smoke_user_group = "users"
 
 #hadoop params
@@ -104,7 +103,7 @@ hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
 hadoop_bin = "/usr/lib/hadoop/bin"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 
 dfs_domain_socket_path = "/var/lib/hadoop-hdfs/dn_socket"
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
@@ -129,9 +128,9 @@ dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir']
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/status_params.py

@@ -21,8 +21,8 @@ from resource_management import *
 
 config = Script.get_config()
 
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
 datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
 namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")

+ 40 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/hive-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>hive_database_type</name>
     <value>mysql</value>
@@ -97,4 +97,43 @@
     <description>WebHCat User.</description>
   </property>
   
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hive-env.sh content</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the jvm started by hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over a large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}:${HIVE_AUX_JARS_PATH}
+else
+  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}
+fi
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
 </configuration>
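The content property above stores the entire hive-env.sh as an inline Jinja-style template; placeholders such as {{hive_heapsize}} and {{conf_dir}} are filled from the service params at deploy time. A minimal rendering sketch, assuming the jinja2 package as a stand-in for the InlineTemplate machinery (the values are illustrative, not the stack defaults):

from jinja2 import Template

# Hypothetical stand-in for config['configurations']['hive-env']['content']
hive_env_content = (
    'export HADOOP_HEAPSIZE="{{hive_heapsize}}"\n'
    "export HIVE_CONF_DIR={{conf_dir}}\n"
)

rendered = Template(hive_env_content).render(
    hive_heapsize="1024",  # assumed value; the scripts read hive.heapsize from hive-site
    conf_dir="/etc/hive/conf",
)
print(rendered)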

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml

@@ -128,7 +128,7 @@
 
       <configuration-dependencies>
         <config-type>hive-site</config-type>
-        <config-type>global</config-type>
+        <config-type>hive-env</config-type>
         <config-type>hive-log4j</config-type>
         <config-type>hive-exec-log4j</config-type>
       </configuration-dependencies>
@@ -165,8 +165,8 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>hive-site</config-type>
+        <config-type>hive-env</config-type>
       </configuration-dependencies>
 
     </service>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/hive.py

@@ -94,7 +94,7 @@ def hive(name=None):
   File(format("{hive_config_dir}/hive-env.sh"),
        owner=params.hive_user,
        group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+       content=InlineTemplate(params.hive_env_sh_template, conf_dir=hive_config_dir)
   )
 
   crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
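InlineTemplate renders the string pulled from the hive-env content key rather than a packaged .j2 file, which is what lets hive-env.sh.j2 be deleted below. A rough, self-contained approximation of what the File resource ends up doing here, with ownership handling simplified (an assumed sketch, not the real resource implementation, which also manages permissions and idempotence):

import grp
import os
import pwd

from jinja2 import Template

def write_env_file(path, template_text, owner, group, **template_vars):
    # Render the inline template and write the result, then fix ownership.
    with open(path, "w") as handle:
        handle.write(Template(template_text).render(**template_vars))
    os.chown(path, pwd.getpwnam(owner).pw_uid, grp.getgrnam(group).gr_gid)

# Example call shape (names assumed from params.py above):
# write_env_file("/etc/hive/conf/hive-env.sh", hive_env_sh_template,
#                owner="hive", group="hadoop", conf_dir="/etc/hive/conf")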

+ 19 - 18
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py

@@ -31,7 +31,7 @@ hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.opti
 hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
 
 #users
-hive_user = config['configurations']['global']['hive_user']
+hive_user = config['configurations']['hive-env']['hive_user']
 hive_lib = '/usr/lib/hive/lib/'
 #JDBC driver jar name
 hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
@@ -56,26 +56,26 @@ hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
 
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 smoke_test_sql = "/tmp/hiveserver2.sql"
 smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
 
 #hive_env
 hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
 
 #hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
 
 #Starting hiveserver2
 start_hiveserver2_script = 'startHiveserver2.sh'
@@ -88,8 +88,8 @@ hive_metastore_pid = status_params.hive_metastore_pid
 java_share_dir = '/usr/share/java'
 driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
 
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
+hdfs_user =  config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['hadoop-env']['user_group']
 artifact_dir = "/tmp/HDP-artifacts/"
 
 target = format("{hive_lib}/{jdbc_jar_name}")
@@ -100,14 +100,15 @@ driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
 start_hiveserver2_path = "/tmp/start_hiveserver2_script"
 start_metastore_path = "/tmp/start_metastore_script"
 
-hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+hive_aux_jars_path = config['configurations']['hive-env']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
 java64_home = config['hostLevelParams']['java_home']
+hive_env_sh_template = config['configurations']['hive-env']['content']
 
 ##### MYSQL
 
-db_name = config['configurations']['global']['hive_database_name']
+db_name = config['configurations']['hive-env']['hive_database_name']
 mysql_user = "mysql"
 mysql_group = 'mysql'
 mysql_host = config['clusterHostInfo']['hive_mysql_host']
@@ -122,11 +123,11 @@ hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
 
 hcat_dbroot = hcat_lib
 
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
 
 hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']   #hcat_log_dir
 
 hadoop_conf_dir = '/etc/hadoop/conf'
 
@@ -151,9 +152,9 @@ hive_hdfs_user_mode = 0700
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py

@@ -22,12 +22,12 @@ from resource_management import *
 
 config = Script.get_config()
 
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
 hive_pid = 'hive-server.pid'
 
 hive_metastore_pid = 'hive.pid'
 
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
+hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
 
 if System.get_instance().os_family == "suse":
   daemon_name = 'mysql'

+ 0 - 78
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/templates/hive-env.sh.j2

@@ -1,78 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the jvm stared by hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hive_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra ibraries required for hive compilation/execution can be controlled by:
-if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}:${HIVE_AUX_JARS_PATH}
-else
-  export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}
-fi
-export METASTORE_PORT={{hive_metastore_port}}

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/configuration/mapred-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>mapred_local_dir</name>
     <value>/hadoop/mapred</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/metainfo.xml

@@ -103,8 +103,8 @@
       <configuration-dependencies>
         <config-type>capacity-scheduler</config-type>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
         <config-type>mapred-site</config-type>
+        <config-type>mapred-env</config-type>
         <config-type>mapred-queue-acls</config-type>
         <config-type>mapreduce-log4j</config-type>
       </configuration-dependencies>

+ 9 - 9
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/params.py

@@ -36,18 +36,18 @@ tasktracker_pid_file = status_params.tasktracker_pid_file
 
 hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
 hadoop_bin = "/usr/lib/hadoop/bin"
-user_group = config['configurations']['global']['user_group']
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-mapred_log_dir_prefix = default("mapred_log_dir_prefix",hdfs_log_dir_prefix)
+user_group = config['configurations']['hadoop-env']['user_group']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+mapred_log_dir_prefix = hdfs_log_dir_prefix
 mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
 hadoop_jar_location = "/usr/lib/hadoop/"
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #exclude file
 mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
@@ -60,9 +60,9 @@ mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapred.
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/MAPREDUCE/package/scripts/status_params.py

@@ -23,8 +23,8 @@ from resource_management import *
 
 config = Script.get_config()
 
-mapred_user = config['configurations']['global']['mapred_user']
-pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}")
 
 jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid")

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/nagios-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>nagios_user</name>
     <value>nagios</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml

@@ -115,7 +115,7 @@
         </osSpecific>
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>nagios-env</config-type>
       </configuration-dependencies>      
       <monitoringService>true</monitoringService>
     </service>

+ 10 - 10
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/scripts/params.py

@@ -45,7 +45,7 @@ nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
 nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
 nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
 eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
+nagios_principal_name = default("/configurations/hadoop-env/nagios_principal_name", "nagios")
 hadoop_ssl_enabled = False
 
 namenode_metadata_port = get_port_from_url(config['configurations']['core-site']['fs.default.name'])
@@ -74,7 +74,7 @@ mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
 
 # this is different for HDP2
 nn_metrics_property = "FSNamesystemMetrics"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
+clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK 
 
 
 java64_home = config['hostLevelParams']['java_home']
@@ -82,8 +82,8 @@ check_cpu_on = is_jdk_greater_6(java64_home)
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+nagios_keytab_path = default("/configurations/hadoop-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 ganglia_port = "8651"
 ganglia_collector_slaves_port = "8660"
@@ -105,12 +105,12 @@ else:
   htpasswd_cmd = "htpasswd"
   nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
   
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
+nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
+nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
+user_group = config['configurations']['hadoop-env']['user_group']
+nagios_contact = config['configurations']['nagios-env']['nagios_contact']
 
 namenode_host = default("/clusterHostInfo/namenode_host", None)
 _snamenode_host = default("/clusterHostInfo/snamenode_host", None)
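Two lookup styles appear in this hunk: required keys are indexed directly (and fail loudly if absent), while optional ones go through default() with a slash-separated path into the command JSON, so the fallback applies only when the key is genuinely missing; the old bare-key form (e.g. default("nagios_keytab_path", ...)) looked only at the top level and in practice just returned the fallback. A minimal sketch of such a path-based helper over a plain nested dict (assumed behavior, standing in for resource_management's default()):

def default(path, fallback, config=None):
    # Walk a '/'-separated path through nested dicts; return fallback on any miss.
    node = config or {}
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

config = {"configurations": {"hadoop-env": {
    "nagios_keytab_path": "/etc/security/keytabs/nagios.service.keytab"}}}
print(default("/configurations/hadoop-env/nagios_keytab_path", "/tmp/fallback", config))
print(default("/configurations/hadoop-env/missing_key", "fallback-used", config))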

+ 61 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/configuration/oozie-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>oozie_user</name>
     <value>oozie</value>
@@ -57,4 +57,64 @@
     <description>The admin port Oozie server runs.</description>
   </property>  
 
+  <!-- oozie-env.sh -->
+  <property>
+    <name>content</name>
+    <description>oozie-env.sh content</description>
+    <value>
+#!/bin/bash
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+export JRE_HOME={{java_home}}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port Oozie server runs
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/metainfo.xml

@@ -102,8 +102,8 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
         <config-type>oozie-log4j</config-type>
       </configuration-dependencies>
     </service>

+ 4 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/oozie.py

@@ -44,9 +44,10 @@ def oozie(is_server=False
     group = params.user_group
   )
   
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
-  )
+  File(format("{conf_dir}/oozie-env.sh"),
+       owner=params.oozie_user,
+       content=InlineTemplate(params.oozie_env_sh_template)
+  )  
 
   if (params.log4j_props != None):
     File(format("{params.conf_dir}/oozie-log4j.properties"),

+ 14 - 13
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/params.py

@@ -25,11 +25,11 @@ import status_params
 config = Script.get_config()
 
 ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 conf_dir = "/etc/oozie/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
@@ -42,15 +42,15 @@ hadoop_jar_location = "/usr/lib/hadoop/"
 # for HDP2 it's "/usr/share/HDP-oozie/ext-2.2.zip"
 ext_js_path = "/usr/share/HDP-oozie/ext.zip"
 oozie_libext_dir = "/usr/lib/oozie/libext"
-lzo_enabled = config['configurations']['global']['lzo_enabled']
+lzo_enabled = config['configurations']['mapred-env']['lzo_enabled']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
+smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+oozie_keytab = config['configurations']['hadoop-env']['oozie_keytab']
 
 oracle_driver_jar_name = "ojdbc6.jar"
 java_share_dir = "/usr/share/java"
@@ -59,14 +59,15 @@ java_home = config['hostLevelParams']['java_home']
 oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
 oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
 oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-oozie_server_admin_port = config['configurations']['global']['oozie_admin_port']
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
 oozie_lib_dir = "/var/lib/oozie/"
 oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
 
 jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
 
 if jdbc_driver_name == "com.mysql.jdbc.Driver":
   jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
@@ -101,9 +102,9 @@ oozie_hdfs_user_mode = 0775
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/scripts/status_params.py

@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
 pid_file = format("{oozie_pid_dir}/oozie.pid")

+ 0 - 88
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/OOZIE/package/templates/oozie-env.sh.j2

@@ -1,88 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-export JRE_HOME={{java_home}}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-export OOZIE_HTTP_PORT={{oozie_server_port}}
-
-# The admin port Oozie server runs
-#
-export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

+ 9 - 4
ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/configuration/pig-env.xml

@@ -20,10 +20,15 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
+  <!-- pig-env.sh -->
   <property>
-    <name>tez_user</name>
-    <value>tez</value>
-    <description></description>
+    <name>content</name>
+    <description>pig-env.sh content</description>
+    <value>
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+    </value>
   </property>
+  
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/metainfo.xml

@@ -52,7 +52,7 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>pig-env</config-type>
         <config-type>pig-log4j</config-type>
       </configuration-dependencies>
 

+ 6 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/params.py

@@ -26,13 +26,14 @@ config = Script.get_config()
 
 pig_conf_dir = "/etc/pig/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
+user_group = config['configurations']['hadoop-env']['user_group']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+pig_env_sh_template = config['configurations']['pig-env']['content']
 
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']

+ 7 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/scripts/pig.py

@@ -30,8 +30,13 @@ def pig():
     group = params.user_group
   )
 
-  pig_TemplateConfig( ['pig-env.sh','pig.properties'])
-
+  File(format("{pig_conf_dir}/pig-env.sh"),
+       owner=params.hdfs_user,
+       content=InlineTemplate(params.pig_env_sh_template)
+  )
+  
+  pig_TemplateConfig( ['pig.properties'])
+  
   if (params.log4j_props != None):
     File(format("{params.pig_conf_dir}/log4j.properties"),
          mode=0644,

+ 0 - 36
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/PIG/package/templates/pig-env.sh.j2

@@ -1,36 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

+ 24 - 25
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/configuration/sqoop-env.xml

@@ -20,31 +20,30 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
+  <!-- sqoop-env.sh -->
   <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>Pid Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
+    <name>content</name>
+    <description>sqoop-env.sh content</description>
+    <value>
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
 
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
+
+#Set the path to where zookeeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"
+    </value>
+  </property>
+  
 </configuration>

+ 3 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/metainfo.xml

@@ -70,6 +70,9 @@
         <scriptType>PYTHON</scriptType>
         <timeout>300</timeout>
       </commandScript>
+      <configuration-dependencies>
+        <config-type>sqoop-env</config-type>
+      </configuration-dependencies>
     </service>
   </services>
 </metainfo>

+ 6 - 5
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/params.py

@@ -22,8 +22,9 @@ config = Script.get_config()
 
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
+user_group = config['configurations']['hadoop-env']['user_group']
+sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
 
 sqoop_conf_dir = "/usr/lib/sqoop/conf"
 hbase_home = "/usr"
@@ -32,6 +33,6 @@ zoo_conf_dir = "/etc/zookeeper"
 sqoop_lib = "/usr/lib/sqoop/lib"
 sqoop_user = "sqoop"
 
-keytab_path = config['configurations']['global']['keytab_path']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+keytab_path = config['configurations']['hadoop-env']['keytab_path']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

+ 5 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/scripts/sqoop.py

@@ -28,7 +28,11 @@ def sqoop(type=None):
             owner = params.sqoop_user,
             group = params.user_group
   )
-  sqoop_TemplateConfig("sqoop-env.sh")
+  File(format("{sqoop_conf_dir}/sqoop-env.sh"),
+       owner=params.sqoop_user,
+       content=InlineTemplate(params.sqoop_env_sh_template)
+  )
+  
   File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
           owner = params.sqoop_user,
           group = params.user_group

+ 0 - 36
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/SQOOP/package/templates/sqoop-env.sh.j2

@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
-
-#Set the path for where zookeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

+ 54 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-env.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <!-- webhcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.sh content</description>
+    <value>
+# The file containing the running pid
+PID_FILE={{pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=/usr/lib/hadoop
+    </value>
+  </property>
+  
+</configuration>

+ 30 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml

@@ -123,4 +123,34 @@ limitations under the License.
     <description>Time out for templeton api</description>
   </property>
 
+  <!-- webhcat-env.sh -->
+  <property>
+    <name>content</name>
+    <description>webhcat-env.sh content</description>
+    <value>
+# The file containing the running pid
+PID_FILE={{pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=/usr/lib/hadoop
+    </value>
+  </property>
+  
 </configuration>

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml

@@ -88,6 +88,7 @@
       </commandScript>
       <configuration-dependencies>
         <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
       </configuration-dependencies>
     </service>
   </services>

+ 11 - 11
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py

@@ -25,13 +25,13 @@ import status_params
 # server configurations
 config = Script.get_config()
 
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
-download_url = config['configurations']['global']['apache_artifacts_download_url']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
 
 config_dir = '/etc/hcatalog/conf'
 
-templeton_log_dir = config['configurations']['global']['hcat_log_dir']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
 templeton_pid_dir = status_params.templeton_pid_dir
 
 pid_file = status_params.pid_file
@@ -40,16 +40,16 @@ hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.con
 templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
 
 hadoop_home = '/usr'
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
 
 webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #hdfs directories
 webhcat_apps_dir = "/apps/webhcat"
@@ -61,9 +61,9 @@ webhcat_hdfs_user_mode = 0755
 hostname = config["hostname"]
 security_param = "true" if security_enabled else "false"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/status_params.py

@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
+templeton_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
 pid_file = format('{templeton_pid_dir}/webhcat.pid')

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat.py

@@ -67,7 +67,7 @@ def webhcat():
   File(format("{config_dir}/webhcat-env.sh"),
        owner=params.webhcat_user,
        group=params.user_group,
-       content=Template('webhcat-env.sh.j2')
+       content=InlineTemplate(params.webhcat_env_sh_template)
   )
 
   if params.security_enabled:

+ 0 - 63
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/templates/webhcat-env.sh.j2

@@ -1,63 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The file containing the running pid
-PID_FILE={{pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop

+ 19 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/ZOOKEEPER/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/configuration/zookeeper-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>zk_user</name>
     <value>zookeeper</value>
@@ -61,5 +61,23 @@
     <value>2181</value>
     <description>Port for running ZK Server.</description>
   </property>
+  
+  <!-- zookeeper-env.sh -->
+  <property>
+    <name>content</name>
+    <description>zookeeper-env.sh content</description>
+    <value>
+export JAVA_HOME={{java64_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
 
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}
+    </value>
+  </property>
 </configuration>
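The zookeeper-env.sh content above is the only inline template here that uses a Jinja conditional: the JAAS exports appear in the rendered file only when security_enabled is true. A small standalone sketch of that behavior, again assuming jinja2 as a stand-in for InlineTemplate:

from jinja2 import Template

zk_env = (
    "export ZOO_LOG_DIR={{zk_log_dir}}\n"
    "{% if security_enabled %}"
    'export SERVER_JVMFLAGS="$SERVER_JVMFLAGS '
    '-Djava.security.auth.login.config={{zk_server_jaas_file}}"\n'
    "{% endif %}"
)

# With security off, the JAAS export disappears entirely.
print(Template(zk_env).render(zk_log_dir="/var/log/zookeeper", security_enabled=False))
print(Template(zk_env).render(
    zk_log_dir="/var/log/zookeeper",
    security_enabled=True,
    zk_server_jaas_file="/etc/zookeeper/conf/zookeeper_jaas.conf",
))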

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/metainfo.xml

@@ -64,7 +64,7 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>zookeeper-env</config-type>
         <config-type>zookeeper-log4j</config-type>
       </configuration-dependencies>
       <restartRequiredAfterChange>true</restartRequiredAfterChange>      

+ 14 - 13
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/params.py

@@ -26,23 +26,24 @@ import status_params
 config = Script.get_config()
 
 config_dir = "/etc/zookeeper/conf"
-zk_user =  config['configurations']['global']['zk_user']
+zk_user =  config['configurations']['zookeeper-env']['zk_user']
 hostname = config['hostname']
 zk_bin = '/usr/lib/zookeeper/bin'
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+zk_env_sh_template = config['configurations']['zookeeper-env']['content']
 
 smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
 
-zk_log_dir = config['configurations']['global']['zk_log_dir']
-zk_data_dir = config['configurations']['global']['zk_data_dir']
+zk_log_dir = config['configurations']['zookeeper-env']['zk_log_dir']
+zk_data_dir = config['configurations']['zookeeper-env']['zk_data_dir']
 zk_pid_dir = status_params.zk_pid_dir
 zk_pid_file = status_params.zk_pid_file
 zk_server_heapsize = "-Xmx1024m"
 
-tickTime = config['configurations']['global']['tickTime']
-initLimit = config['configurations']['global']['initLimit']
-syncLimit = config['configurations']['global']['syncLimit']
-clientPort = config['configurations']['global']['clientPort']
+tickTime = config['configurations']['zookeeper-env']['tickTime']
+initLimit = config['configurations']['zookeeper-env']['initLimit']
+syncLimit = config['configurations']['zookeeper-env']['syncLimit']
+clientPort = config['configurations']['zookeeper-env']['clientPort']
 
 if 'zoo.cfg' in config['configurations']:
   zoo_cfg_properties_map = config['configurations']['zoo.cfg']
@@ -50,7 +51,7 @@ else:
   zoo_cfg_properties_map = {}
 zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
 
-zk_principal_name = default("zookeeper_principal_name", "zookeeper@EXAMPLE.COM")
+zk_principal_name = default("/configurations/hadoop-env/zookeeper_principal_name", "zookeeper@EXAMPLE.COM")
 zk_principal = zk_principal_name.replace('_HOST',hostname.lower())
 
 java64_home = config['hostLevelParams']['java_home']
@@ -58,15 +59,15 @@ java64_home = config['hostLevelParams']['java_home']
 zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
 zookeeper_hosts.sort()
 
-zk_keytab_path = config['configurations']['global']['zookeeper_keytab_path']
+zk_keytab_path = config['configurations']['hadoop-env']['zookeeper_keytab_path']
 zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
 zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 #log4j.properties
 if (('zookeeper-log4j' in config['configurations']) and ('content' in config['configurations']['zookeeper-log4j'])):

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/status_params.py

@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-zk_pid_dir = config['configurations']['global']['zk_pid_dir']
+zk_pid_dir = config['configurations']['zookeeper-env']['zk_pid_dir']
 zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

+ 7 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/scripts/zookeeper.py

@@ -34,7 +34,13 @@ def zookeeper(type = None):
   )
 
   configFile("zoo.cfg", template_name="zoo.cfg.j2")
-  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
+  
+  File(format("{config_dir}/zookeeper-env.sh"),
+       content=InlineTemplate(params.zk_env_sh_template),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+  
   configFile("configuration.xsl", template_name="configuration.xsl.j2")
 
   Directory(params.zk_pid_dir,

+ 0 - 44
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2

@@ -1,44 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-export JAVA_HOME={{java64_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}
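
The removed zookeeper-env.sh.j2 above and the new File/InlineTemplate call in zookeeper.py are two halves of the same move: the env script body now lives in the zookeeper-env config type and is rendered in place. A minimal sketch of that rendering with plain jinja2 and placeholder values (InlineTemplate resolves these names from params automatically):

from jinja2 import Template  # stands in for resource_management's InlineTemplate

zk_env_sh_template = (
    "export JAVA_HOME={{ java64_home }}\n"
    "export ZOO_LOG_DIR={{ zk_log_dir }}\n"
    "export ZOOPIDFILE={{ zk_pid_file }}\n"
)

# Placeholder values for illustration only.
print(Template(zk_env_sh_template).render(
    java64_home="/usr/jdk64/jdk1.6.0_31",
    zk_log_dir="/var/log/zookeeper",
    zk_pid_file="/var/run/zookeeper/zookeeper_server.pid",
))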

+ 15 - 14
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -31,8 +31,9 @@ java_home = config['hostLevelParams']['java_home']
 #hadoop params
 hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_conf_empty_dir = "/etc/hadoop/conf.empty"
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
 
 #hadoop-env.sh
 java_home = config['hostLevelParams']['java_home']
@@ -43,22 +44,22 @@ if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.
 else:
   jsvc_path = "/usr/lib/bigtop-utils"
 
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
 
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
 ttnode_heapsize = "1024m"
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #users and groups
-hdfs_user = config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['hadoop-env']['user_group']

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -36,7 +36,7 @@ def setup_hadoop_env():
   )
   File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'),
        owner=tc_owner,
-       content=Template('hadoop-env.sh.j2')
+       content=InlineTemplate(params.hadoop_env_sh_template)
   )
 
 def setup_config():

+ 20 - 20
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py

@@ -24,26 +24,26 @@ import os
 config = Script.get_config()
 
 #users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-zk_user = config['configurations']['global']['zk_user']
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-storm_user = config['configurations']['global']['storm_user']
-tez_user = config['configurations']['global']['tez_user']
-falcon_user = config['configurations']['global']['falcon_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+zk_user = config['configurations']['zookeeper-env']['zk_user']
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
+storm_user = config['configurations']['storm-env']['storm_user']
+tez_user = config['configurations']['tez-env']['tez_user']
+falcon_user = config['configurations']['falcon-env']['falcon_user']
 
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
 smoke_user_group =  "users"
 mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
 
@@ -100,7 +100,7 @@ jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already
 jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
 jce_location = config['hostLevelParams']['jdk_location']
 jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("ignore_groupsusers_create", False)
+ignore_groupsusers_create = default("/configurations/hadoop-env/ignore_groupsusers_create", False)
 
 #repo params
 repo_info = config['hostLevelParams']['repo_info']
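
Note the corrected default() call above: ignore_groupsusers_create is now looked up by its full /configurations/... path instead of a bare key name. An illustrative resolver — not the resource_management implementation — showing the path semantics:

def default(path, default_value, command_json):
    """Walk a /-separated path through nested dicts; fall back on any miss."""
    node = command_json
    for part in path.strip("/").split("/"):
        if not isinstance(node, dict) or part not in node:
            return default_value
        node = node[part]
    return node

cfg = {"configurations": {"hadoop-env": {"ignore_groupsusers_create": "false"}}}
print(default("/configurations/hadoop-env/ignore_groupsusers_create", False, cfg))  # -> "false"
print(default("/configurations/hadoop-env/no_such_key", False, cfg))                # -> False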

+ 19 - 19
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -28,11 +28,11 @@ _authentication = config['configurations']['core-site']['hadoop.security.authent
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
 #users and groups
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-yarn_user = config['configurations']['global']['yarn_user']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
 
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 #hosts
 hostname = config["hostname"]
@@ -72,13 +72,13 @@ if has_ganglia_server:
 hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
 hadoop_lib_home = "/usr/lib/hadoop/lib"
 hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
 hadoop_home = "/usr"
 hadoop_bin = "/usr/lib/hadoop/sbin"
 
 task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 #db params
 server_db_name = config['hostLevelParams']['db_name']
@@ -94,8 +94,8 @@ ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
 ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
 ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
 
-if 'rca_enabled' in config['configurations']['global']:
-  rca_enabled =  config['configurations']['global']['rca_enabled']
+if 'rca_enabled' in config['configurations']['hadoop-env']:
+  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
 else:
   rca_enabled = False
 rca_disabled_prefix = "###"
@@ -113,25 +113,25 @@ if str(config['hostLevelParams']['stack_version']).startswith('2.0') and System.
 else:
   jsvc_path = "/usr/lib/bigtop-utils"
 
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
 
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
+jtnode_opt_newsize = "200m"
+jtnode_opt_maxnewsize = "200m"
+jtnode_heapsize =  "1024m"
 ttnode_heapsize = "1024m"
 
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
 mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
 hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
 
 #log4j.properties
 
-yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
+yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
 
 dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/configuration/flume-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>flume_conf_dir</name>
     <value>/etc/flume/conf</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metainfo.xml

@@ -53,9 +53,9 @@
       </commandScript>
 
       <configuration-dependencies>
+        <config-type>flume-env</config-type>
         <config-type>flume-conf</config-type>
         <config-type>flume-log4j</config-type>
-        <config-type>global</config-type>
       </configuration-dependencies>
 
     </service>

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/params.py

@@ -21,8 +21,8 @@ from resource_management import *
 
 config = Script.get_config()
 
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 security_enabled = False
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/configuration/ganglia-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>ganglia_conf_dir</name>
     <value>/etc/ganglia/hdp</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/metainfo.xml

@@ -116,7 +116,7 @@
         </osSpecific>
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>ganglia-env</config-type>
       </configuration-dependencies>
       <monitoringService>true</monitoringService>
     </service>

+ 11 - 11
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/params.py

@@ -22,16 +22,16 @@ import os
 
 config = Script.get_config()
 
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = default("/configurations/global/ganglia_conf_dir", "/etc/ganglia/hdp")
+user_group = config['configurations']['hadoop-env']["user_group"]
+ganglia_conf_dir = default("/configurations/ganglia-env/ganglia_conf_dir", "/etc/ganglia/hdp")
 ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_runtime_dir = config['configurations']['ganglia-env']["ganglia_runtime_dir"]
 ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
 
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
+gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
+gmond_user = config['configurations']['ganglia-env']["gmond_user"]
 
-gmond_app_str = default("/configurations/global/enabled_app_servers", None)
+gmond_app_str = default("/configurations/hadoop-env/enabled_app_servers", None)
 gmond_apps = [] if gmond_app_str is None else gmond_app_str.split(',')
 gmond_apps = [x.strip() for x in gmond_apps]
 gmond_allowed_apps = ["Application1", "Application2", "Application3"]
@@ -45,11 +45,11 @@ else:
   modules_dir = "/usr/lib64/ganglia"
 
 webserver_group = "apache"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-rrdcached_timeout = default("/configurations/global/rrdcached_timeout", 3600)
-rrdcached_flush_timeout = default("/configurations/global/rrdcached_flush_timeout", 7200)
-rrdcached_delay = default("/configurations/global/rrdcached_delay", 1800)
-rrdcached_write_threads = default("/configurations/global/rrdcached_write_threads", 4)
+rrdcached_base_dir = config['configurations']['ganglia-env']["rrdcached_base_dir"]
+rrdcached_timeout = default("/configurations/ganglia-env/rrdcached_timeout", 3600)
+rrdcached_flush_timeout = default("/configurations/ganglia-env/rrdcached_flush_timeout", 7200)
+rrdcached_delay = default("/configurations/ganglia-env/rrdcached_delay", 1800)
+rrdcached_write_threads = default("/configurations/ganglia-env/rrdcached_write_threads", 4)
 
 ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/scripts/status_params.py

@@ -22,4 +22,4 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']
+pid_dir = config['configurations']['ganglia-env']['ganglia_runtime_dir']

+ 58 - 37
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HBASE/package/templates/hbase-env.sh.j2 → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/configuration/hbase-env.xml

@@ -1,40 +1,57 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <description>Pid Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>HBase RegionServer Heap Size.</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>HBase Master Heap Size</description>
+  </property>
+  <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <description>HBase User Name.</description>
+  </property>
+
+  <!-- hbase-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hbase-env.sh content</description>
+    <value>
 # Set environment variables here.
 
 # The java implementation to use. Java 1.6 required.
@@ -99,3 +116,7 @@ export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_c
 export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
 export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
 {% endif %}
+    </value>
+  </property>
+
+</configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/metainfo.xml

@@ -102,9 +102,9 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>hbase-policy</config-type>
         <config-type>hbase-site</config-type>
+        <config-type>hbase-env</config-type>
         <config-type>hbase-log4j</config-type>
       </configuration-dependencies>
 

+ 5 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/hbase.py

@@ -78,8 +78,11 @@ def hbase(name=None # 'master' or 'regionserver' or 'client'
       owner = params.hbase_user,
       group = params.user_group
     )
-  
-  hbase_TemplateConfig( 'hbase-env.sh')     
+
+  File(format("{hbase_conf_dir}/hbase-env.sh"),
+       owner = params.hbase_user,
+       content=InlineTemplate(params.hbase_env_sh_template)
+  )
        
   hbase_TemplateConfig( params.metric_prop_file_name,
     tag = 'GANGLIA-MASTER' if name == 'master' else 'GANGLIA-RS'

+ 18 - 17
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/params.py

@@ -34,10 +34,10 @@ hbase_excluded_hosts = config['commandParams']['excluded_hosts']
 hbase_drain_only = config['commandParams']['mark_draining_only']
 
 hbase_user = status_params.hbase_user
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 
 # this is "hadoop-metrics.properties" for 1.x stacks
 metric_prop_file_name = "hadoop-metrics2-hbase.properties"
@@ -45,10 +45,10 @@ metric_prop_file_name = "hadoop-metrics2-hbase.properties"
 # not supporting 32 bit jdk.
 java64_home = config['hostLevelParams']['java_home']
 
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+log_dir = config['configurations']['hbase-env']['hbase_log_dir']
+master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
 
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
 regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
 
 pid_dir = status_params.pid_dir
@@ -57,9 +57,9 @@ tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
 _local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
 local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])
 
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{hbase_conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{hbase_conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{hbase_conf_dir}/hbase_regionserver_jaas.conf"))
+client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
+master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
+regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
 
 ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
 ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
@@ -70,8 +70,8 @@ if 'slave_hosts' in config['clusterHostInfo']:
 else:
   rs_hosts = default('/clusterHostInfo/hbase_rs_hosts', '/clusterHostInfo/all_hosts') 
   
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+smoke_test_user = config['configurations']['hadoop-env']['smokeuser']
+smokeuser_permissions = "RWXCA"
 service_check_data = functions.get_unique_id_and_date()
 
 if security_enabled:
@@ -81,9 +81,9 @@ if security_enabled:
 
 master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
 regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['hadoop-env']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 if security_enabled:
   kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
 else:
@@ -94,16 +94,17 @@ if (('hbase-log4j' in config['configurations']) and ('content' in config['config
   log4j_props = config['configurations']['hbase-log4j']['content']
 else:
   log4j_props = None
-
+
+hbase_env_sh_template = config['configurations']['hbase-env']['content']
 
 hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']
 hbase_staging_dir = "/apps/hbase/staging"
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
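
get_kinit_path is now called with just the fixed search list, since kinit_path_local is no longer a configurable key. A stand-in for functions.get_kinit_path under that assumption — probe each candidate directory and return the first kinit found:

import os

def get_kinit_path(search_dirs):
    # Return the first existing kinit binary among the candidate directories.
    for directory in search_dirs:
        candidate = os.path.join(directory, "kinit")
        if os.path.isfile(candidate):
            return candidate
    return "kinit"  # assume PATH resolution as a last resort

print(get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"]))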

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HBASE/package/scripts/status_params.py

@@ -22,5 +22,5 @@ from resource_management import *
 
 config = Script.get_config()
 
-pid_dir = config['configurations']['global']['hbase_pid_dir']
-hbase_user = config['configurations']['global']['hbase_user']
+pid_dir = config['configurations']['hbase-env']['hbase_pid_dir']
+hbase_user = config['configurations']['hbase-env']['hbase_user']

+ 0 - 87
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/global.xml

@@ -1,87 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration supports_final="false">
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  
-  <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  <property>
-    <name>ignore_groupsusers_create</name>
-    <value>false</value>
-    <description>Whether to ignores failures on users and group creation</description>
-  </property>
-  
-</configuration>

+ 103 - 36
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/templates/hadoop-env.sh.j2 → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml

@@ -1,39 +1,102 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>HDFS user.</description>
+  </property>
+  <property>
+    <name>ignore_groupsusers_create</name>
+    <value>false</value>
+    <description>Whether to ignore failures on user and group creation</description>
+  </property>
+  <property>
+    <name>smokeuser</name>
+    <value>ambari-qa</value>
+    <description>User executing service checks</description>
+  </property>
+  <property>
+    <name>user_group</name>
+    <value>hadoop</value>
+    <description>Hadoop user group.</description>
+  </property>
+  
+  <!-- hadoop-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hadoop-env.sh content</description>
+    <value>
 # Set Hadoop-specific environment variables here.
 
 # The only required environment variable is JAVA_HOME.  All others are
@@ -144,3 +207,7 @@ export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
 
 #Mostly required for hadoop 2.0
 export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+  
+</configuration>
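
With hadoop-env.xml now carrying the whole hadoop-env.sh body in its content property, the after-INSTALL hook can materialize the file without a packaged template. A minimal sketch of the consumer side, using a stub in place of the real command JSON:

from jinja2 import Template  # stands in for resource_management's InlineTemplate

# Stub in place of Script.get_config(); the command JSON delivers the
# 'content' property exactly as written in hadoop-env.xml.
config = {"configurations": {"hadoop-env": {
    "content": "export HADOOP_HEAPSIZE={{hadoop_heapsize}}\n"}}}

hadoop_env_sh_template = config['configurations']['hadoop-env']['content']

with open("/tmp/hadoop-env.sh", "w") as out:  # real target: {hadoop_conf_dir}/hadoop-env.sh
    out.write(Template(hadoop_env_sh_template).render(hadoop_heapsize="1024"))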

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/metainfo.xml

@@ -174,8 +174,8 @@
 
       <configuration-dependencies>
         <config-type>core-site</config-type>
-        <config-type>global</config-type>
         <config-type>hdfs-site</config-type>
+        <config-type>hadoop-env</config-type>
         <config-type>hadoop-policy</config-type>
         <config-type>hdfs-log4j</config-type>
       </configuration-dependencies>

+ 17 - 17
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -31,16 +31,16 @@ else:
 #security params
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-falcon_user = config['configurations']['global']['falcon_user']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+falcon_user = config['configurations']['falcon-env']['falcon_user']
 
 #exclude file
 hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
 exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
 update_exclude_file_only = config['commandParams']['update_exclude_file_only']
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 #hosts
 hostname = config["hostname"]
 rm_host = default("/clusterHostInfo/rm_host", [])
@@ -86,20 +86,20 @@ if has_ganglia_server:
   ganglia_server_host = ganglia_server_hosts[0]
 
 #users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
+yarn_user = config['configurations']['yarn-env']['yarn_user']
+hbase_user = config['configurations']['hbase-env']['hbase_user']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+webhcat_user = config['configurations']['hive-env']['hcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+hive_user = config['configurations']['hive-env']['hive_user']
+smoke_user =  config['configurations']['hadoop-env']['smokeuser']
+mapred_user = config['configurations']['mapred-env']['mapred_user']
 hdfs_user = status_params.hdfs_user
 
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
+user_group = config['configurations']['hadoop-env']['user_group']
+proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
 smoke_user_group = "users"
 
 #hadoop params
@@ -107,7 +107,7 @@ hadoop_conf_dir = "/etc/hadoop/conf"
 hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
 hadoop_bin = "/usr/lib/hadoop/sbin"
 
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
 
 dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
 dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/status_params.py

@@ -21,8 +21,8 @@ from resource_management import *
 
 config = Script.get_config()
 
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
+hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
 datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
 namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")

+ 42 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/hive-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>hive_database_type</name>
     <value>mysql</value>
@@ -87,4 +87,45 @@
     <description>WebHCat User.</description>
   </property>
   
+  <!-- hive-env.sh -->
+  <property>
+    <name>content</name>
+    <description>hive-env.sh content</description>
+    <value>
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the jvm started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hive_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
+  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
+elif [ -d "/usr/lib/hive-hcatalog/" ]; then
+  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
+else
+  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
+fi
+export METASTORE_PORT={{hive_metastore_port}}
+    </value>
+  </property>
+  
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml

@@ -182,8 +182,8 @@
         <timeout>300</timeout>
       </commandScript>
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>hive-site</config-type>
+        <config-type>hive-env</config-type>
       </configuration-dependencies>
     </service>
 

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/hive.py

@@ -83,7 +83,7 @@ def hive(name=None):
   File(format("{hive_config_dir}/hive-env.sh"),
        owner=params.hive_user,
        group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+       content=InlineTemplate(params.hive_env_sh_template, conf_dir=hive_config_dir)
   )
 
   if name == 'metastore':

+ 20 - 19
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py

@@ -29,10 +29,10 @@ hive_server_conf_dir = "/etc/hive/conf.server"
 hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
 
 hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-hive_metastore_db_type = config['configurations']['global']['hive_database_type']
+hive_metastore_db_type = config['configurations']['hive-env']['hive_database_type']
 
 #users
-hive_user = config['configurations']['global']['hive_user']
+hive_user = config['configurations']['hive-env']['hive_user']
 hive_lib = '/usr/lib/hive/lib/'
 #JDBC driver jar name
 hive_jdbc_driver = config['configurations']['hive-site']['javax.jdo.option.ConnectionDriverName']
@@ -59,26 +59,26 @@ hive_server_host = config['clusterHostInfo']['hive_server_host'][0]
 hive_server_port = default('/configurations/hive-site/hive.server2.thrift.port',"10000")
 hive_url = format("jdbc:hive2://{hive_server_host}:{hive_server_port}")
 
-smokeuser = config['configurations']['global']['smokeuser']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 smoke_test_sql = "/tmp/hiveserver2.sql"
 smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smoke_user_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
 
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
 
 #hive_env
 hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_dbroot = config['configurations']['hive-env']['hive_dbroot']
+hive_log_dir = config['configurations']['hive-env']['hive_log_dir']
 hive_pid_dir = status_params.hive_pid_dir
 hive_pid = status_params.hive_pid
 
 #hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
+hive_database_name = config['configurations']['hive-env']['hive_database_name']
 
 #Starting hiveserver2
 start_hiveserver2_script = 'startHiveserver2.sh.j2'
@@ -91,8 +91,8 @@ hive_metastore_pid = status_params.hive_metastore_pid
 java_share_dir = '/usr/share/java'
 driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
 
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
+hdfs_user =  config['configurations']['hadoop-env']['hdfs_user']
+user_group = config['configurations']['hadoop-env']['user_group']
 artifact_dir = "/tmp/HDP-artifacts/"
 
 target = format("{hive_lib}/{jdbc_jar_name}")
@@ -103,13 +103,13 @@ driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
 start_hiveserver2_path = "/tmp/start_hiveserver2_script"
 start_metastore_path = "/tmp/start_metastore_script"
 
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 hive_heapsize = config['configurations']['hive-site']['hive.heapsize']
 java64_home = config['hostLevelParams']['java_home']
 
 ##### MYSQL
 
-db_name = config['configurations']['global']['hive_database_name']
+db_name = config['configurations']['hive-env']['hive_database_name']
 mysql_user = "mysql"
 mysql_group = 'mysql'
 mysql_host = config['clusterHostInfo']['hive_mysql_host']
@@ -135,11 +135,11 @@ else:
 
 hcat_dbroot = hcat_lib
 
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
+hcat_user = config['configurations']['hive-env']['hcat_user']
+webhcat_user = config['configurations']['hive-env']['webhcat_user']
 
 hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']
+hcat_log_dir = config['configurations']['hive-env']['hcat_log_dir']
 
 hadoop_conf_dir = '/etc/hadoop/conf'
 
@@ -156,6 +156,7 @@ else:
   log4j_exec_props = None
 
 daemon_name = status_params.daemon_name
+hive_env_sh_template = config['configurations']['hive-env']['content']
 
 hive_hdfs_user_dir = format("/user/{hive_user}")
 hive_hdfs_user_mode = 0700
@@ -163,15 +164,15 @@ hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.wareho
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 # Tez libraries
 tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
 tez_local_api_jars = '/usr/lib/tez/tez*.jar'
 tez_local_lib_jars = '/usr/lib/tez/lib/*.jar'
-tez_user = config['configurations']['global']['tez_user']
+tez_user = config['configurations']['tez-env']['tez_user']
 
 if System.get_instance().os_family == "debian":
   mysql_configname = '/etc/mysql/my.cnf'

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py

@@ -22,12 +22,12 @@ from resource_management import *
 
 config = Script.get_config()
 
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid_dir = config['configurations']['hive-env']['hive_pid_dir']
 hive_pid = 'hive-server.pid'
 
 hive_metastore_pid = 'hive.pid'
 
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir
+hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
 
 if System.get_instance().os_family == "suse" or System.get_instance().os_family == "debian":
   daemon_name = 'mysql'

+ 0 - 79
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/templates/hive-env.sh.j2

@@ -1,79 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the jvm stared by hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hive_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra ibraries required for hive compilation/execution can be controlled by:
-if [ "${HIVE_AUX_JARS_PATH}" != "" ]; then
-  export HIVE_AUX_JARS_PATH=${HIVE_AUX_JARS_PATH}
-elif [ -d "/usr/lib/hive-hcatalog/" ]; then
-  export HIVE_AUX_JARS_PATH=/usr/lib/hive-hcatalog/share/hcatalog/hive-hcatalog-core-*.jar
-else
-  export HIVE_AUX_JARS_PATH=/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar
-fi
-export METASTORE_PORT={{hive_metastore_port}}

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/configuration/nagios-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>nagios_user</name>
     <value>nagios</value>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml

@@ -157,7 +157,7 @@
         </osSpecific>
       </osSpecifics>
       <configuration-dependencies>
-        <config-type>global</config-type>
+        <config-type>nagios-env</config-type>
       </configuration-dependencies>
       <monitoringService>true</monitoringService>
     </service>

+ 11 - 11
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/scripts/params.py

@@ -79,7 +79,7 @@ nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
 nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
 nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
 eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
+nagios_principal_name = default("/configurations/hadoop-env/nagios_principal_name", "nagios")
 hadoop_ssl_enabled = False
 
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
@@ -114,7 +114,7 @@ drpc_port = config['configurations']['storm-site']['drpc.port']
 nimbus_port = config['configurations']['storm-site']['nimbus.thrift.port']
 supervisor_port = "56431"
 storm_rest_api_port = "8745"
-falcon_port = config['configurations']['global']['falcon_port']
+falcon_port = config['configurations']['falcon-env']['falcon_port']
 ahs_port = get_port_from_url(config['configurations']['yarn-site']['yarn.timeline-service.webapp.address'])
 
 # use sensible defaults for checkpoint as they are required by Nagios and 
@@ -131,7 +131,7 @@ else:
 
 # this is different for HDP1
 nn_metrics_property = "FSNamesystem"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
+clientPort = config['configurations']['zookeeper-env']['clientPort'] #ZK 
 
 
 java64_home = config['hostLevelParams']['java_home']
@@ -139,8 +139,8 @@ check_cpu_on = is_jdk_greater_6(java64_home)
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+nagios_keytab_path = default("/configurations/hadoop-env/nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 dfs_ha_enabled = False
 dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
@@ -195,12 +195,12 @@ hdp_mon_nagios_addons_path = format("{web_conf_dir}/hdp_mon_nagios_addons.conf")
 ambarinagios_php_dir = "/usr/share/hdp/nagios/"
 ambarinagios_php_filename = "nagios_alerts.php"
 
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
+nagios_user = config['configurations']['nagios-env']['nagios_user']
+nagios_group = config['configurations']['nagios-env']['nagios_group']
+nagios_web_login = config['configurations']['nagios-env']['nagios_web_login']
+nagios_web_password = config['configurations']['nagios-env']['nagios_web_password']
+user_group = config['configurations']['hadoop-env']['user_group']
+nagios_contact = config['configurations']['nagios-env']['nagios_contact']
 
 # - test for HDFS or HCFS (glusterfs)
 if 'namenode_host' in config['clusterHostInfo']:
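
Note the shape change in the default() calls above: they now take a full config path such as /configurations/hadoop-env/nagios_principal_name instead of a bare key, and kinit_path_local drops its bare-key default() entry from the search list entirely. A hedged approximation of what a path-based default() lookup does, assuming the agent-side command config is a nested dict; the real helper in resource_management may differ in details:

from resource_management.libraries.script.script import Script

def default(path, default_value):
    # Walk the nested config dict segment by segment, e.g.
    # "/configurations/hadoop-env/nagios_principal_name" ->
    # config["configurations"]["hadoop-env"]["nagios_principal_name"],
    # falling back to default_value if any segment is missing.
    node = Script.get_config()
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return default_value
        node = node[key]
    return node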

+ 69 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/global.xml → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/configuration/oozie-env.xml

@@ -20,7 +20,7 @@
  */
 -->
 
-<configuration supports_final="false">
+<configuration>
   <property>
     <name>oozie_user</name>
     <value>oozie</value>
@@ -57,4 +57,72 @@
     <description>The admin port Oozie server runs.</description>
   </property>
 
+  <!-- oozie-env.sh -->
+  <property>
+    <name>content</name>
+    <description>oozie-env.sh content</description>
+    <value>
+#!/bin/bash
+
+if [ -d "/usr/lib/bigtop-tomcat" ]; then
+  export OOZIE_CONFIG=${OOZIE_CONFIG:-/etc/oozie/conf}
+  export CATALINA_BASE=${CATALINA_BASE:-/var/lib/oozie/oozie-server}
+  export CATALINA_TMPDIR=${CATALINA_TMPDIR:-/var/tmp/oozie}
+  export OOZIE_CATALINA_HOME=/usr/lib/bigtop-tomcat
+fi
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+export JRE_HOME=${JAVA_HOME}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+export OOZIE_HTTP_PORT={{oozie_server_port}}
+
+# The admin port Oozie server runs
+#
+export OOZIE_ADMIN_PORT={{oozie_server_admin_port}}
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64
+    </value>
+  </property>
+
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/metainfo.xml

@@ -131,8 +131,8 @@
       </commandScript>
 
       <configuration-dependencies>
-        <config-type>global</config-type>
         <config-type>oozie-site</config-type>
+        <config-type>oozie-env</config-type>
         <config-type>oozie-log4j</config-type>
       </configuration-dependencies>
     </service>

+ 3 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/oozie.py

@@ -46,8 +46,9 @@ def oozie(is_server=False # TODO: see if we can remove this
     group = params.user_group
   )
   
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
+  File(format("{conf_dir}/oozie-env.sh"),
+    owner=params.oozie_user,
+    content=InlineTemplate(params.oozie_env_sh_template)
   )
 
   if (params.log4j_props != None):
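
TemplateConfig rendered a packaged .j2 file from the service's templates directory; the replacement File resource instead renders template text fetched from the oozie-env content property (params.oozie_env_sh_template). InlineTemplate treats that string as a Jinja2-style template and substitutes the {{...}} placeholders from script parameters. A standalone approximation using the jinja2 package directly, with sample values assumed purely for illustration:

from jinja2 import Template

# Stand-in for config['configurations']['oozie-env']['content']:
oozie_env_sh_template = (
    "export OOZIE_LOG={{oozie_log_dir}}\n"
    "export CATALINA_PID={{pid_file}}\n"
)

rendered = Template(oozie_env_sh_template).render(
    oozie_log_dir="/var/log/oozie",       # assumed sample value
    pid_file="/var/run/oozie/oozie.pid",  # assumed sample value
)
print(rendered)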

+ 14 - 12
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py

@@ -24,11 +24,11 @@ import status_params
 # server configurations
 config = Script.get_config()
 
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
+oozie_user = config['configurations']['oozie-env']['oozie_user']
+smokeuser = config['configurations']['hadoop-env']['smokeuser']
 conf_dir = "/etc/oozie/conf"
 hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
+user_group = config['configurations']['hadoop-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"
 check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
@@ -44,11 +44,12 @@ oozie_libext_dir = "/usr/lib/oozie/libext"
 _authentication = config['configurations']['core-site']['hadoop.security.authentication']
 security_enabled = ( not is_empty(_authentication) and _authentication == 'kerberos')
 
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
 oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
+smokeuser_keytab = config['configurations']['hadoop-env']['smokeuser_keytab']
+oozie_keytab = config['configurations']['hadoop-env']['oozie_keytab']
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
 
 oracle_driver_jar_name = "ojdbc6.jar"
 java_share_dir = "/usr/share/java"
@@ -57,10 +58,11 @@ java_home = config['hostLevelParams']['java_home']
 oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
 oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
 oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_log_dir = config['configurations']['oozie-env']['oozie_log_dir']
+oozie_data_dir = config['configurations']['oozie-env']['oozie_data_dir']
 oozie_server_port = get_port_from_url(config['configurations']['oozie-site']['oozie.base.url'])
-oozie_server_admin_port = config['configurations']['global']['oozie_admin_port']
+oozie_server_admin_port = config['configurations']['oozie-env']['oozie_admin_port']
+oozie_env_sh_template = config['configurations']['oozie-env']['content']
 oozie_lib_dir = "/var/lib/oozie/"
 oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
 
@@ -92,9 +94,9 @@ oozie_hdfs_user_mode = 0775
 #for create_hdfs_directory
 hostname = config["hostname"]
 hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-hdfs_user = config['configurations']['global']['hdfs_user']
-kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
+hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
+kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
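
The comment above refers to a pattern these params modules share: functools.partial pre-binds the arguments every HdfsDirectory call has in common (conf dir, HDFS user, keytab, kinit path), so call sites pass only what varies per directory. A hedged sketch using the variables defined above; the keyword names follow this file, but the import path and the resource's exact signature are assumptions:

import functools
from resource_management.libraries.resources.hdfs_directory import HdfsDirectory

# Pre-bind the shared arguments once; later calls then supply only the
# directory-specific ones, e.g. HdfsDirectory("/user/oozie", owner=oozie_user, mode=0775).
HdfsDirectory = functools.partial(
    HdfsDirectory,
    conf_dir=hadoop_conf_dir,
    hdfs_user=hdfs_user,
    security_enabled=security_enabled,
    keytab=hdfs_user_keytab,
    kinit_path_local=kinit_path_local,
)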

Some files were not shown because too many files changed in this diff