
AMBARI-7846. Change NameNode to start with: -XX:PermSize=128m -XX:MaxPermSize=256m by default

Srimanth Gunturi 10 years ago
parent
commit
b340f9a951
72 files changed, with 539 additions and 49 deletions
  1. + 2 - 0
      ambari-client/groovy-client/src/test/resources/service-config.json
  2. + 2 - 0
      ambari-client/groovy-client/src/test/resources/versions/service-config.json
  3. + 0 - 0
      ambari-client/python-client/src/test/python/TestAmbariClient.py
  4. + 2 - 2
      ambari-client/python-client/src/test/python/TestClusterModel.py
  5. + 24 - 0
      ambari-client/python-client/src/test/python/json/ambariclient_get_config.json
  6. + 2 - 0
      ambari-client/python-client/src/test/python/json/clustermodel_get_global_config.json
  7. + 2 - 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java
  8. + 4 - 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py
  9. + 4 - 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py
  10. + 12 - 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml
  11. + 4 - 2
      ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py
  12. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py
  13. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py
  14. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py
  15. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py
  16. + 12 - 2
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml
  17. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py
  18. + 2 - 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
  19. + 10 - 0
      ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/global.xml
  20. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py
  21. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py
  22. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py
  23. + 12 - 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml
  24. + 4 - 2
      ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py
  25. + 2 - 2
      ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml
  26. + 12 - 2
      ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml
  27. + 1 - 1
      ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
  28. + 1 - 2
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
  29. + 0 - 0
      ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json
  30. + 0 - 0
      ambari-server/src/test/python/stacks/1.3.2/configs/default.json
  31. + 0 - 0
      ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json
  32. + 0 - 0
      ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json
  33. + 2 - 0
      ambari-server/src/test/python/stacks/1.3.2/configs/secured.json
  34. + 3 - 1
      ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json
  35. + 3 - 1
      ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json
  36. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json
  37. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/default.json
  38. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json
  39. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json
  40. + 3 - 1
      ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json
  41. + 3 - 1
      ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json
  42. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json
  43. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json
  44. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/secured.json
  45. + 0 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json
  46. + 2 - 0
      ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json
  47. + 0 - 0
      ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json
  48. + 2 - 0
      ambari-server/src/test/python/stacks/2.1/configs/default.json
  49. + 0 - 0
      ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json
  50. + 0 - 0
      ambari-server/src/test/python/stacks/2.1/configs/secured.json
  51. + 0 - 0
      ambari-server/src/test/resources/api_testscripts/curl-setup-multiple-hbase-master.sh
  52. + 0 - 0
      ambari-server/src/test/resources/deploy_HDP2.sh
  53. + 10 - 0
      ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/global.xml
  54. + 12 - 2
      ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml
  55. + 10 - 0
      ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml
  56. + 10 - 0
      ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml
  57. + 10 - 0
      ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml
  58. + 10 - 0
      ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml
  59. + 2 - 0
      ambari-web/app/assets/data/configuration/global.json
  60. + 2 - 0
      ambari-web/app/assets/data/configurations/cluster_level_actual_configs.json
  61. + 2 - 0
      ambari-web/app/assets/data/configurations/cluster_level_configs.json
  62. + 24 - 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json
  63. + 20 - 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/global.json
  64. + 24 - 0
      ambari-web/app/assets/data/wizard/stack/hdp/version131/HDFS.json
  65. + 20 - 0
      ambari-web/app/assets/data/wizard/stack/hdp/version131/global.json
  66. + 24 - 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HDFS.json
  67. + 20 - 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/global.json
  68. + 1 - 1
      ambari-web/app/controllers/main/service/info/configs.js
  69. + 1 - 1
      ambari-web/app/controllers/wizard/step8_controller.js
  70. + 58 - 0
      ambari-web/app/data/BIGTOP/site_properties.js
  71. + 58 - 0
      ambari-web/app/data/HDP2/site_properties.js
  72. + 57 - 0
      ambari-web/app/data/site_properties.js
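
Taken together, the commit threads two new hadoop-env properties, namenode_opt_permsize and namenode_opt_maxpermsize, from the stack configuration XML through the params.py scripts and into the HADOOP_NAMENODE_OPTS / HADOOP_SECONDARYNAMENODE_OPTS templates. A minimal Python sketch of the resulting substitution, using the commit's default values and a plain str.format stand-in for Ambari's actual template engine:

# Substitute the new hadoop-env properties into the NameNode opts
# fragment the way the stack hadoop-env.xml templates below do.
hadoop_env = {
    "namenode_heapsize": "1024m",
    "namenode_opt_newsize": "200m",
    "namenode_opt_maxnewsize": "200m",
    "namenode_opt_permsize": "128m",     # new in this commit
    "namenode_opt_maxpermsize": "256m",  # new in this commit
}

opts_template = (
    "-server -XX:NewSize={namenode_opt_newsize}"
    " -XX:MaxNewSize={namenode_opt_maxnewsize}"
    " -XX:PermSize={namenode_opt_permsize}"
    " -XX:MaxPermSize={namenode_opt_maxpermsize}"
    " -Xms{namenode_heapsize} -Xmx{namenode_heapsize}"
)

print(opts_template.format(**hadoop_env))
# -server -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m
# -XX:MaxPermSize=256m -Xms1024m -Xmx1024m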

+ 2 - 0
ambari-client/groovy-client/src/test/resources/service-config.json

@@ -19,6 +19,8 @@
   "nodemanager_heapsize": "1024",
   "namenode_opt_newsize": "200m",
   "namenode_opt_maxnewsize": "200m",
+  "namenode_opt_permsize" : "128m",
+  "namenode_opt_maxpermsize" : "256m",
   "namenode_heapsize": "1024m",
   "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/",
   "ganglia_conf_dir": "/etc/ganglia/hdp",

+ 2 - 0
ambari-client/groovy-client/src/test/resources/versions/service-config.json

@@ -19,6 +19,8 @@
   "nodemanager_heapsize": "1024",
   "namenode_opt_newsize": "200m",
   "namenode_opt_maxnewsize": "200m",
+  "namenode_opt_permsize" : "128m",
+  "namenode_opt_maxpermsize" : "256m",
   "namenode_heapsize": "1024m",
   "namenode_formatted_mark_dir": "/var/run/hadoop/hdfs/namenode/formatted/",
   "ganglia_conf_dir": "/etc/ganglia/hdp",

File diff suppressed because it is too large
+ 0 - 0
ambari-client/python-client/src/test/python/TestAmbariClient.py


File diff suppressed because it is too large
+ 2 - 2
ambari-client/python-client/src/test/python/TestClusterModel.py


+ 24 - 0
ambari-client/python-client/src/test/python/json/ambariclient_get_config.json

@@ -769,6 +769,30 @@
         "type" : "global.xml"
       }
     },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode maximum permanent generation size",
+        "property_name" : "namenode_opt_maxpermsize",
+        "property_value" : "256",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_permsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode permanent generation size",
+        "property_name" : "namenode_opt_permsize",
+        "property_value" : "128",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
     {
       "href" : "http://localhost:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/proxyuser_group",
       "StackConfigurations" : {

+ 2 - 0
ambari-client/python-client/src/test/python/json/clustermodel_get_global_config.json

@@ -48,6 +48,8 @@
         "namenode_heapsize" : "1024m",
         "namenode_opt_maxnewsize" : "640m",
         "namenode_opt_newsize" : "200m",
+        "namenode_opt_permsize" : "128m",
+        "namenode_opt_maxpermsize" : "256m",
         "oozie_user" : "oozie",
         "proxyuser_group" : "users",
         "rrdcached_base_dir" : "/var/lib/ganglia/rrds",

+ 2 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java

@@ -979,6 +979,8 @@ public class BlueprintConfigurationProcessor {
     hadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater());
     hadoopEnvMap.put("namenode_opt_newsize", new MPropertyUpdater());
     hadoopEnvMap.put("namenode_opt_maxnewsize", new MPropertyUpdater());
+    hadoopEnvMap.put("namenode_opt_permsize", new MPropertyUpdater());
+    hadoopEnvMap.put("namenode_opt_maxpermsize", new MPropertyUpdater());
     hadoopEnvMap.put("dtnode_heapsize", new MPropertyUpdater());
     mapredEnvMap.put("jtnode_opt_newsize", new MPropertyUpdater());
     mapredEnvMap.put("jtnode_opt_maxnewsize", new MPropertyUpdater());

+ 4 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/after-INSTALL/scripts/params.py

@@ -46,8 +46,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"

+ 4 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/hooks/before-START/scripts/params.py

@@ -117,8 +117,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"

+ 12 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/configuration/hadoop-env.xml

@@ -56,6 +56,16 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
@@ -111,14 +121,14 @@ export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
 HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
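
One caveat when reusing these defaults: PermSize and MaxPermSize exist only on JVMs up to Java 7; Java 8 removed the permanent generation and merely warns that the options are ignored. A quick local sanity check (a sketch, not part of the commit):

# Run a no-op JVM invocation with the new flags. On JDK <= 7 this is
# silent; on JDK 8+ the JVM prints "ignoring option PermSize=128m"
# style warnings instead of failing.
import subprocess

subprocess.run(
    ["java", "-XX:PermSize=128m", "-XX:MaxPermSize=256m", "-version"],
    check=True,
)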

+ 4 - 2
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/HDFS/package/scripts/params.py

@@ -199,8 +199,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/after-INSTALL/scripts/params.py

@@ -42,8 +42,10 @@ else:
   jsvc_path = "/usr/libexec/bigtop-utils"
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-ANY/scripts/params.py

@@ -69,8 +69,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
 jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-INSTALL/scripts/params.py

@@ -51,8 +51,10 @@ else:
   jsvc_path = "/usr/libexec/bigtop-utils"
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = default("/configurations/hadoop-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/params.py

@@ -113,8 +113,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
 mapred_pid_dir_prefix = "/var/run/hadoop-mapreduce"

+ 12 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/configuration/hadoop-env.xml

@@ -56,6 +56,16 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
@@ -119,14 +129,14 @@ export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 export HADOOP_MAPRED_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
 export HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA -Dmapred.log.dir=$HADOOP_MAPRED_LOG_DIR ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HDFS/package/scripts/params.py

@@ -164,8 +164,10 @@ else:
   jsvc_path = "/usr/libexec/bigtop-utils"
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = default("/configurations/mapred-env/jtnode_opt_newsize","200m")
 jtnode_opt_maxnewsize = default("/configurations/mapred-env/jtnode_opt_maxnewsize","200m")

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml

@@ -110,14 +110,14 @@ export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
 HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"

+ 10 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/global.xml

@@ -86,6 +86,16 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1073741824</value>

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py

@@ -53,8 +53,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py

@@ -64,8 +64,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py

@@ -131,8 +131,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"

+ 12 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/hadoop-env.xml

@@ -56,6 +56,16 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
@@ -116,14 +126,14 @@ export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
 HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"

+ 4 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/package/scripts/params.py

@@ -222,8 +222,10 @@ else:
 
 hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
 namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
+namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
+namenode_opt_permsize = config['configurations']['hadoop-env']['namenode_opt_permsize']
+namenode_opt_maxpermsize = config['configurations']['hadoop-env']['namenode_opt_maxpermsize']
 
 jtnode_opt_newsize = "200m"
 jtnode_opt_maxnewsize = "200m"

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/GLUSTERFS/configuration/hadoop-env.xml

@@ -110,14 +110,14 @@ export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
 HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"

+ 12 - 2
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml

@@ -56,6 +56,16 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
@@ -111,14 +121,14 @@ export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
 HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java

@@ -820,7 +820,7 @@ public class AmbariMetaInfoTest {
   public void testPropertyCount() throws Exception {
     Set<PropertyInfo> properties = metaInfo.getProperties(STACK_NAME_HDP, STACK_VERSION_HDP_02, SERVICE_NAME_HDFS);
     // 3 empty properties
-    Assert.assertEquals(99, properties.size());
+    Assert.assertEquals(103, properties.size());
   }
 
   @Test

+ 1 - 2
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java

@@ -115,7 +115,6 @@ import org.apache.ambari.server.state.StackInfo;
 import org.apache.ambari.server.state.State;
 import org.apache.ambari.server.state.configgroup.ConfigGroup;
 import org.apache.ambari.server.state.configgroup.ConfigGroupFactory;
-import org.apache.ambari.server.state.stack.OsFamily;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostOpSucceededEvent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostStartEvent;
@@ -158,7 +157,7 @@ public class AmbariManagementControllerTest {
   private static final int STACK_VERSIONS_CNT = 12;
   private static final int REPOS_CNT = 3;
   private static final int STACKS_CNT = 2;
-  private static final int STACK_PROPERTIES_CNT = 99;
+  private static final int STACK_PROPERTIES_CNT = 103;
   private static final int STACK_COMPONENTS_CNT = 4;
   private static final int OS_CNT = 2;
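
Both count bumps follow from the four properties added to the HDP 0.2 test stack resources later in this diff (two in its global.xml, two in its hadoop-env.xml), assuming STACK_VERSION_HDP_02 refers to that test stack. A worked check:

# Why the expected property count moves from 99 to 103.
old_count = 99
added_in_global_xml = 2      # namenode_opt_permsize, namenode_opt_maxpermsize
added_in_hadoop_env_xml = 2  # the same two keys in the second test file
assert old_count + added_in_global_xml + added_in_hadoop_env_xml == 103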
 

File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/1.3.2/configs/default.hbasedecom.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/1.3.2/configs/default.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/1.3.2/configs/default.non_gmetad_host.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/1.3.2/configs/default_client.json


File diff suppressed because it is too large
+ 2 - 0
ambari-server/src/test/python/stacks/1.3.2/configs/secured.json


File diff suppressed because it is too large
+ 3 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/secured_client.json


+ 3 - 1
ambari-server/src/test/python/stacks/1.3.2/configs/secured_no_jce_name.json

@@ -222,7 +222,9 @@
             "lzo_enabled": "true", 
             "oozie_principal_name": "oozie/c6402.ambari.apache.org", 
             "dfs_datanode_address": "1019", 
-            "namenode_opt_newsize": "200m", 
+            "namenode_opt_newsize": "200m",
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m", 
             "initLimit": "10", 
             "hive_database_type": "mysql", 
             "zk_pid_dir": "/var/run/zookeeper", 

File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/default.hbasedecom.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/default.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/default.non_gmetad_host.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/default_client.json


+ 3 - 1
ambari-server/src/test/python/stacks/2.0.6/configs/flume_target.json

@@ -281,7 +281,9 @@
             "nodemanager_heapsize": "1024", 
             "gmetad_user": "nobody", 
             "hive_log_dir": "/var/log/hive", 
-            "namenode_opt_newsize": "200m", 
+            "namenode_opt_newsize": "200m",
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m", 
             "mapred_user": "mapred", 
             "resourcemanager_heapsize": "1024", 
             "hive_pid_dir": "/var/run/hive", 

File diff suppressed because it is too large
+ 3 - 1
ambari-server/src/test/python/stacks/2.0.6/configs/ha_default.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/ha_secured.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/rebalancehdfs_default.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/secured.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/secured_client.json


+ 2 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/secured_no_jce_name.json

@@ -290,6 +290,8 @@
             "hive_hostname": "c6402.ambari.apache.org", 
             "mapred_pid_dir_prefix": "/var/run/hadoop-mapreduce", 
             "namenode_opt_newsize": "200m", 
+            "namenode_opt_permsize" : "128m",
+            "namenode_opt_maxpermsize" : "256m",
             "yarn_pid_dir_prefix": "/var/run/hadoop-yarn", 
             "initLimit": "10", 
             "hive_database_type": "mysql", 

File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.1/configs/default-storm-start.json


File diff suppressed because it is too large
+ 2 - 0
ambari-server/src/test/python/stacks/2.1/configs/default.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.1/configs/secured-storm-start.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/python/stacks/2.1/configs/secured.json


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/resources/api_testscripts/curl-setup-multiple-hbase-master.sh


File diff suppressed because it is too large
+ 0 - 0
ambari-server/src/test/resources/deploy_HDP2.sh


+ 10 - 0
ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/global.xml

@@ -86,6 +86,16 @@
     <value>640</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1</value>

+ 12 - 2
ambari-server/src/test/resources/stacks/HDP/0.2/services/HDFS/configuration/hadoop-env.xml

@@ -51,6 +51,16 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>dtnode_heapsize</name>
     <value>1024</value>
@@ -129,14 +139,14 @@ export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
 HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
 HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -XX:PermSize={{namenode_opt_permsize}} -XX:MaxPermSize={{namenode_opt_maxpermsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"

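The {{...}} tokens in hadoop-env.xml are filled in from stack and cluster configuration before the file lands on a host. As a minimal sketch of that substitution (plain string replacement, not Ambari's actual templating engine; the values dict is hypothetical, with "m" suffixes as they appear in the cluster-level config samples further down), the new PermGen flags render like this:

    import re

    # Hypothetical config values for illustration only.
    values = {
        "namenode_heapsize": "1024m",
        "namenode_opt_newsize": "200m",
        "namenode_opt_maxnewsize": "200m",
        "namenode_opt_permsize": "128m",     # new in AMBARI-7846
        "namenode_opt_maxpermsize": "256m",  # new in AMBARI-7846
    }

    template = ("-XX:NewSize={{namenode_opt_newsize}} "
                "-XX:MaxNewSize={{namenode_opt_maxnewsize}} "
                "-XX:PermSize={{namenode_opt_permsize}} "
                "-XX:MaxPermSize={{namenode_opt_maxpermsize}} "
                "-Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}}")

    # Replace each {{name}} placeholder with its configured value.
    print(re.sub(r"\{\{(\w+)\}\}", lambda m: values[m.group(1)], template))
    # -XX:NewSize=200m -XX:MaxNewSize=200m -XX:PermSize=128m
    # -XX:MaxPermSize=256m -Xms1024m -Xmx1024m
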
+ 10 - 0
ambari-server/src/test/resources/stacks/HDP/1.3.0/services/HDFS/configuration/global.xml

@@ -86,6 +86,16 @@
     <value>640</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1</value>

+ 10 - 0
ambari-server/src/test/resources/stacks/HDP/2.0.1/services/HDFS/configuration/global.xml

@@ -86,6 +86,16 @@
     <value>640</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1</value>

+ 10 - 0
ambari-server/src/test/resources/stacks/HDP/2.0.5/services/HDFS/configuration/global.xml

@@ -86,6 +86,16 @@
     <value>640</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1</value>

+ 10 - 0
ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HDFS/configuration/global.xml

@@ -86,6 +86,16 @@
     <value>200</value>
     <description>NameNode maximum new generation size</description>
   </property>
+  <property>
+    <name>namenode_opt_permsize</name>
+    <value>128</value>
+    <description>NameNode permanent generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxpermsize</name>
+    <value>256</value>
+    <description>NameNode maximum permanent generation size</description>
+  </property>
   <property>
     <name>datanode_du_reserved</name>
     <value>1073741824</value>

+ 2 - 0
ambari-web/app/assets/data/configuration/global.json

@@ -45,6 +45,8 @@
         "mapreduce_userlog_retainhours" : "24",
         "dfs_replication" : "3",
         "namenode_opt_newsize" : "200m",
+        "namenode_opt_permsize" : "128m",
+        "namenode_opt_maxpermsize" : "256m",
         "hcat_conf_dir" : "",
         "nagios_user" : "nagios",
         "mapred_child_java_opts_sz" : "768",

+ 2 - 0
ambari-web/app/assets/data/configurations/cluster_level_actual_configs.json

@@ -189,6 +189,8 @@
         "namenode_opt_maxnewsize" : "640m",
         "regionserver_memstore_lab" : "true",
         "namenode_opt_newsize" : "200m",
+        "namenode_opt_permsize" : "128m",
+        "namenode_opt_maxpermsize" : "256m",
         "mapreduce_userlog_retainhours" : "24",
         "hcat_conf_dir" : "",
         "nagios_user" : "nagios",

+ 2 - 0
ambari-web/app/assets/data/configurations/cluster_level_configs.json

@@ -503,6 +503,8 @@
         "namenode_opt_maxnewsize" : "640m",
         "regionserver_memstore_lab" : "true",
         "namenode_opt_newsize" : "200m",
+        "namenode_opt_permsize" : "128m",
+        "namenode_opt_maxpermsize" : "256m",
         "smokeuser" : "ambari-qa",
         "mapreduce_userlog_retainhours" : "24",
         "hcat_conf_dir" : "",

+ 24 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json

@@ -1021,6 +1021,30 @@
         "type" : "global.xml"
       }
     },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode maximum permanent generation size",
+        "property_name" : "namenode_opt_maxpermsize",
+        "property_value" : "256",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_permsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode permanent generation size",
+        "property_name" : "namenode_opt_permsize",
+        "property_value" : "128",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "1.3.0",
+        "type" : "global.xml"
+      }
+    },
     {
       "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/proxyuser_group",
       "StackConfigurations" : {

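The mock data above mirrors what the stacks2-era REST endpoint returns for a single stack property. A hedged sketch of reading the new default back out, assuming a dev server at the host shown in the captured hrefs (authentication is omitted; real Ambari servers require Basic auth):

    import json
    import urllib.request

    url = ("http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0"
           "/stackServices/HDFS/configurations/namenode_opt_permsize")
    with urllib.request.urlopen(url) as resp:
        body = json.load(resp)
    # Response shape matches the captured JSON above.
    print(body["StackConfigurations"]["property_value"])  # "128"
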
+ 20 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/global.json

@@ -1724,6 +1724,26 @@
             "stack_name" : "HDP"
           }
         },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_permsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "namenode_opt_permsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "namenode_opt_maxpermsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
         {
           "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
           "StackConfigurations" : {

+ 24 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version131/HDFS.json

@@ -685,6 +685,30 @@
         "stack_name" : "HDP"
       }
     },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_permsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode permanent generation size",
+        "property_value" : "128",
+        "stack_version" : "1.3.1",
+        "property_name" : "namenode_opt_permsize",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode maximum permanent generation size",
+        "property_value" : "256",
+        "stack_version" : "1.3.1",
+        "property_name" : "namenode_opt_maxpermsize",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
     {
       "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
       "StackConfigurations" : {

+ 20 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version131/global.json

@@ -1724,6 +1724,26 @@
             "stack_name" : "HDP"
           }
         },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_permsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.1",
+            "property_name" : "namenode_opt_permsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.1",
+            "property_name" : "namenode_opt_maxpermsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
         {
           "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
           "StackConfigurations" : {

+ 24 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HDFS.json

@@ -1021,6 +1021,30 @@
         "type" : "global.xml"
       }
     },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HDFS/configurations/namenode_opt_permsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode permanent generation size",
+        "property_name" : "namenode_opt_permsize",
+        "property_value" : "128",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "2.0.1",
+        "type" : "global.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode maximum permanent generation size",
+        "property_name" : "namenode_opt_maxpermsize",
+        "property_value" : "256",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "stack_version" : "2.0.1",
+        "type" : "global.xml"
+      }
+    },
     {
       "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HDFS/configurations/proxyuser_group",
       "StackConfigurations" : {

+ 20 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/global.json

@@ -1724,6 +1724,26 @@
             "stack_name" : "HDP"
           }
         },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_permsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "namenode_opt_permsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxpermsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "namenode_opt_maxpermsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
         {
           "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
           "StackConfigurations" : {

+ 1 - 1
ambari-web/app/controllers/main/service/info/configs.js

@@ -162,7 +162,7 @@ App.MainServiceInfoConfigsController = Em.Controller.extend(App.ServerValidatorM
   /**
    * Regular expression for heapsize properties detection
    */
-  heapsizeRegExp: /_heapsize|_newsize|_maxnewsize$/,
+  heapsizeRegExp: /_heapsize|_newsize|_maxnewsize|_permsize|_maxpermsize$/,
 
   /**
    * Dropdown menu items in filter combobox

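The widened heapsizeRegExp now also classifies the two PermGen properties as memory sizes. One detail worth noting, demonstrated below in Python (whose alternation semantics match JavaScript's here, with re.search standing in for RegExp.test): the $ anchor binds only to the last alternative, so every suffix except _maxpermsize matches anywhere in the name, not just at the end.

    import re

    # Same pattern as heapsizeRegExp above.
    heapsize_re = re.compile(
        r"_heapsize|_newsize|_maxnewsize|_permsize|_maxpermsize$")

    assert heapsize_re.search("namenode_opt_permsize")
    assert heapsize_re.search("namenode_opt_maxpermsize")
    assert not heapsize_re.search("datanode_du_reserved")

    # The $ applies only to the final alternative, so the other
    # suffixes also match mid-name (hypothetical property name):
    assert heapsize_re.search("dtnode_heapsize_override")
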
+ 1 - 1
ambari-web/app/controllers/wizard/step8_controller.js

@@ -1640,7 +1640,7 @@ App.WizardStep8Controller = Em.Controller.extend(App.AddSecurityConfigs, {
         // do not pass any globals whose name ends with _host or _hosts
         if (_configProperty.isRequiredByAgent !== false) {
          // append "m" to JVM memory options except for heapsizeExceptions
-          if (/_heapsize|_newsize|_maxnewsize$/.test(_configProperty.name) && !heapsizeExceptions.contains(_configProperty.name)) {
+          if (/_heapsize|_newsize|_maxnewsize|_permsize|_maxpermsize$/.test(_configProperty.name) && !heapsizeExceptions.contains(_configProperty.name)) {
             properties[_configProperty.name] = _configProperty.value + "m";
           } else {
             properties[_configProperty.name] = _configProperty.value;

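This is why the stack defaults above are bare numbers ("128", "256") while the cluster-level JSON samples carry "128m"/"256m": the wizard appends the unit before sending configs to the server. A rough Python port of that branch (the heapsize_exceptions default is an assumption standing in for the controller's heapsizeExceptions list):

    import re

    JVM_MEM_RE = re.compile(
        r"_heapsize|_newsize|_maxnewsize|_permsize|_maxpermsize$")

    def finalize_value(name, value, heapsize_exceptions=("hadoop_heapsize",)):
        """Append "m" to JVM memory options, mirroring step8_controller.js."""
        if JVM_MEM_RE.search(name) and name not in heapsize_exceptions:
            return value + "m"
        return value

    assert finalize_value("namenode_opt_permsize", "128") == "128m"
    assert finalize_value("namenode_opt_maxpermsize", "256") == "256m"
    assert finalize_value("dfs_replication", "3") == "3"
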
+ 58 - 0
ambari-web/app/data/BIGTOP/site_properties.js

@@ -1705,6 +1705,36 @@ module.exports =
       "category": "NAMENODE",
       "index": 4
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_permsize",
+      "displayName": "NameNode permanent generation size",
+      "description": "Default size of Java new generation for NameNode (Java option -XX:PermSize).  This also applies to the Secondary NameNode.",
+      "defaultValue": "128",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "filename": "hadoop-env.xml",
+      "category": "NAMENODE",
+      "index": 5
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_maxpermsize",
+      "displayName": "NameNode maximum permanent generation size",
+      "description": "Maximum size of Java permanent generation for NameNode (Java option -XX:MaxPermSize).",
+      "defaultValue": "256",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "filename": "hadoop-env.xml",
+      "category": "NAMENODE",
+      "index": 6
+    },
     {
       "id": "puppet var",
       "name": "snamenode_host",
@@ -2290,6 +2320,34 @@ module.exports =
       "filename": "hadoop-env.xml",
       "category": "General Hadoop"
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_permsize",
+      "displayName": "NameNode permanent generation size",
+      "description": "Default size of Java permanent generation for NameNode (Java option -XX:PermSize).  This also applies to the Secondary NameNode.",
+      "defaultValue": "128",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_maxpermsize",
+      "displayName": "NameNode maximum permanent generation size",
+      "description": "Maximum size of Java permanent generation for NameNode (Java option -XX:MaxPermSize).",
+      "defaultValue": "256",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
     {
       "id": "puppet var",
       "name": "dtnode_heapsize",

+ 58 - 0
ambari-web/app/data/HDP2/site_properties.js

@@ -1845,6 +1845,36 @@ module.exports =
       "category": "NAMENODE",
       "index": 3
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_permsize",
+      "displayName": "NameNode permanent generation size",
+      "description": "Default size of Java permanent generation for NameNode (Java option -XX:PermSize).  This also applies to the Secondary NameNode.",
+      "defaultValue": "128",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "filename": "hadoop-env.xml",
+      "category": "NAMENODE",
+      "index": 5
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_maxpermsize",
+      "displayName": "NameNode maximum permanent generation size",
+      "description": "Maximum size of Java permanent generation for NameNode (Java option -XX:MaxPermSize).",
+      "defaultValue": "256",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "filename": "hadoop-env.xml",
+      "category": "NAMENODE",
+      "index": 6
+    },
     {
       "id": "puppet var",
       "name": "namenode_opt_maxnewsize",
@@ -2445,6 +2475,34 @@ module.exports =
       "filename": "hadoop-env.xml",
       "category": "General Hadoop"
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_permsize",
+      "displayName": "NameNode permanent generation size",
+      "description": "Default size of Java permanent generation for NameNode (Java option -XX:PermSize).  This also applies to the Secondary NameNode.",
+      "defaultValue": "128",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_maxpermsize",
+      "displayName": "NameNode maximum permanent generation size",
+      "description": "Maximum size of Java permanent generation for NameNode (Java option -XX:MaxPermSize).",
+      "defaultValue": "256",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
     {
       "id": "puppet var",
       "name": "dtnode_heapsize",

+ 57 - 0
ambari-web/app/data/site_properties.js

@@ -868,6 +868,21 @@ module.exports =
       "category": "NAMENODE",
       "index": 3
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_permsize",
+      "displayName": "NameNode permanent generation size",
+      "description": "Default size of Java permanent generation for NameNode (Java option -XX:PermSize).  This also applies to the Secondary NameNode.",
+      "defaultValue": "128",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "filename": "hadoop-env.xml",
+      "category": "NAMENODE",
+      "index": 4
+    },
     {
       "id": "puppet var",
       "name": "snamenode_host",
@@ -985,6 +1000,20 @@ module.exports =
       "filename": "hadoop-env.xml",
       "category": "NAMENODE"
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_maxpermsize",
+      "displayName": "NameNode maximum permanent generation size",
+      "description": "Default size of Java maximum permanent generation for NameNode (Java option -XX:MaxPermSize).  This also applies to the Secondary NameNode.",
+      "defaultValue": "256",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HDFS",
+      "filename": "hadoop-env.xml",
+      "category": "NAMENODE"
+    },
     {
       "id": "puppet var",
       "name": "security_enabled",
@@ -1152,6 +1181,34 @@ module.exports =
       "filename": "hadoop-env.xml",
       "category": "General Hadoop"
     },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_permsize",
+      "displayName": "NameNode permanent generation size",
+      "description": "Default size of Java permanent generation for NameNode (Java option -XX:PermSize).  This also applies to the Secondary NameNode.",
+      "defaultValue": "128",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_maxpermsize",
+      "displayName": "NameNode maximum permanent generation size",
+      "description": "Maximum size of Java permanent generation for NameNode (Java option -XX:MaxPermSize).",
+      "defaultValue": "256",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "GLUSTERFS",
+      "filename": "hadoop-env.xml",
+      "category": "General Hadoop"
+    },
     {
       "id": "puppet var",
       "name": "dtnode_heapsize",

Some files were not shown because too many files changed in this change