소스 검색

AMBARI-2194. Hadoop2 Installer: MapReduce2 shows only advanced section. (srimanth)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/trunk@1485900 13f79535-47bb-0310-9956-ffa450edef68
Srimanth 12 년 전
부모
커밋
f7d3565afa

+ 3 - 0
CHANGES.txt

@@ -309,6 +309,9 @@ Trunk (unreleased changes):
 
  IMPROVEMENTS
 
+ AMBARI-2194. Hadoop2 Installer: MapReduce2 shows only advanced section. 
+ (srimanth)
+
  AMBARI-2187. Hadoop2 Monitoring: Jobs page should be hidden when HDP 2.0.x 
  stack is installed. (srimanth)
 

+ 178 - 178
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/MAPREDUCEv2.json → ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/MAPREDUCE2.json

@@ -2,544 +2,544 @@
   "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations?fields=*",
   "items" : [
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.shuffle.merge.percent",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.system.dir",
       "StackConfigurations" : {
-        "property_description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
-        "property_value" : "0.66",
+        "property_description" : "No description",
+        "property_value" : "/mapred/system",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.shuffle.merge.percent",
+        "property_name" : "mapred.system.dir",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.inmem.merge.threshold",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.parallel.copies",
       "StackConfigurations" : {
-        "property_description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
-        "property_value" : "1000",
+        "property_description" : "No description",
+        "property_value" : "30",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.inmem.merge.threshold",
+        "property_name" : "mapred.reduce.parallel.copies",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.persist.jobstatus.active",
       "StackConfigurations" : {
-        "property_description" : "\n    15-minute bucket size (value is in minutes)\n  ",
-        "property_value" : "15",
+        "property_description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
+        "property_value" : "false",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
+        "property_name" : "mapred.job.tracker.persist.jobstatus.active",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/io.sort.factor",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
       "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "100",
+        "property_description" : "\n    3-hour sliding window (value is in minutes)\n  ",
+        "property_value" : "180",
         "stack_version" : "2.0.1",
-        "property_name" : "io.sort.factor",
+        "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.default.acl-administer-jobs",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.cluster.administrators",
       "StackConfigurations" : {
         "property_description" : null,
-        "property_value" : "*",
+        "property_value" : " hadoop",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.queue.default.acl-administer-jobs",
+        "property_name" : "mapreduce.cluster.administrators",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
-        "type" : "mapred-queue-acls.xml"
+        "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.default.acl-submit-job",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
       "StackConfigurations" : {
         "property_description" : null,
-        "property_value" : "*",
+        "property_value" : "false",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.queue.default.acl-submit-job",
+        "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
-        "type" : "mapred-queue-acls.xml"
+        "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.completeuserjobs.maximum",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.shuffle.port",
       "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "5",
+        "property_description" : "Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers.",
+        "property_value" : "8081",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
+        "property_name" : "mapreduce.shuffle.port",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
       "StackConfigurations" : {
-        "property_description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
-        "property_value" : "50000000",
+        "property_description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
+        "property_value" : "250",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
+        "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.restart.recover",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.reduce.input.limit",
       "StackConfigurations" : {
-        "property_description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
-        "property_value" : "false",
+        "property_description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
+        "property_value" : "10737418240",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.restart.recover",
+        "property_name" : "mapreduce.reduce.input.limit",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.healthChecker.interval",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.tasks.speculative.execution",
       "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "135000",
+        "property_description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
+        "property_value" : "false",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.healthChecker.interval",
+        "property_name" : "mapred.reduce.tasks.speculative.execution",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.shuffle.input.buffer.percent",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
       "StackConfigurations" : {
-        "property_description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
-        "property_value" : "0.7",
+        "property_description" : "\n    15-minute bucket size (value is in minutes)\n  ",
+        "property_value" : "15",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.shuffle.input.buffer.percent",
+        "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.reuse.jvm.num.tasks",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/io.sort.record.percent",
       "StackConfigurations" : {
-        "property_description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
-        "property_value" : "1",
+        "property_description" : "No description",
+        "property_value" : ".2",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.reuse.jvm.num.tasks",
+        "property_name" : "io.sort.record.percent",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobhistory.done-dir",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.history.server.embedded",
       "StackConfigurations" : {
-        "property_description" : "Directory where history files are managed by the MR JobHistory Server.",
-        "property_value" : "/mr-history/done",
+        "property_description" : "Should job history server be embedded within Job tracker\nprocess",
+        "property_value" : "false",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobhistory.done-dir",
+        "property_name" : "mapreduce.history.server.embedded",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobhistory.intermediate-done-dir",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/hadoop.job.history.user.location",
       "StackConfigurations" : {
-        "property_description" : "Directory where history files are written by MapReduce jobs.",
-        "property_value" : "/mr-history/tmp",
+        "property_description" : null,
+        "property_value" : "none",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobhistory.intermediate-done-dir",
+        "property_name" : "hadoop.job.history.user.location",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.persist.jobstatus.hours",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.shuffle.input.buffer.percent",
       "StackConfigurations" : {
-        "property_description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
-        "property_value" : "1",
+        "property_description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
+        "property_value" : "0.7",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
+        "property_name" : "mapred.job.shuffle.input.buffer.percent",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.system.dir",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.handler.count",
       "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "/mapred/system",
+        "property_description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
+        "property_value" : "50",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.system.dir",
+        "property_name" : "mapred.job.tracker.handler.count",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.names",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
       "StackConfigurations" : {
-        "property_description" : " Comma separated list of queues configured for this jobtracker.",
-        "property_value" : "default",
+        "property_description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
+        "property_value" : "50000000",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.queue.names",
+        "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.history.completed.location",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.healthChecker.interval",
       "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "/mapred/history/done",
+        "property_description" : null,
+        "property_value" : "135000",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.history.completed.location",
+        "property_name" : "mapred.healthChecker.interval",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.shuffle.port",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.output.compression.type",
       "StackConfigurations" : {
-        "property_description" : "Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers.",
-        "property_value" : "8081",
+        "property_description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
+        "property_value" : "BLOCK",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.shuffle.port",
+        "property_name" : "mapred.output.compression.type",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.persist.jobstatus.active",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobtracker.staging.root.dir",
       "StackConfigurations" : {
-        "property_description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
-        "property_value" : "false",
+        "property_description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
+        "property_value" : "/user",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.persist.jobstatus.active",
+        "property_name" : "mapreduce.jobtracker.staging.root.dir",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.map.tasks.speculative.execution",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.default.acl-administer-jobs",
       "StackConfigurations" : {
-        "property_description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
-        "property_value" : "false",
+        "property_description" : null,
+        "property_value" : "*",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.map.tasks.speculative.execution",
+        "property_name" : "mapred.queue.default.acl-administer-jobs",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
+        "type" : "mapred-queue-acls.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.child.root.logger",
       "StackConfigurations" : {
         "property_description" : null,
-        "property_value" : "false",
+        "property_value" : "INFO,TLA",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
+        "property_name" : "mapred.child.root.logger",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.retirejob.interval",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobhistory.done-dir",
       "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "21600000",
+        "property_description" : "Directory where history files are managed by the MR JobHistory Server.",
+        "property_value" : "/mr-history/done",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.retirejob.interval",
+        "property_name" : "mapreduce.jobhistory.done-dir",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobtracker.staging.root.dir",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.map.tasks.speculative.execution",
       "StackConfigurations" : {
-        "property_description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
-        "property_value" : "/user",
+        "property_description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
+        "property_value" : "false",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobtracker.staging.root.dir",
+        "property_name" : "mapred.map.tasks.speculative.execution",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.healthChecker.script.timeout",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.task.timeout",
       "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "60000",
+        "property_description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
+        "property_value" : "600000",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.healthChecker.script.timeout",
+        "property_name" : "mapred.task.timeout",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.max.tracker.blacklists",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.names",
       "StackConfigurations" : {
-        "property_description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
-        "property_value" : "16",
+        "property_description" : " Comma separated list of queues configured for this jobtracker.",
+        "property_value" : "default",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.max.tracker.blacklists",
+        "property_name" : "mapred.queue.names",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.output.compression.type",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.tasktracker.group",
       "StackConfigurations" : {
-        "property_description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
-        "property_value" : "BLOCK",
+        "property_description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
+        "property_value" : "hadoop",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.output.compression.type",
+        "property_name" : "mapreduce.tasktracker.group",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.retirejob.check",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.shuffle.merge.percent",
       "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "10000",
+        "property_description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
+        "property_value" : "0.66",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.retirejob.check",
+        "property_name" : "mapred.job.shuffle.merge.percent",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.child.root.logger",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.retirejob.check",
       "StackConfigurations" : {
         "property_description" : null,
-        "property_value" : "INFO,TLA",
+        "property_value" : "10000",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.child.root.logger",
+        "property_name" : "mapred.jobtracker.retirejob.check",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/hadoop.job.history.user.location",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.retirejob.interval",
       "StackConfigurations" : {
         "property_description" : null,
-        "property_value" : "none",
+        "property_value" : "21600000",
         "stack_version" : "2.0.1",
-        "property_name" : "hadoop.job.history.user.location",
+        "property_name" : "mapred.jobtracker.retirejob.interval",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.task.timeout",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.completeuserjobs.maximum",
       "StackConfigurations" : {
-        "property_description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
-        "property_value" : "600000",
+        "property_description" : null,
+        "property_value" : "5",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.task.timeout",
+        "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.tasks.speculative.execution",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.reduce.input.buffer.percent",
       "StackConfigurations" : {
-        "property_description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
-        "property_value" : "false",
+        "property_description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
+        "property_value" : "0.0",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.reduce.tasks.speculative.execution",
+        "property_name" : "mapred.job.reduce.input.buffer.percent",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.handler.count",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobhistory.intermediate-done-dir",
       "StackConfigurations" : {
-        "property_description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
-        "property_value" : "50",
+        "property_description" : "Directory where history files are written by MapReduce jobs.",
+        "property_value" : "/mr-history/tmp",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.handler.count",
+        "property_name" : "mapreduce.jobhistory.intermediate-done-dir",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.slowstart.completed.maps",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.inmem.merge.threshold",
       "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "0.05",
+        "property_description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
+        "property_value" : "1000",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.reduce.slowstart.completed.maps",
+        "property_name" : "mapred.inmem.merge.threshold",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.reduce.input.buffer.percent",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.reuse.jvm.num.tasks",
       "StackConfigurations" : {
-        "property_description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
-        "property_value" : "0.0",
+        "property_description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
+        "property_value" : "1",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.reduce.input.buffer.percent",
+        "property_name" : "mapred.job.reuse.jvm.num.tasks",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.reduce.input.limit",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.history.completed.location",
       "StackConfigurations" : {
-        "property_description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
-        "property_value" : "10737418240",
+        "property_description" : "No description",
+        "property_value" : "/mapred/history/done",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.reduce.input.limit",
+        "property_name" : "mapred.job.tracker.history.completed.location",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/io.sort.record.percent",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/jetty.connector",
       "StackConfigurations" : {
         "property_description" : "No description",
-        "property_value" : ".2",
+        "property_value" : "org.mortbay.jetty.nio.SelectChannelConnector",
         "stack_version" : "2.0.1",
-        "property_name" : "io.sort.record.percent",
+        "property_name" : "jetty.connector",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.slowstart.completed.maps",
       "StackConfigurations" : {
-        "property_description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
-        "property_value" : "250",
+        "property_description" : null,
+        "property_value" : "0.05",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+        "property_name" : "mapred.reduce.slowstart.completed.maps",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/tasktracker.http.threads",
       "StackConfigurations" : {
-        "property_description" : "\n    3-hour sliding window (value is in minutes)\n  ",
-        "property_value" : "180",
+        "property_description" : null,
+        "property_value" : "50",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
+        "property_name" : "tasktracker.http.threads",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.history.server.embedded",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/io.sort.factor",
       "StackConfigurations" : {
-        "property_description" : "Should job history server be embedded within Job tracker\nprocess",
-        "property_value" : "false",
+        "property_description" : "No description",
+        "property_value" : "100",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.history.server.embedded",
+        "property_name" : "io.sort.factor",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.tasktracker.group",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.persist.jobstatus.hours",
       "StackConfigurations" : {
-        "property_description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
-        "property_value" : "hadoop",
+        "property_description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
+        "property_value" : "1",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.tasktracker.group",
+        "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/tasktracker.http.threads",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.restart.recover",
       "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "50",
+        "property_description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
+        "property_value" : "false",
         "stack_version" : "2.0.1",
-        "property_name" : "tasktracker.http.threads",
+        "property_name" : "mapred.jobtracker.restart.recover",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/jetty.connector",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.max.tracker.blacklists",
       "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "org.mortbay.jetty.nio.SelectChannelConnector",
+        "property_description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
+        "property_value" : "16",
         "stack_version" : "2.0.1",
-        "property_name" : "jetty.connector",
+        "property_name" : "mapred.max.tracker.blacklists",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.parallel.copies",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.healthChecker.script.timeout",
       "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "30",
+        "property_description" : null,
+        "property_value" : "60000",
         "stack_version" : "2.0.1",
-        "property_name" : "mapred.reduce.parallel.copies",
+        "property_name" : "mapred.healthChecker.script.timeout",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
         "type" : "mapred-site.xml"
       }
     },
     {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.cluster.administrators",
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.default.acl-submit-job",
       "StackConfigurations" : {
         "property_description" : null,
-        "property_value" : " hadoop",
+        "property_value" : "*",
         "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.cluster.administrators",
+        "property_name" : "mapred.queue.default.acl-submit-job",
         "service_name" : "MAPREDUCE2",
         "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
+        "type" : "mapred-queue-acls.xml"
       }
     }
   ]
-}
+}

+ 3 - 2
ambari-web/app/controllers/main/admin/cluster.js

@@ -17,6 +17,7 @@
  */
 
 var App = require('app');
+var stringUtils = require('utils/string_utils');
 
 App.MainAdminClusterController = Em.Controller.extend({
   name:'mainAdminClusterController',
@@ -44,12 +45,12 @@ App.MainAdminClusterController = Em.Controller.extend({
     var minUpgradeVersion = currentVersion;
     upgradeVersion = upgradeVersion.replace(/HDP-/, '');
     data.items.mapProperty('Versions.stack_version').forEach(function(version){
-      upgradeVersion = (upgradeVersion < version) ? version : upgradeVersion;
+      upgradeVersion = (stringUtils.compareVersions(upgradeVersion, version) === -1) ? version : upgradeVersion;
     });
     currentStack = data.items.findProperty('Versions.stack_version', currentVersion);
     upgradeStack = data.items.findProperty('Versions.stack_version', upgradeVersion);
     minUpgradeVersion = upgradeStack.Versions.min_upgrade_version;
-    if(minUpgradeVersion && (minUpgradeVersion > currentVersion)){
+    if(minUpgradeVersion && (stringUtils.compareVersions(minUpgradeVersion, currentVersion) === 1)){
       upgradeVersion = currentVersion;
       upgradeStack = currentStack;
     }

+ 2129 - 0
ambari-web/app/data/HDP2/config_properties.js

@@ -0,0 +1,2129 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Defines service configuration properties.
+ *   name:
+ *     The name of the config property that is understood by Ambari server and agent.
+ *     E.g., "datanode_du_reserved"
+ *
+ *   displayName:
+ *     The human-friendly display name of the config property.
+ *     E.g., "Reserved space for HDFS"
+ *
+ *   description:
+ *     The description of the config property.
+ *     E.g., "Reserved space in GB per volume"
+ *
+ *   defaultValue:
+ *     The default value of the config property.
+ *     E.g., "1"
+ *
+ *   isReconfigurable:
+ *     Whether the config property can be reconfigured after it has been initially set and deployed.
+ *     If this is unspecified, true is assumed.
+ *     E.g., true, false
+ *
+ *   isOverridable:
+ *     Whether the config property can be overridden by hosts.
+ *     If this is unspecified, true is assumed.
+ *
+ *   isRequired:
+ *     Whether the config property is required or not.
+ *     If this is unspecified, true is assumed.
+ *     E.g., true, false
+ *
+ *   displayType:
+ *     How the config property is to be rendered for user input.
+ *     If this is left unspecified, "string" is assumed
+ *     E.g., "string", "int", "float", "checkbox", "directories", "custom", "email", "masterHost", "slaveHosts"
+ *
+ *   unit
+ *     The unit for the config property.
+ *     E.g., "ms", "MB", "bytes"
+ *
+ *   serviceName:
+ *     The service that the config property belongs to.
+ *     E.g., "HDFS", "MAPREDUCE", "ZOOKEEPER", etc.
+ *
+ *   category: the category that the config property belongs to (used for grouping config properties in the UI).
+ *     if unspecified, "General" is assumed.
+ *     E.g., "General", "Advanced", "NameNode", "DataNode"
+ *
+ *   index: the sequence number within the category, indicating where the config is placed relative to the rest of the category.
+ *     if unspecified, push to the end of array.
+ *     E.g., 0, 1, '2'
+ */
+
+var App = require('app');
+require('config');
+
+module.exports =
+{
+  "configProperties": [
+    /**********************************************HDFS***************************************/
+    {
+      "id": "puppet var",
+      "name": "namenode_host",
+      "displayName": "NameNode host",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run NameNode",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "NameNode",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_name_dir",
+      "displayName": "NameNode directories",
+      "description": "NameNode directories for HDFS to store the file system image",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/hdfs/namenode",
+      "displayType": "directories",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "NameNode",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_heapsize",
+      "displayName": "NameNode Java heap size",
+      "description": "Initial and maximum Java heap size for NameNode (Java options -Xms and -Xmx)",
+      "defaultValue": "1024",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "NameNode",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_newsize",
+      "displayName": "NameNode new generation size",
+      "description": "Default size of Java new generation for NameNode (Java option -XX:NewSize)",
+      "defaultValue": "200",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "NameNode",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "snamenode_host",
+      "displayName": "SNameNode host",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run SecondaryNameNode",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "SNameNode",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "fs_checkpoint_dir",
+      "displayName": "SecondaryNameNode Checkpoint directory",
+      "description": "Directory on the local filesystem where the Secondary NameNode should store the temporary images to merge",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/hdfs/namesecondary",
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "SNameNode",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "datanode_hosts", //not in the schema. For UI purpose
+      "displayName": "DataNode hosts",
+      "value": "",
+      "defaultValue": "",
+      "description": "The hosts that have been assigned to run DataNode",
+      "displayType": "slaveHosts",
+      "isRequired": false,
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "datanode-global",
+      "serviceName": "HDFS",
+      "category": "DataNode",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_data_dir",
+      "displayName": "DataNode directories",
+      "description": "DataNode directories for HDFS to store the data blocks",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/hdfs/data",
+      "displayType": "directories",
+      "isVisible": true,
+      "domain": "datanode-global",
+      "serviceName": "HDFS",
+      "category": "DataNode",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "dtnode_heapsize",
+      "displayName": "DataNode maximum Java heap size",
+      "description": "Maximum Java heap size for DataNode (Java option -Xmx)",
+      "defaultValue": "1024",
+      "displayType": "int",
+      "unit": "MB",
+      "isVisible": true,
+      "domain": "datanode-global",
+      "serviceName": "HDFS",
+      "category": "DataNode",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_failed_volume_tolerated",
+      "displayName": "DataNode volumes failure toleration",
+      "description": "The number of volumes that are allowed to fail before a DataNode stops offering service",
+      "defaultValue": "0",
+      "displayType": "int",
+      "isVisible": true,
+      "domain": "datanode-global",
+      "serviceName": "HDFS",
+      "category": "DataNode",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_webhdfs_enabled",
+      "displayName": "WebHDFS enabled",
+      "description": "Whether to enable WebHDFS feature",
+      "defaultValue": true,
+      "displayType": "checkbox",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "hadoop_heapsize",
+      "displayName": "Hadoop maximum Java heap size",
+      "description": "Maximum Java heap size for daemons such as Balancer (Java option -Xmx)",
+      "defaultValue": "1024",
+      "displayType": "int",
+      "unit": "MB",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "datanode_du_reserved",
+      "displayName": "Reserved space for HDFS",
+      "description": "Reserved space in GB per volume",
+      "defaultValue": "1",
+      "displayType": "int",
+      "unit": "GB",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "fs_checkpoint_period",
+      "displayName": "HDFS Maximum Checkpoint Delay",
+      "description": "Maximum delay between two consecutive checkpoints for HDFS",
+      "defaultValue": "21600",
+      "displayType": "int",
+      "unit": "seconds",
+      "isVisible": true,
+      "domain": "global",
+      "filename": "core-site.xml",
+      "serviceName": "HDFS",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "fs_checkpoint_size",
+      "displayName": "HDFS Maximum Edit Log Size for Checkpointing",
+      "description": "Maximum size of the edits log file that forces an urgent checkpoint even if the maximum checkpoint delay is not reached",
+      "defaultValue": "0.5",
+      "displayType": "float",
+      "unit": "GB",
+      "isVisible": true,
+      "domain": "global",
+      "filename": "core-site.xml",
+      "serviceName": "HDFS",
+      "index": 4
+    },
+    {
+      "id": "puppet var",
+      "name": "hdfs_log_dir_prefix",
+      "displayName": "Hadoop Log Dir Prefix",
+      "description": "The parent directory for Hadoop log files.  The HDFS log directory will be ${hadoop_log_dir_prefix} / ${hdfs_user} and the MapReduce log directory will be ${hadoop_log_dir_prefix} / ${mapred_user}.",
+      "defaultValue": "/var/log/hadoop",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hadoop_pid_dir_prefix",
+      "displayName": "Hadoop PID Dir Prefix",
+      "description": "The parent directory in which the PID files for Hadoop processes will be created.  The HDFS PID directory will be ${hadoop_pid_dir_prefix} / ${hdfs_user} and the MapReduce PID directory will be ${hadoop_pid_dir_prefix} / ${mapred_user}.",
+      "defaultValue": "/var/run/hadoop",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_opt_maxnewsize",
+      "displayName": "NameNode maximum new generation size",
+      "description": "",
+      "defaultValue": "640",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "NameNode"
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_exclude",
+      "displayName": "Exclude hosts",
+      "description": "Names a file that contains a list of hosts that are not permitted to connect to the namenode.  This file will be placed inside the Hadoop conf directory.",
+      "defaultValue": "dfs.exclude",
+      "displayType": "advanced",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_include",
+      "displayName": "Include hosts",
+      "description": "Names a file that contains a list of hosts that are permitted to connect to the namenode.  This file will be placed inside the Hadoop conf directory.",
+      "defaultValue": "dfs.include",
+      "displayType": "advanced",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_replication",
+      "displayName": "Block replication",
+      "description": "Default block replication.",
+      "displayType": "int",
+      "defaultValue": "3",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_block_local_path_access_user",
+      "displayName": "dfs.block.local-path-access.user",
+      "description": "the user who is allowed to perform short circuit reads",
+      "displayType": "advanced",
+      "defaultValue": "hbase",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_data_dir_perm",
+      "displayName": "dfs_datanode_data_dir_perm",
+      "description": "",
+      "defaultValue": "750",
+      "isReconfigurable": true,
+      "displayType": "int",
+      "isVisible": false,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "security_enabled",
+      "displayName": "Hadoop Security",
+      "description": "Enable hadoop security",
+      "defaultValue": false,
+      "isRequired": false,
+      "displayType": "checkbox",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "kerberos_domain",
+      "displayName": "Kerberos realm",
+      "description": "Kerberos realm",
+      "defaultValue": "EXAMPLE.COM",
+      "isRequired": true,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "kadmin_pw",
+      "displayName": "password",
+      "description": "Kerberos admin password",
+      "defaultValue": "",
+      "isRequired": true,
+      "displayType": "password",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "keytab_path",
+      "displayName": "Keytab directory",
+      "description": "Directory where Kerberos keytab files are stored",
+      "defaultValue": "/etc/security/keytabs",
+      "isRequired": true,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "namenode_formatted_mark_dir",
+      "displayName": "Hadoop formatted mark directory",
+      "description": "",
+      "defaultValue": "/var/run/hadoop/hdfs/namenode/formatted/",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "NameNode"
+    },
+    {
+      "id": "puppet var",
+      "name": "hcat_conf_dir",
+      "displayName": "HCat conf directory",
+      "description": "",
+      "defaultValue": "",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HDFS",
+      "category": "Advanced"
+    },
+
+  /**********************************************MAPREDUCE2***************************************/
+    {
+      "id": "puppet var",
+      "name": "mapreduce_framework_name",
+      "displayName": "mapreduce.framework.name",
+      "description": "Execution framework set to Hadoop YARN.",
+      "defaultValue": "yarn",
+      "isVisible": true,
+      "isOverridable": false,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_map_memory_mb",
+      "displayName": "mapreduce.map.memory.mb",
+      "description": "Larger resource limit for maps.",
+      "defaultValue": "1536",
+      "displayType": "int",
+      "unit": "MB",
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_map_java_opts",
+      "displayName": "mapreduce.map.java.opts",
+      "description": "Larger heap-size for child jvms of maps.",
+      "defaultValue": "-Xmx1024M",
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_reduce_memory_mb",
+      "displayName": "mapreduce.reduce.memory.mb",
+      "description": "Larger resource limit for reduces.",
+      "defaultValue": "3072",
+      "displayType": "int",
+      "unit": "MB",
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_reduce_java_opts",
+      "displayName": "mapreduce.reduce.java.opts",
+      "description": "Larger heap-size for child jvms of reduces.",
+      "defaultValue": "-Xmx2560M",
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_task_io_sort_mb",
+      "displayName": "mapreduce.task.io.sort.mb",
+      "description": "Higher memory-limit while sorting data for efficiency.",
+      "defaultValue": "512",
+      "displayType": "int",
+      "unit": "MB",
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_task_io_sort_factor",
+      "displayName": "mapreduce.task.io.sort.factor",
+      "description": "More streams merged at once while sorting files.",
+      "defaultValue": "100",
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_reduce_shuffle_parallelcopies",
+      "displayName": "mapreduce.reduce.shuffle.parallelcopies",
+      "description": "Higher number of parallel copies run by reduces to fetch outputs from very large number of maps.",
+      "defaultValue": "50",
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2"
+    },
+    /*{
+      "id": "puppet var",
+      "name": "mapreduce.jobhistory.address",
+      "displayName": "JobHistory Server",
+      "defaultValue": "",
+      "description": "Default port is 10020.",
+      "displayType": "masterHosts",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "MAPREDUCE2",
+      "category": "HistoryServer"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce.jobhistory.webapp.address",
+      "displayName": "JobHistory Server Web UI",
+      "defaultValue": "",
+      "description": "Default port is 19888.",
+      "isOverridable": false,
+      "isVisible": false,
+      "serviceName": "MAPREDUCE2",
+      "category": "HistoryServer"
+    },*/
+    {
+      "id": "puppet var",
+      "name": "historyserver_host",
+      "displayName": "JobHistory Server",
+      "value": "",
+      "description": "The host that has been assigned to run History Server",
+      "displayType": "masterHosts",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2",
+      "category": "HistoryServer",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_jobhistory_intermediate-done-dir",
+      "displayName": "mapreduce.jobhistory.intermediate-done-dir",
+      "defaultValue": "/mr-history/tmp",
+      "description": "Directory where history files are written by MapReduce jobs.",
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2",
+      "category": "HistoryServer",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "mapreduce_jobhistory_done-dir",
+      "displayName": "mapreduce.jobhistory.done-dir",
+      "defaultValue": "/mr-history/done",
+      "description": "Directory where history files are managed by the MR JobHistory Server.",
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "filename": "mapred-site.xml",
+      "serviceName": "MAPREDUCE2",
+      "category": "HistoryServer",
+      "index": 2
+    },
+
+  /**********************************************HBASE***************************************/
+    {
+      "id": "puppet var",
+      "name": "hbasemaster_host",
+      "displayName": "HBase Master hosts",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run HBase Master",
+      "displayType": "masterHosts",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "HBase Master",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "hbase_master_heapsize",
+      "displayName": "HBase Master Maximum Java heap size",
+      "description": "Maximum Java heap size for HBase master (Java option -Xmx)",
+      "defaultValue": "1024",
+      "displayType": "int",
+      "unit": "MB",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "HBase Master",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "regionserver_hosts",
+      "displayName": "RegionServer hosts",
+      "value": "",
+      "defaultValue": "",
+      "description": "The hosts that have been assigned to run RegionServer",
+      "displayType": "slaveHosts",
+      "isOverridable": false,
+      "isVisible": true,
+      "isRequired": false,
+      "domain": "regionserver-global",
+      "serviceName": "HBASE",
+      "category": "RegionServer",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "hbase_regionserver_heapsize",
+      "displayName": "HBase RegionServers maximum Java heap size",
+      "description": "Maximum Java heap size for HBase RegionServers (Java option -Xmx)",
+      "defaultValue": "1024",
+      "displayType": "int",
+      "unit": "MB",
+      "isVisible": true,
+      "domain": "regionserver-global",
+      "serviceName": "HBASE",
+      "category": "RegionServer",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "regionserver_handlers",
+      "displayName": "HBase RegionServer Handler",
+      "description": "Count of RPC Listener instances spun up on RegionServers",
+      "defaultValue": "30",
+      "displayType": "int",
+      "isVisible": true,
+      "domain": "regionserver-global",
+      "serviceName": "HBASE",
+      "category": "RegionServer",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "hregion_majorcompaction",
+      "displayName": "HBase Region Major Compaction",
+      "description": "The time between major compactions of all HStoreFiles in a region. Set to 0 to disable automated major compactions.",
+      "defaultValue": "86400000",
+      "displayType": "int",
+      "unit": "ms",
+      "isVisible": true,
+      "domain": "regionserver-global",
+      "serviceName": "HBASE",
+      "category": "RegionServer",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "hregion_blockmultiplier",
+      "displayName": "HBase Region Block Multiplier",
+      "description": "Block updates if memstore has \"Multiplier * HBase Region Memstore Flush Size\" bytes. Useful for preventing runaway memstore during spikes in update traffic",
+      "defaultValue": "2",
+      "displayType": "int",
+      "isVisible": true,
+      "domain": "regionserver-global",
+      "serviceName": "HBASE",
+      "category": "RegionServer",
+      "index": 4
+    },
+    {
+      "id": "puppet var",
+      "name": "hregion_memstoreflushsize",
+      "displayName": "HBase Region Memstore Flush Size",
+      "description": "Memstore will be flushed to disk if size of the memstore exceeds this number of bytes.",
+      "defaultValue": "134217728",
+      "displayType": "int",
+      "unit": "bytes",
+      "isVisible": true,
+      "domain": "regionserver-global",
+      "serviceName": "HBASE",
+      "category": "RegionServer",
+      "index": 5
+    },
+    {
+      "id": "puppet var",
+      "name": "hstore_compactionthreshold",
+      "displayName": "HBase HStore compaction threshold",
+      "description": "If more than this number of HStoreFiles in any one HStore then a compaction is run to rewrite all HStoreFiles files as one.",
+      "defaultValue": "3",
+      "displayType": "int",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "hfile_blockcache_size",
+      "displayName": "HFile block cache size",
+      "description": "Percentage of maximum heap (-Xmx setting) to allocate to block cache used by HFile/StoreFile. Set to 0 to disable but it's not recommended.",
+      "defaultValue": "0.25",
+      "displayType": "float",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "hstorefile_maxsize",
+      "displayName": "Maximum HStoreFile Size",
+      "description": "If any one of a column families' HStoreFiles has grown to exceed this value, the hosting HRegion is split in two.",
+      "defaultValue": "1073741824",
+      "displayType": "int",
+      "unit": "bytes",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "client_scannercaching",
+      "displayName": "HBase Client Scanner Caching",
+      "description": "Number of rows that will be fetched when calling next on a scanner if it is not served from (local, client) memory. Do not set this value such that the time between invocations is greater than the scanner timeout",
+      "defaultValue": "100",
+      "displayType": "int",
+      "unit": "rows",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "zookeeper_sessiontimeout",
+      "displayName": "Zookeeper timeout for HBase Session",
+      "description": "HBase passes this to the zk quorum as suggested maximum time for a session",
+      "defaultValue": "60000",
+      "displayType": "int",
+      "unit": "ms",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "index": 4
+    },
+    {
+      "id": "puppet var",
+      "name": "hfile_max_keyvalue_size",
+      "displayName": "HBase Client Maximum key-value Size",
+      "description": "Specifies the combined maximum allowed size of a KeyValue instance. It should be set to a fraction of the maximum region size.",
+      "defaultValue": "10485760",
+      "displayType": "int",
+      "unit": "bytes",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "index": 5
+    },
+    {
+      "id": "puppet var",
+      "name": "hbase_log_dir",
+      "displayName": "HBase Log Dir",
+      "description": "Directory for HBase logs",
+      "defaultValue": "/var/log/hbase",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hbase_pid_dir",
+      "displayName": "HBase PID Dir",
+      "description": "Directory in which the pid files for HBase processes will be created",
+      "defaultValue": "/var/run/hbase",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hbase_hdfs_root_dir",
+      "displayName": "HBase relative path to HDFS",
+      "description": "HBase relative directory to HDFS",
+      "defaultValue": "/apps/hbase/data",
+      "isRequired": true,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hbase_tmp_dir",
+      "displayName": "HBase temp directory",
+      "description": "",
+      "defaultValue": "/var/log/hbase",
+      "isRequired": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hdfs_enable_shortcircuit_read",
+      "displayName": "HDFS Short-circuit read",
+      "description": "",
+      "defaultValue": true,
+      "isRequired": false,
+      "displayType": "checkbox",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hdfs_support_append",
+      "displayName": "HDFS append support",
+      "description": "HDFS append support",
+      "defaultValue": true,
+      "isRequired": false,
+      "displayType": "checkbox",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hstore_blockingstorefiles",
+      "displayName": "hstore blocking storefiles",
+      "description": "If more than this number of StoreFiles in any one Store (one StoreFile is written per flush of " +
+        "MemStore) then updates are blocked for this HRegion until a compaction is completed, or until " +
+        "hbase.hstore.blockingWaitTime has been exceeded.",
+      "defaultValue": 7,
+      "isRequired": true,
+      "displayType": "int",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "regionserver_memstore_lab",
+      "displayName": "regionserver_memstore_lab",
+      "description": "",
+      "defaultValue": true,
+      "isRequired": false,
+      "displayType": "checkbox",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "regionserver_memstore_lowerlimit",
+      "displayName": "regionserver_memstore_lowerlimit",
+      "description": "",
+      "defaultValue": "0.35",
+      "isRequired": false,
+      "displayType": "float",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "regionserver_memstore_upperlimit",
+      "displayName": "regionserver_memstore_upperlimit",
+      "description": "",
+      "defaultValue": "0.4",
+      "isRequired": true,
+      "displayType": "float",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HBASE",
+      "category": "Advanced"
+    },
+  /**********************************************HIVE***************************************/
+    {
+      "id": "puppet var",
+      "name": "hivemetastore_host",
+      "displayName": "Hive Metastore host",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run Hive Metastore",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 0
+    },
+    // for existing MySQL
+    {
+      "id": "puppet var",
+      "name": "hive_existing_mysql_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "MySQL",
+      "description": "Using an existing MySQL database for Hive Metastore",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": false,
+      "isReconfigurable": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 1
+    },
+    // for existing Oracle
+    {
+      "id": "puppet var",
+      "name": "hive_existing_oracle_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "Oracle",
+      "description": "Using an existing Oracle database for Hive Metastore",
+      "displayType": "masterHost",
+      "isVisible": false,
+      "isOverridable": false,
+      "isReconfigurable": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 1
+    },
+    // for new MySQL
+    {
+      "id": "puppet var",
+      "name": "hive_ambari_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "MySQL",
+      "description": "MySQL will be installed by Ambari",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_database",
+      "displayName": "Hive Database",
+      "value": "",
+      "defaultValue": "New MySQL Database",
+      "options": [
+        {
+          displayName: 'New MySQL Database',
+          foreignKeys: ['hive_ambari_database', 'hive_ambari_host']
+        },
+        {
+          displayName: 'Existing MySQL Database',
+          foreignKeys: ['hive_existing_mysql_database', 'hive_existing_mysql_host']
+        },
+        {
+          displayName: 'Existing Oracle Database',
+          foreignKeys: ['hive_existing_oracle_database', 'hive_existing_oracle_host'],
+          hidden: !App.supports.hiveOozieExtraDatabases
+        }
+      ],
+      "description": "MySQL will be installed by Ambari",
+      "displayType": "radio button",
+      "isReconfigurable": false,
+      "radioName": "hive-database",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_existing_mysql_host",
+      "displayName": "Database Host",
+      "description": "Specify the host on which the existing database is hosted",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "host",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_existing_oracle_host",
+      "displayName": "Database Host",
+      "description": "Specify the host on which the existing database is hosted",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "host",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_ambari_host",
+      "value": "",
+      "defaultValue": "",
+      "displayName": "Database Host",
+      "description": "Host on which the database will be created by Ambari",
+      "isReconfigurable": false,
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_database_name",
+      "displayName": "Database Name",
+      "description": "Database name used as the Hive Metastore",
+      "defaultValue": "hive",
+      "isReconfigurable": false,
+      "displayType": "host",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 4
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_metastore_user_name",
+      "displayName": "Database Username",
+      "description": "Database user name to use to connect to the database",
+      "defaultValue": "hive",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 5
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_metastore_user_passwd",
+      "displayName": "Database Password",
+      "description": "Database password to use to connect to the database",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "password",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Hive Metastore",
+      "index": 6
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_metastore_port",
+      "displayName": "Hive metastore port",
+      "description": "",
+      "defaultValue": "9083",
+      "isReconfigurable": false,
+      "displayType": "int",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_lib",
+      "displayName": "Hive library",
+      "description": "",
+      "defaultValue": "/usr/lib/hive/lib/",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_conf_dir",
+      "displayName": "Hive conf directory",
+      "description": "",
+      "defaultValue": "/etc/hive/conf",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_dbroot",
+      "displayName": "Hive db directory",
+      "description": "",
+      "defaultValue": "/usr/lib/hive/lib",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_log_dir",
+      "displayName": "Hive Log Dir",
+      "description": "Directory for Hive log files",
+      "defaultValue": "/var/log/hive",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_pid_dir",
+      "displayName": "Hive PID Dir",
+      "description": "Directory in which the PID files for Hive processes will be created",
+      "defaultValue": "/var/run/hive",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "mysql_connector_url",
+      "displayName": "MySQL connector url",
+      "description": "",
+      "defaultValue": "${download_url}/mysql-connector-java-5.1.18.zip",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_aux_jars_path",
+      "displayName": "Hive auxiliary jar path",
+      "description": "",
+      "defaultValue": "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "HIVE",
+      "category": "Advanced"
+    },
+  /**********************************************WEBHCAT***************************************/
+    {
+      "id": "puppet var",
+      "name": "webhcatserver_host",
+      "displayName": "WebHCat Server host",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run WebHCat Server",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "WEBHCAT",
+      "category": "WebHCat Server"
+    },
+    {
+      "id": "puppet var",
+      "name": "hcat_log_dir",
+      "displayName": "WebHCat Log Dir",
+      "description": "Directory for WebHCat log files",
+      "defaultValue": "/var/log/webhcat",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "WEBHCAT",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hcat_pid_dir",
+      "displayName": "WebHCat PID Dir",
+      "description": "Directory in which the PID files for WebHCat processes will be created",
+      "defaultValue": "/var/run/webhcat",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "WEBHCAT",
+      "category": "Advanced"
+    },
+  /**********************************************OOZIE***************************************/
+    {
+      "id": "puppet var",
+      "name": "oozieserver_host",
+      "displayName": "Oozie Server host",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run Oozie Server",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 0
+    },
+    // for existing Oracle
+    {
+      "id": "puppet var",
+      "name": "oozie_existing_oracle_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "Oracle",
+      "description": "Using an existing Oracle database for Oozie Metastore",
+      "displayType": "masterHost",
+      "isVisible": false,
+      "isReconfigurable": false,
+      "isOverridable": false,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 1
+    },
+    // for new MySQL
+    {
+      "id": "puppet var",
+      "name": "oozie_ambari_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "MySQL",
+      "description": "MySQL will be installed by Ambari",
+      "displayType": "masterHost",
+      "isVisible": false,
+      "isOverridable": false,
+      // "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 1
+    },
+    // for current derby
+    {
+      "id": "puppet var",
+      "name": "oozie_derby_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "Derby",
+      "description": "Using current Derby database for Oozie Metastore",
+      "displayType": "masterHost",
+      "isVisible": false,
+      "isReconfigurable": false,
+      "isOverridable": false,
+      // "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 1
+    },
+    // for existing MySQL oozie
+    {
+      "id": "puppet var",
+      "name": "oozie_existing_mysql_database",
+      "displayName": "Database Type",
+      "value": "",
+      "defaultValue": "MySQL",
+      "description": "Using an existing MySQL database for Oozie Metastore",
+      "displayType": "masterHost",
+      "isVisible": false,
+      "isReconfigurable": false,
+      "isOverridable": false,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_database",
+      "displayName": "Oozie Database",
+      "value": "",
+      "defaultValue": "New Derby Database",
+      "options": [
+        {
+          displayName: 'New Derby Database',
+          foreignKeys: ['oozie_derby_database']
+        },
+        {
+          displayName: 'New MySQL Database',
+          foreignKeys: ['oozie_ambari_database', 'oozie_ambari_host'],
+          hidden: !App.supports.hiveOozieExtraDatabases
+        },
+        {
+          displayName: 'Existing MySQL Database',
+          foreignKeys: ['oozie_existing_mysql_database', 'oozie_existing_mysql_host'],
+          hidden: !App.supports.hiveOozieExtraDatabases
+        },
+        {
+          displayName: 'Existing Oracle Database',
+          foreignKeys: ['oozie_existing_oracle_database', 'oozie_existing_oracle_host'],
+          hidden: !App.supports.hiveOozieExtraDatabases
+        }
+      ],
+      "description": "Current Derby Database will be installed by Ambari",
+      "displayType": "radio button",
+      "isReconfigurable": false,
+      "isOverridable": false,
+      "radioName": "oozie-database",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_database_name",
+      "displayName": "Database Name",
+      "description": "Database name used for the Oozie",
+      "defaultValue": "oozie",
+      "isReconfigurable": false,
+      "isOverridable": false,
+      "displayType": "host",
+      "isVisible": true,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_metastore_user_name",
+      "displayName": "Database Username",
+      "description": "Database user name to use to connect to the database",
+      "defaultValue": "oozie",
+      "isReconfigurable": false,
+      "isOverridable": false,
+      "displayType": "user",
+      "isVisible": true,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 4
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_metastore_user_passwd",
+      "displayName": "Database Password",
+      "description": "Database password to use to connect to the database",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "isOverridable": false,
+      "displayType": "password",
+      "isVisible": true,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 5
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_data_dir",
+      "displayName": "Oozie Data Dir",
+      "description": "Data directory in which the Oozie DB exists",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/oozie/data",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "isRequired": false,
+      "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server",
+      "index": 6
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_existing_mysql_host",
+      "displayName": "Database Host",
+      "description": "Specify the host on which the existing database is hosted",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "isOverridable": false,
+      "displayType": "host",
+      "isVisible": false,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server"
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_existing_oracle_host",
+      "displayName": "Database Host",
+      "description": "Specify the host on which the existing database is hosted",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "isOverridable": false,
+      "displayType": "host",
+      "isVisible": false,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server"
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_ambari_host",
+      "value": "",
+      "defaultValue": "",
+      "displayName": "Database Host",
+      "description": "Host on which the database will be created by Ambari",
+      "isReconfigurable": false,
+      "isOverridable": false,
+      "displayType": "masterHost",
+      "isVisible": false,
+      //"domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Oozie Server"
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_log_dir",
+      "displayName": "Oozie Log Dir",
+      "description": "Directory for oozie logs",
+      "defaultValue": "/var/log/oozie",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_pid_dir",
+      "displayName": "Oozie PID Dir",
+      "description": "Directory in which the pid files for oozie processes will be created",
+      "defaultValue": "/var/run/oozie",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "OOZIE",
+      "category": "Advanced"
+    },
+  /**********************************************NAGIOS***************************************/
+    {
+      "id": "puppet var",
+      "name": "nagios_web_login",
+      "displayName": "Nagios Admin username",
+      "description": "Nagios Web UI Admin username",
+      "defaultValue": "nagiosadmin",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "domain": "global",
+      "isVisible": true,
+      "serviceName": "NAGIOS",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "nagios_web_password",
+      "displayName": "Nagios Admin password",
+      "description": "Nagios Web UI Admin password",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "password",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "NAGIOS",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "nagios_contact",
+      "displayName": "Hadoop Admin email",
+      "description": "Hadoop Administrator email for alert notification",
+      "defaultValue": "",
+      "displayType": "email",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "NAGIOS",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "nagios_group",
+      "displayName": "Nagios Group",
+      "description": "Nagios Group",
+      "defaultValue": "nagios",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "domain": "global",
+      "isVisible": false,
+      "serviceName": "NAGIOS"
+    },
+  /**********************************************ZOOKEEPER***************************************/
+    {
+      "id": "puppet var",
+      "name": "zookeeperserver_hosts",
+      "displayName": "ZooKeeper Server hosts",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run ZooKeeper Server",
+      "displayType": "masterHosts",
+      "isVisible": true,
+      "isOverridable": false,
+      "isRequired": false,
+      "serviceName": "ZOOKEEPER",
+      "category": "ZooKeeper Server",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "zk_data_dir",
+      "displayName": "ZooKeeper directory",
+      "description": "Data directory for ZooKeeper",
+      "defaultValue": "",
+      "defaultDirectory": "/hadoop/zookeeper",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "ZooKeeper Server",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "tickTime",
+      "displayName": "Length of single Tick",
+      "description": "The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper",
+      "defaultValue": "2000",
+      "displayType": "int",
+      "unit": "ms",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "ZooKeeper Server",
+      "index": 2
+    },
+    {
+      "id": "puppet var",
+      "name": "initLimit",
+      "displayName": "Ticks to allow for sync at Init",
+      "description": "Amount of time, in ticks to allow followers to connect and sync to a leader",
+      "defaultValue": "10",
+      "displayType": "int",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "ZooKeeper Server",
+      "index": 3
+    },
+    {
+      "id": "puppet var",
+      "name": "syncLimit",
+      "displayName": "Ticks to allow for sync at Runtime",
+      "description": "Amount of time, in ticks to allow followers to connect",
+      "defaultValue": "5",
+      "displayType": "int",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "ZooKeeper Server",
+      "index": 4
+    },
+    {
+      "id": "puppet var",
+      "name": "clientPort",
+      "displayName": "Port for running ZK Server",
+      "description": "Port for running ZooKeeper server",
+      "defaultValue": "2181",
+      "displayType": "int",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "ZooKeeper Server",
+      "index": 5
+    },
+    {
+      "id": "puppet var",
+      "name": "zk_log_dir",
+      "displayName": "ZooKeeper Log Dir",
+      "description": "Directory for ZooKeeper log files",
+      "defaultValue": "/var/log/zookeeper",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "Advanced",
+      "index": 0
+    },
+    {
+      "id": "puppet var",
+      "name": "zk_pid_dir",
+      "displayName": "ZooKeeper PID Dir",
+      "description": "Directory in which the pid files for zookeeper processes will be created",
+      "defaultValue": "/var/run/zookeeper",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "Advanced",
+      "index": 1
+    },
+    {
+      "id": "puppet var",
+      "name": "zk_pid_file",
+      "displayName": "ZooKeeper PID File",
+      "description": "",
+      "defaultValue": "/var/run/zookeeper/zookeeper_server.pid",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "ZOOKEEPER",
+      "category": "Advanced"
+    },
+  /**********************************************HUE***************************************/
+    {
+      "id": "puppet var",
+      "name": "hueserver_host",
+      "displayName": "Hue Server host",
+      "value": "",
+      "defaultValue": "",
+      "description": "The host that has been assigned to run Hue Server",
+      "displayType": "masterHost",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HUE",
+      "category": "Hue Server"
+    },
+    {
+      "id": "puppet var",
+      "name": "hue_log_dir",
+      "displayName": "HUE Log Dir",
+      "description": "Directory for HUE logs",
+      "defaultValue": "/var/log/hue",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HUE",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hue_pid_dir",
+      "displayName": "HUE Pid Dir",
+      "description": "Directory in which the pid files for HUE processes will be created",
+      "defaultValue": "/var/run/hue",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "HUE",
+      "category": "Advanced"
+    },
+  /**********************************************GANGLIA***************************************/
+    {
+      "id": "puppet var",
+      "name": "ganglia_conf_dir",
+      "displayName": "Ganglia conf directory",
+      "description": "",
+      "defaultValue": "/etc/ganglia/hdp",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "GANGLIA",
+      "category": "Advanced"
+    },
+  /**********************************************MISC***************************************/
+    {
+      "id": "puppet var",
+      "name": "hbase_conf_dir",
+      "displayName": "HBase conf dir",
+      "description": "",
+      "defaultValue": "/etc/hbase",
+      "isRequired": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    {
+      "id": "puppet var",
+      "name": "proxyuser_group",
+      "displayName": "Proxy group for Hive, WebHCat, and Oozie",
+      "description": "",
+      "defaultValue": "users",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "filename": "core-site.xml",
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_address",
+      "displayName": "dfs_datanode_address",
+      "description": "",
+      "defaultValue": "50010",
+      "isReconfigurable": true,
+      "displayType": "int",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "dfs_datanode_http_address",
+      "displayName": "dfs_datanode_http_address",
+      "description": "",
+      "defaultValue": "50075",
+      "isReconfigurable": true,
+      "displayType": "int",
+      "isVisible": true,
+      "filename": "hdfs-site.xml",
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "gpl_artifacts_download_url",
+      "displayName": "gpl artifact download url",
+      "description": "",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    {
+      "id": "puppet var",
+      "name": "apache_artifacts_download_url",
+      "displayName": "apache artifact download url",
+      "description": "",
+      "defaultValue": "",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    {
+      "id": "puppet var",
+      "name": "ganglia_runtime_dir",
+      "displayName": "Ganglia runtime directory",
+      "description": "",
+      "defaultValue": "/var/run/ganglia/hdp",
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    },
+    {
+      "id": "puppet var",
+      "name": "java64_home",
+      "displayName": "Path to 64-bit JAVA_HOME",
+      "description": "Path to 64-bit JAVA_HOME.  /usr/jdk/jdk1.6.0_31 is the default used by Ambari.  You can override this to a specific path that contains the JDK.  Note that the path must be valid on ALL hosts in your cluster.",
+      "defaultValue": "/usr/jdk64/jdk1.6.0_31",
+      "isRequired": true,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC"
+    },
+    {
+      "id": "puppet var",
+      "name": "run_dir",
+      "displayName": "Hadoop run directory",
+      "description": "",
+      "defaultValue": "/var/run/hadoop",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hadoop_conf_dir",
+      "displayName": "Hadoop conf directory",
+      "description": "",
+      "defaultValue": "/etc/hadoop",
+      "isRequired": false,
+      "isReconfigurable": false,
+      "displayType": "directory",
+      "isVisible": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Advanced"
+    },
+    {
+      "id": "puppet var",
+      "name": "hdfs_user",
+      "displayName": "HDFS User",
+      "description": "User to run HDFS as",
+      "defaultValue": "hdfs",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "serviceName": "MISC",
+      "domain": "global",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "mapred_user",
+      "displayName": "MapReduce User",
+      "description": "User to run MapReduce as",
+      "defaultValue": "mapred",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "hbase_user",
+      "displayName": "HBase User",
+      "description": "User to run HBase as",
+      "defaultValue": "hbase",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "hive_user",
+      "displayName": "Hive User",
+      "description": "User to run Hive as",
+      "defaultValue": "hive",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "hcat_user",
+      "displayName": "HCat User",
+      "description": "User to run HCatalog as",
+      "defaultValue": "hcat",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "webhcat_user",
+      "displayName": "WebHCat User",
+      "description": "User to run WebHCat as",
+      "defaultValue": "hcat",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "oozie_user",
+      "displayName": "Oozie User",
+      "description": "User to run Oozie as",
+      "defaultValue": "oozie",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "zk_user",
+      "displayName": "ZooKeeper User",
+      "description": "User to run ZooKeeper as",
+      "defaultValue": "zookeeper",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "gmetad_user",
+      "displayName": "Ganglia User",
+      "description": "The user used to run Ganglia",
+      "defaultValue": "nobody",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "gmond_user",
+      "displayName": "Gmond User",
+      "description": "The user used to run gmond for Ganglia",
+      "defaultValue": "nobody",
+      "isReconfigurable": false,
+      "displayType": "advanced",
+      "isOverridable": false,
+      "isVisible": false,
+      "domain": "global",
+      "serviceName":"MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "nagios_user",
+      "displayName": "Nagios User",
+      "description": "User to run Nagios as",
+      "defaultValue": "nagios",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "domain": "global",
+      "isVisible": true,
+      "serviceName":"MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "smokeuser",
+      "displayName": "Smoke Test User",
+      "description": "The user used to run service smoke tests",
+      "defaultValue": "ambari-qa",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": App.supports.customizeSmokeTestUser,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "user_group",
+      "displayName": "Group",
+      "description": "Group that the users specified above belong to",
+      "defaultValue": "hadoop",
+      "isReconfigurable": false,
+      "displayType": "user",
+      "isOverridable": false,
+      "isVisible": true,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "Users and Groups"
+    },
+    {
+      "id": "puppet var",
+      "name": "rrdcached_base_dir",
+      "displayName": "Ganglia rrd cached base directory",
+      "description": "Default directory for saving the rrd files on ganglia server",
+      "defaultValue": "/var/lib/ganglia/rrds",
+      "displayType": "directory",
+      "isOverridable": false,
+      "domain": "global",
+      "serviceName": "MISC",
+      "category": "General"
+    }
+  ]
+};

+ 12 - 4
ambari-web/app/data/service_configs.js

@@ -60,9 +60,12 @@ module.exports = [
     displayName: 'MapReduce 2',
     filename: 'mapred-site',
     configCategories: [
-      App.ServiceConfigCategory.create({ name: 'Advanced', displayName : 'Advanced'})
+      App.ServiceConfigCategory.create({ name: 'HistoryServer', displayName : 'History Server', hostComponentNames : ['HISTORYSERVER']}),
+      App.ServiceConfigCategory.create({ name: 'General', displayName : 'General'}),
+      App.ServiceConfigCategory.create({ name: 'Advanced', displayName : 'Advanced'}),
+      App.ServiceConfigCategory.create({ name: 'AdvancedMapredSite', displayName : 'Custom mapred-site.xml', siteFileName: 'mapred-site.xml', canAddProperty: true})
     ],
-    sites: ['global', 'core-site', 'mapred-site', 'mapred-queue-acls'],
+    sites: ['core-site', 'mapred-site', 'mapred-queue-acls'],
     configs: configProperties.filterProperty('serviceName', 'MAPREDUCE2')
   },
 
@@ -71,9 +74,14 @@ module.exports = [
     displayName: 'YARN',
     filename: 'yarn-site',
     configCategories: [
-      App.ServiceConfigCategory.create({ name: 'Advanced', displayName : 'Advanced'})
+      App.ServiceConfigCategory.create({ name: 'Advanced', displayName : 'Advanced'}),
+      App.ServiceConfigCategory.create({ name: 'General', displayName : 'General'}),
+      App.ServiceConfigCategory.create({ name: 'ResourceManager', displayName : 'Resource Manager', hostComponentNames : ['RESOURCEMANAGER']}),
+      App.ServiceConfigCategory.create({ name: 'NodeManager', displayName : 'Node Manager', hostComponentNames : ['NODEMANAGER']}),
+      App.ServiceConfigCategory.create({ name: 'CapacityScheduler', displayName : 'Capacity Scheduler', isCapacityScheduler : true, isCustomView: true, siteFileName: 'capacity-scheduler.xml', siteFileNames: ['capacity-scheduler.xml', 'mapred-queue-acls.xml'], canAddProperty: true}),
+      App.ServiceConfigCategory.create({ name: 'AdvancedYARNSite', displayName : 'Custom yarn-site.xml', siteFileName: 'yarn-site.xml', canAddProperty: true})
     ],
-    sites: ['global', 'yarn-site', 'capacity-scheduler'],
+    sites: ['core-site', 'yarn-site', 'capacity-scheduler'],
     configs: configProperties.filterProperty('serviceName', 'MAPREDUCE2')
   },
 

+ 10 - 1
ambari-web/app/models/service_config.js

@@ -244,6 +244,15 @@ App.ServiceConfigProperty = Ember.Object.extend({
       case 'datanode_hosts':
         this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'DATANODE').hosts.mapProperty('hostName'));
         break;
+      case 'historyserver_host':
+        this.set('value', masterComponentHostsInDB.filterProperty('component', 'HISTORYSERVER').mapProperty('hostName'));
+        break;
+      case 'resourcemanager_host':
+        this.set('value', masterComponentHostsInDB.findProperty('component', 'RESOURCEMANAGER').hostName);
+        break;
+      case 'nodemanager_hosts':
+        this.set('value', slaveComponentHostsInDB.findProperty('componentName', 'NODEMANAGER').hosts.mapProperty('hostName'));
+        break;
       case 'jobtracker_host':
         this.set('value', masterComponentHostsInDB.findProperty('component', 'JOBTRACKER').hostName);
         break;
@@ -331,7 +340,7 @@ App.ServiceConfigProperty = Ember.Object.extend({
         }, this);
         break;
       case 'mapred_local_dir':
-        temp = slaveComponentHostsInDB.findProperty('componentName', 'TASKTRACKER');
+        temp = slaveComponentHostsInDB.findProperty('componentName', 'TASKTRACKER') || slaveComponentHostsInDB.findProperty('componentName', 'NODEMANAGER');
         temp.hosts.forEach(function (host) {
           setOfHostNames.push(host.hostName);
         }, this);

+ 3 - 1
ambari-web/app/routes/main.js

@@ -17,6 +17,7 @@
  */
 
 var App = require('app');
+var stringUtils = require('utils/string_utils');
 
 module.exports = Em.Route.extend({
   route: '/main',
@@ -97,7 +98,8 @@ module.exports = Em.Route.extend({
   apps: Em.Route.extend({
     route: '/apps',
     connectOutlets: function (router) {
-      if (App.get('currentStackVersionNumber') >= '2.0.0') {
+      if (stringUtils.compareVersions(App.get('currentStackVersionNumber'), "2.0") === 1 ||
+        stringUtils.compareVersions(App.get('currentStackVersionNumber'), "2.0") === 0) {
         Em.run.next(function () {
           router.transitionTo('main.dashboard');
         });

+ 11 - 4
ambari-web/app/utils/config.js

@@ -17,6 +17,7 @@
  */
 
 var App = require('app');
+var stringUtils = require('utils/string_utils');
 
 var serviceComponents = {};
 var configGroupsByTag = [];
@@ -26,7 +27,13 @@ App.config = Em.Object.create({
 
   preDefinedServiceConfigs: require('data/service_configs'),
   configMapping: require('data/config_mapping'),
-  preDefinedConfigProperties: require('data/config_properties').configProperties,
+  preDefinedConfigProperties: function() {
+    if (stringUtils.compareVersions(App.get('currentStackVersionNumber'), "2.0") === 1 ||
+      stringUtils.compareVersions(App.get('currentStackVersionNumber'), "2.0") === 0) {
+      return require('data/HDP2/config_properties').configProperties;
+    }
+    return require('data/config_properties').configProperties;
+  }.property('App.currentStackVersionNumber'),
   preDefinedCustomConfigs: require('data/custom_configs'),
   //categories which contain custom configs
   categoriesWithCustom: ['CapacityScheduler'],
@@ -248,13 +255,13 @@ App.config = Em.Object.create({
    * @param serviceName
    */
   addAdvancedConfigs: function (serviceConfigs, advancedConfigs, serviceName) {
-    serviceConfigs = (serviceName) ? serviceConfigs.filterProperty('serviceName', serviceName) : serviceConfigs;
+    var configsToVerifying = (serviceName) ? serviceConfigs.filterProperty('serviceName', serviceName) : serviceConfigs;
     advancedConfigs.forEach(function (_config) {
       var configCategory = 'Advanced';
       var categoryMetaData = null;
       if (_config) {
         if (this.get('configMapping').computed().someProperty('name', _config.name)) {
-        } else if (!(serviceConfigs.someProperty('name', _config.name))) {
+        } else if (!(configsToVerifying.someProperty('name', _config.name))) {
           if(this.get('customFileNames').contains(_config.filename)){
             categoryMetaData = this.identifyCategory(_config);
             if (categoryMetaData != null) {
@@ -484,7 +491,7 @@ App.config = Em.Object.create({
       data: {
         serviceName: serviceName,
         stack2VersionUrl: App.get('stack2VersionURL'),
-        stackVersion: App.currentStackVersion.replace(/HDP(Local)?-/, '')
+        stackVersion: App.get('currentStackVersionNumber')
       },
       success: 'loadAdvancedConfigSuccess'
     });

+ 40 - 0
ambari-web/app/utils/string_utils.js

@@ -59,5 +59,45 @@ module.exports = {
       return str[1].toUpperCase();
     }
     return new_name;
+  },
+  /**
+   * Compare two versions by following rules:
+   * first higher than second then return 1
+   * first lower than second then return -1
+   * first equal to second then return 0
+   * @param first {string}
+   * @param second {string}
+   * @return {number}
+   */
+  compareVersions: function(first, second){
+    if (!(typeof first === 'string' && typeof second === 'string')) {
+      return false;
+    }
+    var firstNumbers = first.split('.');
+    var secondNumbers = second.split('.');
+    var length = 0;
+    var i = 0;
+    var result = false;
+    if(firstNumbers.length === secondNumbers.length) {
+      length = firstNumbers.length;
+    } else if(firstNumbers.length < secondNumbers.length){
+      length = secondNumbers.length;
+    } else {
+      length = firstNumbers.length;
+    }
+
+    while(i < length && !result){
+      firstNumbers[i] = (firstNumbers[i] === undefined) ? 0 : window.parseInt(firstNumbers[i]);
+      secondNumbers[i] = (secondNumbers[i] === undefined) ? 0 : window.parseInt(secondNumbers[i]);
+      if(firstNumbers[i] > secondNumbers[i]){
+        result = 1;
+      } else if(firstNumbers[i] === secondNumbers[i]){
+        result = 0;
+      } else if(firstNumbers[i] < secondNumbers[i]){
+        result = -1;
+      }
+      i++;
+    }
+    return result;
   }
 }

+ 2 - 1
ambari-web/app/views/main/menu.js

@@ -17,6 +17,7 @@
  */
 
 var App = require('app');
+var stringUtils = require('utils/string_utils');
 
 /**
  * this menu extended by other with modifying content and itemViewClass.template
@@ -37,7 +38,7 @@ App.MainMenuView = Em.CollectionView.extend({
       result.push({ label:Em.I18n.t('menu.item.mirroring'), routing:'mirroring'});
     }
 
-    if (App.get('currentStackVersionNumber') < '2.0.0') {
+    if (stringUtils.compareVersions(App.get('currentStackVersionNumber'), "2.0") === -1) {
       result.push({ label:Em.I18n.t('menu.item.jobs'), routing:'apps'});
     }
 

+ 16 - 0
ambari-web/test/utils/string_utils_test.js

@@ -53,4 +53,20 @@ describe('string_utils', function () {
     });
   });
 
+  describe('#compareVersions', function () {
+    var tests = [
+      {m: '1.2 equal to 1.2', v1:'1.2', v2:'1.2', e: 0},
+      {m: '1.2 lower than 1.3', v1:'1.2', v2:'1.3', e: -1},
+      {m: '1.3 higher than 1.2', v1:'1.3', v2:'1.2', e: 1},
+      {m: '1.2.1 higher than 1.2', v1:'1.2.1', v2:'1.2', e: 1},
+      {m: '11.2 higher than 2.2', v1:'11.2', v2:'2.2', e: 1},
+      {m: '0.9 higher than 0.8', v1:'0.9', v2:'0.8', e: 1}
+    ];
+    tests.forEach(function(test) {
+      it(test.m + ' ', function () {
+        expect(string_utils.compareVersions(test.v1, test.v2)).to.equal(test.e);
+      });
+    });
+  });
+
 });