Browse Source

AMBARI-2188. Update mock json data for Test mode. (srimanth)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/trunk@1485469 13f79535-47bb-0310-9956-ffa450edef68
Srimanth 12 years ago
parent
commit
24607b5224
53 changed files with 8120 additions and 891 deletions
  1. 2 0
      CHANGES.txt
  2. 137 0
      ambari-web/app/assets/data/wizard/stack/hdp/version/1.2.1.json
  3. 148 0
      ambari-web/app/assets/data/wizard/stack/hdp/version/2.0.1.json
  4. 113 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HBASE.json
  5. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HCATALOG.json
  6. 533 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HDFS.json
  7. 149 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HIVE.json
  8. 725 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/MAPREDUCE.json
  9. 317 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/OOZIE.json
  10. 173 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/WEBHCAT.json
  11. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/ZOOKEEPER.json
  12. 65 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/GANGLIA.json
  13. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HBASE.json
  14. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HCATALOG.json
  15. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json
  16. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HIVE.json
  17. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HUE.json
  18. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/MAPREDUCE.json
  19. 41 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/NAGIOS.json
  20. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/OOZIE.json
  21. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/PIG.json
  22. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/SQOOP.json
  23. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/WEBHCAT.json
  24. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/ZOOKEEPER.json
  25. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/global.json
  26. 0 60
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HBASE.json
  27. 0 20
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HCATALOG.json
  28. 0 210
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HDFS.json
  29. 0 95
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HIVE.json
  30. 0 230
      ambari-web/app/assets/data/wizard/stack/hdp/version122/MAPREDUCE.json
  31. 0 155
      ambari-web/app/assets/data/wizard/stack/hdp/version122/OOZIE.json
  32. 0 90
      ambari-web/app/assets/data/wizard/stack/hdp/version122/WEBHCAT.json
  33. 0 25
      ambari-web/app/assets/data/wizard/stack/hdp/version122/ZOOKEEPER.json
  34. 65 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/GANGLIA.json
  35. 281 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HBASE.json
  36. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HCATALOG.json
  37. 737 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HDFS.json
  38. 209 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HIVE.json
  39. 353 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HUE.json
  40. 545 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/MAPREDUCEv2.json
  41. 41 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/NAGIOS.json
  42. 317 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/OOZIE.json
  43. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/PIG.json
  44. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/SQOOP.json
  45. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/TEZ.json
  46. 173 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/WEBHCAT.json
  47. 461 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/YARN.json
  48. 4 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/ZOOKEEPER.json
  49. 2490 0
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/global.json
  50. 2 1
      ambari-web/app/controllers/wizard.js
  51. 3 2
      ambari-web/app/controllers/wizard/step3_controller.js
  52. 2 2
      ambari-web/app/utils/ajax.js
  53. 2 1
      ambari-web/app/utils/config.js

+ 2 - 0
CHANGES.txt

@@ -306,6 +306,8 @@ Trunk (unreleased changes):
 
 
  IMPROVEMENTS
 
 
+ AMBARI-2188. Update mock json data for Test mode. (srimanth) 
+
  AMBARI-2169. Going from Hosts page to Host Details page and back should
  preserve the filters, sort order, and pagination. (yusaku)
 
 

+ 137 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version/1.2.1.json

@@ -0,0 +1,137 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices?fields=StackServices",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "comments" : "This is comment for WEBHCAT service",
+        "service_version" : "0.5.0"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/GANGLIA",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "GANGLIA",
+        "stack_name" : "HDP",
+        "comments" : "Ganglia Metrics Collection system",
+        "service_version" : "3.2.0"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/NAGIOS",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "NAGIOS",
+        "stack_name" : "HDP",
+        "comments" : "Nagios Monitoring and Alerting system",
+        "service_version" : "3.2.3"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE",
+      "StackServices" : {
+        "user_name" : "mapred",
+        "stack_version" : "1.2.1",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "comments" : "Non-relational distributed database and centralized service for configuration management & synchronization",
+        "service_version" : "0.94.5"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/SQOOP",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "SQOOP",
+        "stack_name" : "HDP",
+        "comments" : "Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases",
+        "service_version" : "1.4.2"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "comments" : "Apache Hadoop Distributed File System",
+        "service_version" : "1.1.2"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE",
+      "StackServices" : {
+        "user_name" : "mapred",
+        "stack_version" : "1.2.1",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "comments" : "Apache Hadoop Distributed Processing Framework",
+        "service_version" : "1.1.2"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/PIG",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "PIG",
+        "stack_name" : "HDP",
+        "comments" : "Scripting platform for analyzing large datasets",
+        "service_version" : "0.10.1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/ZOOKEEPER",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "ZOOKEEPER",
+        "stack_name" : "HDP",
+        "comments" : "This is comment for ZOOKEEPER service",
+        "service_version" : "3.4.5"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "comments" : "System for workflow coordination and execution of Apache Hadoop jobs",
+        "service_version" : "3.2.0"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HCATALOG",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "HCATALOG",
+        "stack_name" : "HDP",
+        "comments" : "This is comment for HCATALOG service",
+        "service_version" : "0.5.0"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "1.2.1",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
+        "service_version" : "0.10.0"
+      }
+    }
+  ]
+}

+ 148 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version/2.0.1.json

@@ -0,0 +1,148 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices?fields=StackServices",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2",
+      "StackServices" : {
+        "user_name" : "mapred",
+        "stack_version" : "2.0.1",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "comments" : "Apache Hadoop NextGen MapReduce (client libraries)",
+        "service_version" : "2.0.3.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/OOZIE",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "comments" : "System for workflow coordination and execution of Apache Hadoop jobs",
+        "service_version" : "3.3.1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/PIG",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "PIG",
+        "stack_name" : "HDP",
+        "comments" : "Scripting platform for analyzing large datasets",
+        "service_version" : "0.10.1.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HCATALOG",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "HCATALOG",
+        "stack_name" : "HDP",
+        "comments" : "This is comment for HCATALOG service",
+        "service_version" : "0.5.0.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/WEBHCAT",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "comments" : "This is comment for WEBHCAT service",
+        "service_version" : "0.5.0"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/ZOOKEEPER",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "ZOOKEEPER",
+        "stack_name" : "HDP",
+        "comments" : "This is comment for ZOOKEEPER service",
+        "service_version" : "3.4.5.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/GANGLIA",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "GANGLIA",
+        "stack_name" : "HDP",
+        "comments" : "Ganglia Metrics Collection system",
+        "service_version" : "3.2.0"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HBASE",
+      "StackServices" : {
+        "user_name" : "mapred",
+        "stack_version" : "2.0.1",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "comments" : "Non-relational distributed database and centralized service for configuration management & synchronization",
+        "service_version" : "0.94.5.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HIVE",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
+        "service_version" : "0.10.0.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN",
+      "StackServices" : {
+        "user_name" : "mapred",
+        "stack_version" : "2.0.1",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "comments" : "Apache Hadoop NextGen MapReduce (YARN)",
+        "service_version" : "2.0.3.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/NAGIOS",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "NAGIOS",
+        "stack_name" : "HDP",
+        "comments" : "Nagios Monitoring and Alerting system",
+        "service_version" : "3.2.3"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/TEZ",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "TEZ",
+        "stack_name" : "HDP",
+        "comments" : "Tez is the next generation Hadoop Query Processing framework written on top of YARN",
+        "service_version" : "0.1.0.22-1"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HDFS",
+      "StackServices" : {
+        "user_name" : "root",
+        "stack_version" : "2.0.1",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "comments" : "Apache Hadoop Distributed File System",
+        "service_version" : "2.0.3.22-1"
+      }
+    }
+  ]
+}

+ 113 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HBASE.json

@@ -0,0 +1,113 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.master.lease.thread.wakefrequency",
+      "StackConfigurations" : {
+        "property_description" : "The interval between checks for expired region server leases.\n    This value has been reduced due to the other reduced values above so that\n    the master will notice a dead region server sooner. The default is 15 seconds.\n    ",
+        "property_value" : "3000",
+        "stack_version" : "1.2.1",
+        "property_name" : "hbase.master.lease.thread.wakefrequency",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.superuser",
+      "StackConfigurations" : {
+        "property_description" : "List of users or groups (comma-separated), who are allowed\n    full privileges, regardless of stored ACLs, across the cluster.\n    Only used when HBase security is enabled.\n    ",
+        "property_value" : "hbase",
+        "stack_version" : "1.2.1",
+        "property_name" : "hbase.superuser",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/security.client.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for HRegionInterface protocol implementations (ie. \n    clients talking to HRegionServers)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.client.protocol.acl",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/security.admin.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for HMasterInterface protocol implementation (ie. \n    clients talking to HMaster for admin operations).\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.admin.protocol.acl",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/security.masterregion.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for HMasterRegionInterface protocol implementations\n    (for HRegionServers communicating with HMaster)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.masterregion.protocol.acl",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.zookeeper.useMulti",
+      "StackConfigurations" : {
+        "property_description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n    This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).·\n    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will\n    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n    ",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "hbase.zookeeper.useMulti",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.zookeeper.property.clientPort",
+      "StackConfigurations" : {
+        "property_description" : "Property from ZooKeeper's config zoo.cfg.\n    The port at which the clients will connect.\n    ",
+        "property_value" : "2181",
+        "stack_version" : "1.2.1",
+        "property_name" : "hbase.zookeeper.property.clientPort",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.cluster.distributed",
+      "StackConfigurations" : {
+        "property_description" : "The mode the cluster will be in. Possible values are\n      false for standalone mode and true for distributed mode.  If\n      false, startup will run all HBase and ZooKeeper daemons together\n      in the one JVM.\n    ",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "hbase.cluster.distributed",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.regionserver.optionalcacheflushinterval",
+      "StackConfigurations" : {
+        "property_description" : "\n      Amount of time to wait since the last time a region was flushed before\n      invoking an optional cache flush. Default 60,000.\n    ",
+        "property_value" : "10000",
+        "stack_version" : "1.2.1",
+        "property_name" : "hbase.regionserver.optionalcacheflushinterval",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP",
+        "type" : "hbase-site.xml"
+      }
+    }
+  ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HCATALOG.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HCATALOG/configurations?fields=*",
+  "items" : [ ]
+}

+ 533 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HDFS.json

@@ -0,0 +1,533 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.client.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for ClientProtocol, which is used by user code\n    via the DistributedFileSystem.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.client.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
+      "StackConfigurations" : {
+        "property_description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
+        "property_value" : "6250000",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.balance.bandwidthPerSec",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.block.size",
+      "StackConfigurations" : {
+        "property_description" : "The default block size for new files.",
+        "property_value" : "134217728",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.block.size",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.secondary.https.port",
+      "StackConfigurations" : {
+        "property_description" : "The https port where secondary-namenode binds",
+        "property_value" : "50490",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.secondary.https.port",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.checkpoint.size",
+      "StackConfigurations" : {
+        "property_description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
+        "property_value" : "536870912",
+        "stack_version" : "1.2.1",
+        "property_name" : "fs.checkpoint.size",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.checkpoint.period",
+      "StackConfigurations" : {
+        "property_description" : "The number of seconds between two periodic checkpoints.\n  ",
+        "property_value" : "21600",
+        "stack_version" : "1.2.1",
+        "property_name" : "fs.checkpoint.period",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
+      "StackConfigurations" : {
+        "property_description" : "PRIVATE CONFIG VARIABLE",
+        "property_value" : "4096",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.datanode.max.xcievers",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.permissions.supergroup",
+      "StackConfigurations" : {
+        "property_description" : "The name of the group of super-users.",
+        "property_value" : "hdfs",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.permissions.supergroup",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.access.time.precision",
+      "StackConfigurations" : {
+        "property_description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
+        "property_value" : "0",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.access.time.precision",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/webinterface.private.actions",
+      "StackConfigurations" : {
+        "property_description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "webinterface.private.actions",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.web.ugi",
+      "StackConfigurations" : {
+        "property_description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+        "property_value" : "gopher,gopher",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.web.ugi",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.umaskmode",
+      "StackConfigurations" : {
+        "property_description" : "\nThe octal umask used when creating files and directories.\n",
+        "property_value" : "077",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.umaskmode",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
+      "StackConfigurations" : {
+        "property_description" : "DFS Client write socket timeout",
+        "property_value" : "0",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.datanode.socket.write.timeout",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.block.access.token.enable",
+      "StackConfigurations" : {
+        "property_description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.block.access.token.enable",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for TaskUmbilicalProtocol, used by the map and reduce\n    tasks to communicate with the parent tasktracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.task.umbilical.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for InterTrackerProtocol, used by the tasktrackers to\n    communicate with the jobtracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.inter.tracker.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.du.pct",
+      "StackConfigurations" : {
+        "property_description" : "When calculating remaining space, only use this percentage of the real available space\n",
+        "property_value" : "0.85f",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.datanode.du.pct",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.file.buffer.size",
+      "StackConfigurations" : {
+        "property_description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
+        "property_value" : "131072",
+        "stack_version" : "1.2.1",
+        "property_name" : "io.file.buffer.size",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for InterDatanodeProtocol, the inter-datanode protocol\n    for updating generation timestamp.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.inter.datanode.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.permissions",
+      "StackConfigurations" : {
+        "property_description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.permissions",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
+      "StackConfigurations" : {
+        "property_description" : "Defines the maximum number of retries for IPC connections.",
+        "property_value" : "50",
+        "stack_version" : "1.2.1",
+        "property_name" : "ipc.client.connect.max.retries",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "Added to grow Queue size so that more client connections are allowed",
+        "property_value" : "100",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.namenode.handler.count",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for JobSubmissionProtocol, used by job clients to\n    communciate with the jobtracker for job submission, querying job status etc.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.job.submission.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
+      "StackConfigurations" : {
+        "property_description" : "Delay for first block report in seconds.",
+        "property_value" : "120",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.blockreport.initialDelay",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.heartbeat.interval",
+      "StackConfigurations" : {
+        "property_description" : "Determines datanode heartbeat interval in seconds.",
+        "property_value" : "3",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.heartbeat.interval",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
+      "StackConfigurations" : {
+        "property_description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
+        "property_value" : "30000",
+        "stack_version" : "1.2.1",
+        "property_name" : "ipc.client.connection.maxidletime",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.compression.codecs",
+      "StackConfigurations" : {
+        "property_description" : "A list of the compression codec classes that can be used\n                 for compression/decompression.",
+        "property_value" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec",
+        "stack_version" : "1.2.1",
+        "property_name" : "io.compression.codecs",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.server.max.response.size",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "5242880",
+        "stack_version" : "1.2.1",
+        "property_name" : "ipc.server.max.response.size",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.namenode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for NamenodeProtocol, the protocol used by the secondary\n    namenode to communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.namenode.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "5",
+        "stack_version" : "1.2.1",
+        "property_name" : "ipc.server.read.threadpool.size",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
+      "StackConfigurations" : {
+        "property_description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+        "property_value" : "0.0.0.0:8010",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.datanode.ipc.address",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.cluster.administrators",
+      "StackConfigurations" : {
+        "property_description" : "ACL for who all can view the default servlets in the HDFS",
+        "property_value" : " hdfs",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.cluster.administrators",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.trash.interval",
+      "StackConfigurations" : {
+        "property_description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
+        "property_value" : "360",
+        "stack_version" : "1.2.1",
+        "property_name" : "fs.trash.interval",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.client.idlethreshold",
+      "StackConfigurations" : {
+        "property_description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
+        "property_value" : "8000",
+        "stack_version" : "1.2.1",
+        "property_name" : "ipc.client.idlethreshold",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for DatanodeProtocol, which is used by datanodes to\n    communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.datanode.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "The number of server threads for the namenode.",
+        "property_value" : "40",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.namenode.handler.count",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
+      "StackConfigurations" : {
+        "property_description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
+        "property_value" : "1.0f",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.safemode.threshold.pct",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.replication.max",
+      "StackConfigurations" : {
+        "property_description" : "Maximal block replication.\n  ",
+        "property_value" : "50",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.replication.max",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for ClientDatanodeProtocol, the client-to-datanode protocol\n    for block recovery.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "security.client.datanode.protocol.acl",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hadoop-policy.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.serializations",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "org.apache.hadoop.io.serializer.WritableSerialization",
+        "stack_version" : "1.2.1",
+        "property_name" : "io.serializations",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
+      "StackConfigurations" : {
+        "property_description" : "The implementation for lzo codec.",
+        "property_value" : "com.hadoop.compression.lzo.LzoCodec",
+        "stack_version" : "1.2.1",
+        "property_name" : "io.compression.codec.lzo.class",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.https.port",
+      "StackConfigurations" : {
+        "property_description" : "The https port where namenode binds",
+        "property_value" : "50470",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.https.port",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
+      "StackConfigurations" : {
+        "property_description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
+        "property_value" : "${fs.checkpoint.dir}",
+        "stack_version" : "1.2.1",
+        "property_name" : "fs.checkpoint.edits.dir",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "core-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
+      "StackConfigurations" : {
+        "property_description" : "Number of failed disks datanode would tolerate",
+        "property_value" : "0",
+        "stack_version" : "1.2.1",
+        "property_name" : "dfs.datanode.failed.volumes.tolerated",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP",
+        "type" : "hdfs-site.xml"
+      }
+    }
+  ]
+}

+ 149 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HIVE.json

@@ -0,0 +1,149 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
+      "StackConfigurations" : {
+        "property_description" : "MetaStore Client socket timeout in seconds",
+        "property_value" : "60",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.metastore.client.socket.timeout",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.security.authorization.manager",
+      "StackConfigurations" : {
+        "property_description" : "the hive client authorization manager class name.\n    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
+        "property_value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.security.authorization.manager",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.security.authorization.enabled",
+      "StackConfigurations" : {
+        "property_description" : "enable or disable the hive client authorization",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.security.authorization.enabled",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
+      "StackConfigurations" : {
+        "property_description" : "List of comma separated metastore object types that should be pinned in the cache",
+        "property_value" : "Table,Database,Type,FieldSchema,Order",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.metastore.cache.pinobjtypes",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hadoop.clientside.fs.operations",
+      "StackConfigurations" : {
+        "property_description" : "FS operations are owned by client",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "hadoop.clientside.fs.operations",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "fs.hdfs.impl.disable.cache",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.semantic.analyzer.factory.impl",
+      "StackConfigurations" : {
+        "property_description" : "controls which SemanticAnalyzerFactory implemenation class is used by CLI",
+        "property_value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.semantic.analyzer.factory.impl",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.local",
+      "StackConfigurations" : {
+        "property_description" : "controls whether to connect to remove metastore server or\n    open a new metastore server in Hive Client JVM",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.metastore.local",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
+      "StackConfigurations" : {
+        "property_description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.metastore.execute.setugi",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
+      "StackConfigurations" : {
+        "property_description" : "location of default database for the warehouse",
+        "property_value" : "/apps/hive/warehouse",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.metastore.warehouse.dir",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
+      "StackConfigurations" : {
+        "property_description" : "Driver class name for a JDBC metastore",
+        "property_value" : "com.mysql.jdbc.Driver",
+        "stack_version" : "1.2.1",
+        "property_name" : "javax.jdo.option.ConnectionDriverName",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.server2.enable.doAs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "hive.server2.enable.doAs",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP",
+        "type" : "hive-site.xml"
+      }
+    }
+  ]
+}

+ 725 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/MAPREDUCE.json

@@ -0,0 +1,725 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.maximum-system-jobs",
+      "StackConfigurations" : {
+        "property_description" : "Maximum number of jobs in the system which can be initialized,\n     concurrently, by the CapacityScheduler.\n    ",
+        "property_value" : "3000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.maximum-system-jobs",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+      "StackConfigurations" : {
+        "property_description" : "The default maximum number of tasks per-user, across all the of \n    the user's jobs in the queue, which can be initialized concurrently. Once \n    the user's jobs exceed this limit they will be queued on disk.  \n    ",
+        "property_value" : "100000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
+        "property_value" : "50",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.tracker.handler.count",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.healthChecker.interval",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "135000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.healthChecker.interval",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.restart.recover",
+      "StackConfigurations" : {
+        "property_description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.jobtracker.restart.recover",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.hours",
+      "StackConfigurations" : {
+        "property_description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
+        "property_value" : "1",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-poll-interval",
+      "StackConfigurations" : {
+        "property_description" : "The amount of time in milliseconds which is used to poll \n    the job queues for jobs to initialize.\n    ",
+        "property_value" : "5000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.init-poll-interval",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.reduce.parallel.copies",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "30",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.reduce.parallel.copies",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.reuse.jvm.num.tasks",
+      "StackConfigurations" : {
+        "property_description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
+        "property_value" : "1",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.reuse.jvm.num.tasks",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.capacity",
+      "StackConfigurations" : {
+        "property_description" : "Percentage of the number of slots in the cluster that are\n      to be available for jobs in this queue.\n    ",
+        "property_value" : "100",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.capacity",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.history.completed.location",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "/mapred/history/done",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.tracker.history.completed.location",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.supports-priority",
+      "StackConfigurations" : {
+        "property_description" : "If true, priorities of jobs will be taken into \n      account in scheduling decisions.\n    ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.supports-priority",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/tasktracker.http.threads",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "50",
+        "stack_version" : "1.2.1",
+        "property_name" : "tasktracker.http.threads",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+      "StackConfigurations" : {
+        "property_description" : "The default maximum number of tasks, across all jobs in the \n    queue, which can be initialized concurrently. Once the queue's jobs exceed \n    this limit they will be queued on disk.  \n    ",
+        "property_value" : "200000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.cluster.administrators",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : " hadoop",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapreduce.cluster.administrators",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+      "StackConfigurations" : {
+        "property_description" : "The multiple of (maximum-system-jobs * queue-capacity) used to \n    determine the number of jobs which are accepted by the scheduler.  \n    ",
+        "property_value" : "10",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
+      "StackConfigurations" : {
+        "property_description" : "\n    3-hour sliding window (value is in minutes)\n  ",
+        "property_value" : "180",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
+      "StackConfigurations" : {
+        "property_description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
+        "property_value" : "250",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-administer-jobs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.queue.default.acl-administer-jobs",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-queue-acls.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-user-limit-factor",
+      "StackConfigurations" : {
+        "property_description" : "The default multiple of queue-capacity which is used to \n    determine the amount of slots a single user can consume concurrently.\n    ",
+        "property_value" : "1",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.default-user-limit-factor",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.max.tracker.blacklists",
+      "StackConfigurations" : {
+        "property_description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
+        "property_value" : "16",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.max.tracker.blacklists",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.map.output.compression.codec",
+      "StackConfigurations" : {
+        "property_description" : "If the map outputs are compressed, how should they be\n      compressed\n    ",
+        "property_value" : "org.apache.hadoop.io.compress.SnappyCodec",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.map.output.compression.codec",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.healthChecker.script.timeout",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "60000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.healthChecker.script.timeout",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/jetty.connector",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "org.mortbay.jetty.nio.SelectChannelConnector",
+        "stack_version" : "1.2.1",
+        "property_name" : "jetty.connector",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-supports-priority",
+      "StackConfigurations" : {
+        "property_description" : "If true, priorities of jobs will be taken into \n      account in scheduling decisions by default in a job queue.\n    ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.default-supports-priority",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-capacity",
+      "StackConfigurations" : {
+        "property_description" : "\n\tmaximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.\n\tThis provides a means to limit how much excess capacity a queue can use. By default, there is no limit.\n\tThe maximum-capacity of a queue can only be greater than or equal to its minimum capacity.\n        Default value of -1 implies a queue can use complete capacity of the cluster.\n\n        This property could be to curtail certain jobs which are long running in nature from occupying more than a \n        certain percentage of the cluster, which in the absence of pre-emption, could lead to capacity guarantees of \n        other queues being affected.\n        \n        One important thing to note is that maximum-capacity is a percentage , so based on the cluster's capacity\n        the max capacity would change. So if large no of nodes or racks get added to the cluster , max Capacity in \n        absolute terms would increase accordingly.\n    ",
+        "property_value" : "-1",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.maximum-capacity",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.child.root.logger",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "INFO,TLA",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.child.root.logger",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-minimum-user-limit-percent",
+      "StackConfigurations" : {
+        "property_description" : "The percentage of the resources limited to a particular user\n      for the job queue at any given point of time by default.\n    ",
+        "property_value" : "100",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.default-minimum-user-limit-percent",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/hadoop.job.history.user.location",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "none",
+        "stack_version" : "1.2.1",
+        "property_name" : "hadoop.job.history.user.location",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.input.buffer.percent",
+      "StackConfigurations" : {
+        "property_description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
+        "property_value" : "0.7",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.shuffle.input.buffer.percent",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.reduce.slowstart.completed.maps",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "0.05",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.reduce.slowstart.completed.maps",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.history.server.embedded",
+      "StackConfigurations" : {
+        "property_description" : "Should job history server be embedded within Job tracker\nprocess",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapreduce.history.server.embedded",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/io.sort.factor",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "100",
+        "stack_version" : "1.2.1",
+        "property_name" : "io.sort.factor",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.reduce.tasks.speculative.execution",
+      "StackConfigurations" : {
+        "property_description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.reduce.tasks.speculative.execution",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.active",
+      "StackConfigurations" : {
+        "property_description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.tracker.persist.jobstatus.active",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.inmem.merge.threshold",
+      "StackConfigurations" : {
+        "property_description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
+        "property_value" : "1000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.inmem.merge.threshold",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-worker-threads",
+      "StackConfigurations" : {
+        "property_description" : "Number of worker threads which would be used by\n    Initialization poller to initialize jobs in a set of queue.\n    If number mentioned in property is equal to number of job queues\n    then a single thread would initialize jobs in a queue. If lesser\n    then a thread would get a set of queues assigned. If the number\n    is greater then number of threads would be equal to number of \n    job queues.\n    ",
+        "property_value" : "5",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.init-worker-threads",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+      "StackConfigurations" : {
+        "property_description" : "The maximum number of tasks, across all jobs in the queue, \n    which can be initialized concurrently. Once the queue's jobs exceed this \n    limit they will be queued on disk.  \n    ",
+        "property_value" : "200000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-submit-job",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "*",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.queue.default.acl-submit-job",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-queue-acls.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.reduce.input.buffer.percent",
+      "StackConfigurations" : {
+        "property_description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
+        "property_value" : "0.0",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.reduce.input.buffer.percent",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.completeuserjobs.maximum",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "5",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.queue.names",
+      "StackConfigurations" : {
+        "property_description" : " Comma separated list of queues configured for this jobtracker.",
+        "property_value" : "default",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.queue.names",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+      "StackConfigurations" : {
+        "property_description" : "The maximum number of tasks per-user, across all the of the \n    user's jobs in the queue, which can be initialized concurrently. Once the \n    user's jobs exceed this limit they will be queued on disk.  \n    ",
+        "property_value" : "100000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.map.tasks.speculative.execution",
+      "StackConfigurations" : {
+        "property_description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.map.tasks.speculative.execution",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-init-accept-jobs-factor",
+      "StackConfigurations" : {
+        "property_description" : "The default multiple of (maximum-system-jobs * queue-capacity) \n    used to determine the number of jobs which are accepted by the scheduler.  \n    ",
+        "property_value" : "10",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.default-init-accept-jobs-factor",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
+      "StackConfigurations" : {
+        "property_description" : "\n    15-minute bucket size (value is in minutes)\n  ",
+        "property_value" : "15",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.tasktracker.group",
+      "StackConfigurations" : {
+        "property_description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
+        "property_value" : "hadoop",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapreduce.tasktracker.group",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
+      "StackConfigurations" : {
+        "property_description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
+        "property_value" : "50000000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.merge.percent",
+      "StackConfigurations" : {
+        "property_description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
+        "property_value" : "0.66",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.job.shuffle.merge.percent",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.reduce.input.limit",
+      "StackConfigurations" : {
+        "property_description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
+        "property_value" : "10737418240",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapreduce.reduce.input.limit",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.user-limit-factor",
+      "StackConfigurations" : {
+        "property_description" : "The multiple of the queue capacity which can be configured to \n    allow a single user to acquire more slots. \n    ",
+        "property_value" : "1",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.user-limit-factor",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/io.sort.record.percent",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : ".2",
+        "stack_version" : "1.2.1",
+        "property_name" : "io.sort.record.percent",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.output.compression.type",
+      "StackConfigurations" : {
+        "property_description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
+        "property_value" : "BLOCK",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.output.compression.type",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.task.timeout",
+      "StackConfigurations" : {
+        "property_description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
+        "property_value" : "600000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.task.timeout",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.check",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "10000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.jobtracker.retirejob.check",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+      "StackConfigurations" : {
+        "property_description" : " Each queue enforces a limit on the percentage of resources \n    allocated to a user at any given time, if there is competition for them. \n    This user limit can vary between a minimum and maximum value. The former\n    depends on the number of users who have submitted jobs, and the latter is\n    set to this property value. For example, suppose the value of this \n    property is 25. If two users have submitted jobs to a queue, no single \n    user can use more than 50% of the queue resources. If a third user submits\n    a job, no single user can use more than 33% of the queue resources. With 4 \n    or more users, no user can use more than 25% of the queue's resources. A \n    value of 100 implies no user limits are imposed. \n    ",
+        "property_value" : "100",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.interval",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "21600000",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.jobtracker.retirejob.interval",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.system.dir",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "/mapred/system",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapred.system.dir",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.staging.root.dir",
+      "StackConfigurations" : {
+        "property_description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
+        "property_value" : "/user",
+        "stack_version" : "1.2.1",
+        "property_name" : "mapreduce.jobtracker.staging.root.dir",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    }
+  ]
+}

+ 317 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/OOZIE.json

@@ -0,0 +1,317 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.pool.max.active.conn",
+      "StackConfigurations" : {
+        "property_description" : "\n             Max number of connections.\n        ",
+        "property_value" : "10",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.JPAService.pool.max.active.conn",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.PurgeService.older.than",
+      "StackConfigurations" : {
+        "property_description" : "\n     Jobs older than this value, in days, will be purged by the PurgeService.\n     ",
+        "property_value" : "30",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.PurgeService.older.than",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.system.id",
+      "StackConfigurations" : {
+        "property_description" : "\n    The Oozie system ID.\n    ",
+        "property_value" : "oozie-${user.name}",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.system.id",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.authentication.kerberos.name.rules",
+      "StackConfigurations" : {
+        "property_description" : "The mapping from kerberos principal names to local OS user names.",
+        "property_value" : "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT\n        ",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.authentication.kerberos.name.rules",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.base.url",
+      "StackConfigurations" : {
+        "property_description" : "Base Oozie URL.",
+        "property_value" : "http://localhost:11000/oozie",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.base.url",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.SchemaService.wf.ext.schemas",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.SchemaService.wf.ext.schemas",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.password",
+      "StackConfigurations" : {
+        "property_description" : "\n            DB user password.\n\n            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,\n                       if empty Configuration assumes it is NULL.\n        ",
+        "property_value" : " ",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.JPAService.jdbc.password",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.callable.concurrency",
+      "StackConfigurations" : {
+        "property_description" : "\n     Maximum concurrency for a given callable type.\n     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n     All commands that use action executors (action-start, action-end, action-kill and action-check) use\n     the action type as the callable type.\n     ",
+        "property_value" : "3",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.CallableQueueService.callable.concurrency",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.db.schema.name",
+      "StackConfigurations" : {
+        "property_description" : "\n      Oozie DataBase Name\n     ",
+        "property_value" : "oozie",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.db.schema.name",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.username",
+      "StackConfigurations" : {
+        "property_description" : "\n            DB user name.\n        ",
+        "property_value" : "sa",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.JPAService.jdbc.username",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.jobTracker.whitelist",
+      "StackConfigurations" : {
+        "property_description" : "\n      Whitelisted job tracker for Oozie service.\n      ",
+        "property_value" : " ",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.url",
+      "StackConfigurations" : {
+        "property_description" : "\n            JDBC URL.\n        ",
+        "property_value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.JPAService.jdbc.url",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.AuthorizationService.security.enabled",
+      "StackConfigurations" : {
+        "property_description" : "\n     Specifies whether security (user name/admin role) is enabled or not.\n     If disabled any user can manage Oozie system and manage any job.\n     ",
+        "property_value" : "true",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.AuthorizationService.security.enabled",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.create.db.schema",
+      "StackConfigurations" : {
+        "property_description" : "\n            Creates Oozie DB.\n\n            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n        ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.JPAService.create.db.schema",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.driver",
+      "StackConfigurations" : {
+        "property_description" : "\n            JDBC driver class.\n        ",
+        "property_value" : "org.apache.derby.jdbc.EmbeddedDriver",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.JPAService.jdbc.driver",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.ActionService.executor.ext.classes",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor\n        ",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.ActionService.executor.ext.classes",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.systemmode",
+      "StackConfigurations" : {
+        "property_description" : "\n     System mode for  Oozie at startup.\n     ",
+        "property_value" : "NORMAL",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.systemmode",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.queue.size",
+      "StackConfigurations" : {
+        "property_description" : "Max callable queue size",
+        "property_value" : "1000",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.CallableQueueService.queue.size",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.nameNode.whitelist",
+      "StackConfigurations" : {
+        "property_description" : "\n      ",
+        "property_value" : " ",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/use.system.libpath.for.mapreduce.and.pig.jobs",
+      "StackConfigurations" : {
+        "property_description" : "\n      If set to true, submissions of MapReduce and Pig jobs will include\n      automatically the system library path, thus not requiring users to\n      specify where the Pig JAR files are. Instead, the ones from the system\n      library path are used.\n      ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.PurgeService.purge.interval",
+      "StackConfigurations" : {
+        "property_description" : "\n     Interval at which the purge service will run, in seconds.\n     ",
+        "property_value" : "3600",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.PurgeService.purge.interval",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.authentication.type",
+      "StackConfigurations" : {
+        "property_description" : "\n      ",
+        "property_value" : "simple",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.authentication.type",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.threads",
+      "StackConfigurations" : {
+        "property_description" : "Number of threads used for executing callables",
+        "property_value" : "10",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.CallableQueueService.threads",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.coord.normal.default.timeout",
+      "StackConfigurations" : {
+        "property_description" : "Default timeout for a coordinator action input check (in minutes) for normal job.\n      -1 means infinite timeout",
+        "property_value" : "120",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.coord.normal.default.timeout",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.WorkflowAppService.system.libpath",
+      "StackConfigurations" : {
+        "property_description" : "\n      System library path to use for workflow applications.\n      This path is added to workflow application if their job properties sets\n      the property 'oozie.use.system.libpath' to true.\n      ",
+        "property_value" : "/user/${user.name}/share/lib",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.WorkflowAppService.system.libpath",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.hadoop.configurations",
+      "StackConfigurations" : {
+        "property_description" : "\n          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n          the relevant Hadoop *-site.xml files. If the path is relative is looked within\n          the Oozie configuration directory; though the path can be absolute (i.e. to point\n          to Hadoop client conf/ directories in the local filesystem.\n      ",
+        "property_value" : "*=/etc/hadoop/conf",
+        "stack_version" : "1.2.1",
+        "property_name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP",
+        "type" : "oozie-site.xml"
+      }
+    }
+  ]
+}

+ 173 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/WEBHCAT.json

@@ -0,0 +1,173 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.override.enabled",
+      "StackConfigurations" : {
+        "property_description" : "\n     Enable the override path in templeton.override.jars\n   ",
+        "property_value" : "false",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.override.enabled",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hive.archive",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hive archive.",
+        "property_value" : "hdfs:///apps/webhcat/hive.tar.gz",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.hive.archive",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.jar",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Templeton jar file.",
+        "property_value" : "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.jar",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.streaming.jar",
+      "StackConfigurations" : {
+        "property_description" : "The hdfs path to the Hadoop streaming jar file.",
+        "property_value" : "hdfs:///apps/webhcat/hadoop-streaming.jar",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.streaming.jar",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hadoop",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hadoop executable.",
+        "property_value" : "/usr/bin/hadoop",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.hadoop",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.pig.path",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Pig executable.",
+        "property_value" : "pig.tar.gz/pig/bin/pig",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.pig.path",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.libjars",
+      "StackConfigurations" : {
+        "property_description" : "Jars to add the the classpath.",
+        "property_value" : "/usr/lib/zookeeper/zookeeper.jar",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.libjars",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.pig.archive",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Pig archive.",
+        "property_value" : "hdfs:///apps/webhcat/pig.tar.gz",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.pig.archive",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hcat",
+      "StackConfigurations" : {
+        "property_description" : "The path to the hcatalog executable.",
+        "property_value" : "/usr/bin/hcat",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.hcat",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hive.path",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hive executable.",
+        "property_value" : "hive.tar.gz/hive/bin/hive",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.hive.path",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.storage.class",
+      "StackConfigurations" : {
+        "property_description" : "The class to use as storage",
+        "property_value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.storage.class",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hadoop.conf.dir",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hadoop configuration.",
+        "property_value" : "/etc/hadoop/conf",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.hadoop.conf.dir",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.port",
+      "StackConfigurations" : {
+        "property_description" : "The HTTP port for the main server.",
+        "property_value" : "50111",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.port",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.exec.timeout",
+      "StackConfigurations" : {
+        "property_description" : "Time out for templeton api",
+        "property_value" : "60000",
+        "stack_version" : "1.2.1",
+        "property_name" : "templeton.exec.timeout",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP",
+        "type" : "webhcat-site.xml"
+      }
+    }
+  ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/ZOOKEEPER.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/ZOOKEEPER/configurations?fields=*",
+  "items" : [ ]
+}

+ 65 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/GANGLIA.json

@@ -0,0 +1,65 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations?fields=*&_=1368459065278",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_runtime_dir",
+      "StackConfigurations" : {
+        "property_description" : "Run directories for Ganglia",
+        "property_value" : "/var/run/ganglia/hdp",
+        "stack_version" : "1.3.0",
+        "property_name" : "ganglia_runtime_dir",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmetad_user",
+      "StackConfigurations" : {
+        "property_description" : "User ",
+        "property_value" : "nobody",
+        "stack_version" : "1.3.0",
+        "property_name" : "gmetad_user",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/rrdcached_base_dir",
+      "StackConfigurations" : {
+        "property_description" : "Default directory for saving the rrd files on ganglia server",
+        "property_value" : "/var/lib/ganglia/rrds",
+        "stack_version" : "1.3.0",
+        "property_name" : "rrdcached_base_dir",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmond_user",
+      "StackConfigurations" : {
+        "property_description" : "User ",
+        "property_value" : "nobody",
+        "stack_version" : "1.3.0",
+        "property_name" : "gmond_user",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_conf_dir",
+      "StackConfigurations" : {
+        "property_description" : "Config directory for Ganglia",
+        "property_value" : "/etc/ganglia/hdp",
+        "stack_version" : "1.3.0",
+        "property_name" : "ganglia_conf_dir",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/HBASE.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HBASE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/HCATALOG.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HCATALOG.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/HDFS.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/HIVE.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HIVE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/HUE.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HUE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/MAPREDUCE.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/MAPREDUCE.json


+ 41 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/NAGIOS.json

@@ -0,0 +1,41 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations?fields=*&_=1368459065260",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_group",
+      "StackConfigurations" : {
+        "property_description" : "Nagios Group.",
+        "property_value" : "nagios",
+        "stack_version" : "1.3.0",
+        "property_name" : "nagios_group",
+        "service_name" : "NAGIOS",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_web_login",
+      "StackConfigurations" : {
+        "property_description" : "Nagios web user.",
+        "property_value" : "nagiosadmin",
+        "stack_version" : "1.3.0",
+        "property_name" : "nagios_web_login",
+        "service_name" : "NAGIOS",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_user",
+      "StackConfigurations" : {
+        "property_description" : "Nagios Username.",
+        "property_value" : "nagios",
+        "stack_version" : "1.3.0",
+        "property_name" : "nagios_user",
+        "service_name" : "NAGIOS",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/OOZIE.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/OOZIE.json


+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/PIG.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG/configurations?fields=*&_=1368459065432",
+  "items" : [ ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/SQOOP.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG/configurations?fields=*&_=1368459065432",
+  "items" : [ ]
+}

+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/WEBHCAT.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/WEBHCAT.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/ZOOKEEPER.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/ZOOKEEPER.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version130/global.json → ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/global.json


+ 0 - 60
ambari-web/app/assets/data/wizard/stack/hdp/version122/HBASE.json

@@ -1,60 +0,0 @@
-{
-  "name" : "HBASE",
-  "version" : "0.94.5",
-  "user" : "mapred",
-  "comment" : "Non-relational distributed database and centralized service for configuration management & synchronization",
-  "properties" : [ {
-    "name" : "hbase.cluster.distributed",
-    "value" : "true",
-    "description" : "The mode the cluster will be in. Possible values are\n      false for standalone mode and true for distributed mode.  If\n      false, startup will run all HBase and ZooKeeper daemons together\n      in the one JVM.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.master.lease.thread.wakefrequency",
-    "value" : "3000",
-    "description" : "The interval between checks for expired region server leases.\n    This value has been reduced due to the other reduced values above so that\n    the master will notice a dead region server sooner. The default is 15 seconds.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.superuser",
-    "value" : "hbase",
-    "description" : "List of users or groups (comma-separated), who are allowed\n    full privileges, regardless of stored ACLs, across the cluster.\n    Only used when HBase security is enabled.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.zookeeper.property.clientPort",
-    "value" : "2181",
-    "description" : "Property from ZooKeeper's config zoo.cfg.\n    The port at which the clients will connect.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.regionserver.optionalcacheflushinterval",
-    "value" : "10000",
-    "description" : "\n      Amount of time to wait since the last time a region was flushed before\n      invoking an optional cache flush. Default 60,000.\n    ",
-    "filename" : "hbase-site.xml"
-  }, {
-    "name" : "hbase.zookeeper.useMulti",
-    "value" : "true",
-    "description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n    This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).åá\n    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will\n    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n    ",
-    "filename" : "hbase-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "HBASE_MASTER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "HBASE_REGIONSERVER",
-    "category" : "SLAVE",
-    "client" : false,
-    "master" : false
-  }, {
-    "name" : "HBASE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "HBASE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}

+ 0 - 20
ambari-web/app/assets/data/wizard/stack/hdp/version122/HCATALOG.json

@@ -1,20 +0,0 @@
-{
-  "name" : "HCATALOG",
-  "version" : "0.5.0",
-  "user" : "root",
-  "comment" : "This is comment for HCATALOG service",
-  "properties" : [ ],
-  "components" : [ {
-    "name" : "HCAT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : true,
-  "clientComponent" : {
-    "name" : "HCAT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}

+ 0 - 210
ambari-web/app/assets/data/wizard/stack/hdp/version122/HDFS.json

@@ -1,210 +0,0 @@
-{
-  "name" : "HDFS",
-  "version" : "1.1.2",
-  "user" : "root",
-  "comment" : "Apache Hadoop Distributed File System",
-  "properties" : [ {
-    "name" : "dfs.datanode.socket.write.timeout",
-    "value" : "0",
-    "description" : "DFS Client write socket timeout",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.replication.max",
-    "value" : "50",
-    "description" : "Maximal block replication.\n  ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.heartbeat.interval",
-    "value" : "3",
-    "description" : "Determines datanode heartbeat interval in seconds.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.safemode.threshold.pct",
-    "value" : "1.0f",
-    "description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.balance.bandwidthPerSec",
-    "value" : "6250000",
-    "description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.block.size",
-    "value" : "134217728",
-    "description" : "The default block size for new files.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.datanode.ipc.address",
-    "value" : "0.0.0.0:8010",
-    "description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.blockreport.initialDelay",
-    "value" : "120",
-    "description" : "Delay for first block report in seconds.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.datanode.du.pct",
-    "value" : "0.85f",
-    "description" : "When calculating remaining space, only use this percentage of the real available space\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.namenode.handler.count",
-    "value" : "40",
-    "description" : "The number of server threads for the namenode.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.datanode.max.xcievers",
-    "value" : "4096",
-    "description" : "PRIVATE CONFIG VARIABLE",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.umaskmode",
-    "value" : "077",
-    "description" : "\nThe octal umask used when creating files and directories.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.web.ugi",
-    "value" : "gopher,gopher",
-    "description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.permissions",
-    "value" : "true",
-    "description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.permissions.supergroup",
-    "value" : "hdfs",
-    "description" : "The name of the group of super-users.",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.namenode.handler.count",
-    "value" : "100",
-    "description" : "Added to grow Queue size so that more client connections are allowed",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "ipc.server.max.response.size",
-    "value" : "5242880",
-    "description" : null,
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.block.access.token.enable",
-    "value" : "true",
-    "description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.secondary.https.port",
-    "value" : "50490",
-    "description" : "The https port where secondary-namenode binds",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.https.port",
-    "value" : "50470",
-    "description" : "The https port where namenode binds",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.access.time.precision",
-    "value" : "0",
-    "description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.cluster.administrators",
-    "value" : " hdfs",
-    "description" : "ACL for who all can view the default servlets in the HDFS",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "ipc.server.read.threadpool.size",
-    "value" : "5",
-    "description" : null,
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "dfs.datanode.failed.volumes.tolerated",
-    "value" : "0",
-    "description" : "Number of failed disks datanode would tolerate",
-    "filename" : "hdfs-site.xml"
-  }, {
-    "name" : "io.file.buffer.size",
-    "value" : "131072",
-    "description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "io.serializations",
-    "value" : "org.apache.hadoop.io.serializer.WritableSerialization",
-    "description" : null,
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "io.compression.codec.lzo.class",
-    "value" : "com.hadoop.compression.lzo.LzoCodec",
-    "description" : "The implementation for lzo codec.",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.trash.interval",
-    "value" : "360",
-    "description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.checkpoint.edits.dir",
-    "value" : "${fs.checkpoint.dir}",
-    "description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.checkpoint.period",
-    "value" : "21600",
-    "description" : "The number of seconds between two periodic checkpoints.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "fs.checkpoint.size",
-    "value" : "536870912",
-    "description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "ipc.client.idlethreshold",
-    "value" : "8000",
-    "description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "ipc.client.connection.maxidletime",
-    "value" : "30000",
-    "description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "ipc.client.connect.max.retries",
-    "value" : "50",
-    "description" : "Defines the maximum number of retries for IPC connections.",
-    "filename" : "core-site.xml"
-  }, {
-    "name" : "webinterface.private.actions",
-    "value" : "false",
-    "description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
-    "filename" : "core-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "NAMENODE",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "DATANODE",
-    "category" : "SLAVE",
-    "client" : false,
-    "master" : false
-  }, {
-    "name" : "SECONDARY_NAMENODE",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "HDFS_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "HDFS_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}

+ 0 - 95
ambari-web/app/assets/data/wizard/stack/hdp/version122/HIVE.json

@@ -1,95 +0,0 @@
-{
-  "name" : "HIVE",
-  "version" : "0.10.0",
-  "user" : "root",
-  "comment" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
-  "properties" : [ {
-    "name" : "hive.metastore.local",
-    "value" : "false",
-    "description" : "controls whether to connect to remove metastore server or\n    open a new metastore server in Hive Client JVM",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "javax.jdo.option.ConnectionDriverName",
-    "value" : "com.mysql.jdbc.Driver",
-    "description" : "Driver class name for a JDBC metastore",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.warehouse.dir",
-    "value" : "/apps/hive/warehouse",
-    "description" : "location of default database for the warehouse",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.cache.pinobjtypes",
-    "value" : "Table,Database,Type,FieldSchema,Order",
-    "description" : "List of comma separated metastore object types that should be pinned in the cache",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.semantic.analyzer.factory.impl",
-    "value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
-    "description" : "controls which SemanticAnalyzerFactory implemenation class is used by CLI",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hadoop.clientside.fs.operations",
-    "value" : "true",
-    "description" : "FS operations are owned by client",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.client.socket.timeout",
-    "value" : "60",
-    "description" : "MetaStore Client socket timeout in seconds",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.metastore.execute.setugi",
-    "value" : "true",
-    "description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.security.authorization.enabled",
-    "value" : "true",
-    "description" : "enable or disable the hive client authorization",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.security.authorization.manager",
-    "value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
-    "description" : "the hive client authorization manager class name.\n    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "hive.server2.enable.doAs",
-    "value" : "true",
-    "description" : null,
-    "filename" : "hive-site.xml"
-  }, {
-    "name" : "fs.hdfs.impl.disable.cache",
-    "value" : "true",
-    "description" : null,
-    "filename" : "hive-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "HIVE_METASTORE",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "HIVE_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "MYSQL_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "HIVE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "HIVE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}

+ 0 - 230
ambari-web/app/assets/data/wizard/stack/hdp/version122/MAPREDUCE.json

@@ -1,230 +0,0 @@
-{
-  "name" : "MAPREDUCE",
-  "version" : "1.1.2",
-  "user" : "mapred",
-  "comment" : "Apache Hadoop Distributed Processing Framework",
-  "properties" : [ {
-    "name" : "io.sort.record.percent",
-    "value" : ".2",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "io.sort.factor",
-    "value" : "100",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
-    "value" : "250",
-    "description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.handler.count",
-    "value" : "50",
-    "description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.system.dir",
-    "value" : "/mapred/system",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.cluster.administrators",
-    "value" : " hadoop",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.reduce.parallel.copies",
-    "value" : "30",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "tasktracker.http.threads",
-    "value" : "50",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.map.tasks.speculative.execution",
-    "value" : "false",
-    "description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.reduce.tasks.speculative.execution",
-    "value" : "false",
-    "description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.reduce.slowstart.completed.maps",
-    "value" : "0.05",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.inmem.merge.threshold",
-    "value" : "1000",
-    "description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.shuffle.merge.percent",
-    "value" : "0.66",
-    "description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.shuffle.input.buffer.percent",
-    "value" : "0.7",
-    "description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.output.compression.type",
-    "value" : "BLOCK",
-    "description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.completeuserjobs.maximum",
-    "value" : "0",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.restart.recover",
-    "value" : "false",
-    "description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.reduce.input.buffer.percent",
-    "value" : "0.0",
-    "description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.reduce.input.limit",
-    "value" : "10737418240",
-    "description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.task.timeout",
-    "value" : "600000",
-    "description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "jetty.connector",
-    "value" : "org.mortbay.jetty.nio.SelectChannelConnector",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.child.root.logger",
-    "value" : "INFO,TLA",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.max.tracker.blacklists",
-    "value" : "16",
-    "description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.healthChecker.interval",
-    "value" : "135000",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.healthChecker.script.timeout",
-    "value" : "60000",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.persist.jobstatus.active",
-    "value" : "false",
-    "description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.persist.jobstatus.hours",
-    "value" : "1",
-    "description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.retirejob.check",
-    "value" : "10000",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.retirejob.interval",
-    "value" : "0",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.tracker.history.completed.location",
-    "value" : "/mapred/history/done",
-    "description" : "No description",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
-    "value" : "false",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.job.reuse.jvm.num.tasks",
-    "value" : "1",
-    "description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "hadoop.job.history.user.location",
-    "value" : "none",
-    "description" : null,
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.jobtracker.staging.root.dir",
-    "value" : "/user",
-    "description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.tasktracker.group",
-    "value" : "hadoop",
-    "description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.jobtracker.split.metainfo.maxsize",
-    "value" : "50000000",
-    "description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapreduce.history.server.embedded",
-    "value" : "false",
-    "description" : "Should job history server be embedded within Job tracker\nprocess",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.blacklist.fault-timeout-window",
-    "value" : "180",
-    "description" : "\n    3-hour sliding window (value is in minutes)\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.jobtracker.blacklist.fault-bucket-width",
-    "value" : "15",
-    "description" : "\n    15-minute bucket size (value is in minutes)\n  ",
-    "filename" : "mapred-site.xml"
-  }, {
-    "name" : "mapred.queue.names",
-    "value" : "default",
-    "description" : " Comma separated list of queues configured for this jobtracker.",
-    "filename" : "mapred-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "JOBTRACKER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "TASKTRACKER",
-    "category" : "SLAVE",
-    "client" : false,
-    "master" : false
-  }, {
-    "name" : "MAPREDUCE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "MAPREDUCE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}

+ 0 - 155
ambari-web/app/assets/data/wizard/stack/hdp/version122/OOZIE.json

@@ -1,155 +0,0 @@
-{
-  "name" : "OOZIE",
-  "version" : "3.2.0",
-  "user" : "root",
-  "comment" : "System for workflow coordination and execution of Apache Hadoop jobs",
-  "properties" : [ {
-    "name" : "oozie.base.url",
-    "value" : "http://localhost:11000/oozie",
-    "description" : "Base Oozie URL.",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.system.id",
-    "value" : "oozie-${user.name}",
-    "description" : "\n    The Oozie system ID.\n    ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.systemmode",
-    "value" : "NORMAL",
-    "description" : "\n     System mode for  Oozie at startup.\n     ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.AuthorizationService.security.enabled",
-    "value" : "true",
-    "description" : "\n     Specifies whether security (user name/admin role) is enabled or not.\n     If disabled any user can manage Oozie system and manage any job.\n     ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PurgeService.older.than",
-    "value" : "30",
-    "description" : "\n     Jobs older than this value, in days, will be purged by the PurgeService.\n     ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.PurgeService.purge.interval",
-    "value" : "3600",
-    "description" : "\n     Interval at which the purge service will run, in seconds.\n     ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.queue.size",
-    "value" : "1000",
-    "description" : "Max callable queue size",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.threads",
-    "value" : "10",
-    "description" : "Number of threads used for executing callables",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.CallableQueueService.callable.concurrency",
-    "value" : "3",
-    "description" : "\n     Maximum concurrency for a given callable type.\n     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n     All commands that use action executors (action-start, action-end, action-kill and action-check) use\n     the action type as the callable type.\n     ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.coord.normal.default.timeout",
-    "value" : "120",
-    "description" : "Default timeout for a coordinator action input check (in minutes) for normal job.\n      -1 means infinite timeout",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.db.schema.name",
-    "value" : "oozie",
-    "description" : "\n      Oozie DataBase Name\n     ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
-    "value" : " ",
-    "description" : "\n      Whitelisted job tracker for Oozie service.\n      ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.type",
-    "value" : "simple",
-    "description" : "\n      ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
-    "value" : " ",
-    "description" : "\n      ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.WorkflowAppService.system.libpath",
-    "value" : "/user/${user.name}/share/lib",
-    "description" : "\n      System library path to use for workflow applications.\n      This path is added to workflow application if their job properties sets\n      the property 'oozie.use.system.libpath' to true.\n      ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
-    "value" : "false",
-    "description" : "\n      If set to true, submissions of MapReduce and Pig jobs will include\n      automatically the system library path, thus not requiring users to\n      specify where the Pig JAR files are. Instead, the ones from the system\n      library path are used.\n      ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.authentication.kerberos.name.rules",
-    "value" : "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT\n        ",
-    "description" : "The mapping from kerberos principal names to local OS user names.",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
-    "value" : "*=/etc/hadoop/conf",
-    "description" : "\n          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n          the relevant Hadoop *-site.xml files. If the path is relative is looked within\n          the Oozie configuration directory; though the path can be absolute (i.e. to point\n          to Hadoop client conf/ directories in the local filesystem.\n      ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.ActionService.executor.ext.classes",
-    "value" : "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor\n        ",
-    "description" : null,
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.SchemaService.wf.ext.schemas",
-    "value" : "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd",
-    "description" : null,
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.create.db.schema",
-    "value" : "false",
-    "description" : "\n            Creates Oozie DB.\n\n            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n        ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.jdbc.driver",
-    "value" : "org.apache.derby.jdbc.EmbeddedDriver",
-    "description" : "\n            JDBC driver class.\n        ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.jdbc.url",
-    "value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
-    "description" : "\n            JDBC URL.\n        ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.jdbc.username",
-    "value" : "sa",
-    "description" : "\n            DB user name.\n        ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.jdbc.password",
-    "value" : " ",
-    "description" : "\n            DB user password.\n\n            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,\n                       if empty Configuration assumes it is NULL.\n        ",
-    "filename" : "oozie-site.xml"
-  }, {
-    "name" : "oozie.service.JPAService.pool.max.active.conn",
-    "value" : "10",
-    "description" : "\n             Max number of connections.\n        ",
-    "filename" : "oozie-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "OOZIE_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "OOZIE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "OOZIE_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}

+ 0 - 90
ambari-web/app/assets/data/wizard/stack/hdp/version122/WEBHCAT.json

@@ -1,90 +0,0 @@
-{
-  "name" : "WEBHCAT",
-  "version" : "0.5.0",
-  "user" : "root",
-  "comment" : "This is comment for WEBHCAT service",
-  "properties" : [ {
-    "name" : "templeton.port",
-    "value" : "50111",
-    "description" : "The HTTP port for the main server.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.hadoop.conf.dir",
-    "value" : "/etc/hadoop/conf",
-    "description" : "The path to the Hadoop configuration.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.jar",
-    "value" : "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
-    "description" : "The path to the Templeton jar file.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.libjars",
-    "value" : "/usr/lib/zookeeper/zookeeper.jar",
-    "description" : "Jars to add the the classpath.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.hadoop",
-    "value" : "/usr/bin/hadoop",
-    "description" : "The path to the Hadoop executable.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.pig.archive",
-    "value" : "hdfs:///apps/webhcat/pig.tar.gz",
-    "description" : "The path to the Pig archive.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.pig.path",
-    "value" : "pig.tar.gz/pig/bin/pig",
-    "description" : "The path to the Pig executable.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.hcat",
-    "value" : "/usr/bin/hcat",
-    "description" : "The path to the hcatalog executable.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.hive.archive",
-    "value" : "hdfs:///apps/webhcat/hive.tar.gz",
-    "description" : "The path to the Hive archive.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.hive.path",
-    "value" : "hive.tar.gz/hive/bin/hive",
-    "description" : "The path to the Hive executable.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.storage.class",
-    "value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage",
-    "description" : "The class to use as storage",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.override.enabled",
-    "value" : "false",
-    "description" : "\n     Enable the override path in templeton.override.jars\n   ",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.streaming.jar",
-    "value" : "hdfs:///apps/webhcat/hadoop-streaming.jar",
-    "description" : "The hdfs path to the Hadoop streaming jar file.",
-    "filename" : "webhcat-site.xml"
-  }, {
-    "name" : "templeton.exec.timeout",
-    "value" : "60000",
-    "description" : "Time out for templeton api",
-    "filename" : "webhcat-site.xml"
-  } ],
-  "components" : [ {
-    "name" : "WEBHCAT_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "WEBHCAT_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }
-}

+ 0 - 25
ambari-web/app/assets/data/wizard/stack/hdp/version122/ZOOKEEPER.json

@@ -1,25 +0,0 @@
-{
-  "name" : "ZOOKEEPER",
-  "version" : "3.4.5",
-  "user" : "root",
-  "comment" : "This is comment for ZOOKEEPER service",
-  "properties" : [ ],
-  "components" : [ {
-    "name" : "ZOOKEEPER_SERVER",
-    "category" : "MASTER",
-    "client" : false,
-    "master" : true
-  }, {
-    "name" : "ZOOKEEPER_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  } ],
-  "clientOnlyService" : false,
-  "clientComponent" : {
-    "name" : "ZOOKEEPER_CLIENT",
-    "category" : "CLIENT",
-    "client" : true,
-    "master" : false
-  }
-}

+ 65 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/GANGLIA.json

@@ -0,0 +1,65 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations?fields=*&_=1368459065278",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_runtime_dir",
+      "StackConfigurations" : {
+        "property_description" : "Run directories for Ganglia",
+        "property_value" : "/var/run/ganglia/hdp",
+        "stack_version" : "1.3.0",
+        "property_name" : "ganglia_runtime_dir",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmetad_user",
+      "StackConfigurations" : {
+        "property_description" : "User ",
+        "property_value" : "nobody",
+        "stack_version" : "1.3.0",
+        "property_name" : "gmetad_user",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/rrdcached_base_dir",
+      "StackConfigurations" : {
+        "property_description" : "Default directory for saving the rrd files on ganglia server",
+        "property_value" : "/var/lib/ganglia/rrds",
+        "stack_version" : "1.3.0",
+        "property_name" : "rrdcached_base_dir",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmond_user",
+      "StackConfigurations" : {
+        "property_description" : "User ",
+        "property_value" : "nobody",
+        "stack_version" : "1.3.0",
+        "property_name" : "gmond_user",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_conf_dir",
+      "StackConfigurations" : {
+        "property_description" : "Config directory for Ganglia",
+        "property_value" : "/etc/ganglia/hdp",
+        "stack_version" : "1.3.0",
+        "property_name" : "ganglia_conf_dir",
+        "service_name" : "GANGLIA",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 281 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HBASE.json

@@ -0,0 +1,281 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/zookeeper_sessiontimeout",
+      "StackConfigurations" : {
+        "property_description" : "ZooKeeper Session Timeout",
+        "property_value" : "60000",
+        "stack_version" : "1.3.0",
+        "property_name" : "zookeeper_sessiontimeout",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_master_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "HBase Master Heap Size",
+        "property_value" : "1024",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_master_heapsize",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstore_compactionthreshold",
+      "StackConfigurations" : {
+        "property_description" : "HBase HStore compaction threshold.",
+        "property_value" : "3",
+        "stack_version" : "1.3.0",
+        "property_name" : "hstore_compactionthreshold",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_blockcache_size",
+      "StackConfigurations" : {
+        "property_description" : "HFile block cache size.",
+        "property_value" : "0.25",
+        "stack_version" : "1.3.0",
+        "property_name" : "hfile_blockcache_size",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.client.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for HRegionInterface protocol implementations (ie. \n    clients talking to HRegionServers)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.client.protocol.acl",
+        "service_name" : "HBASE",
+        "type" : "hbase-policy.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_blockmultiplier",
+      "StackConfigurations" : {
+        "property_description" : "HBase Region Block Multiplier",
+        "property_value" : "2",
+        "stack_version" : "1.3.0",
+        "property_name" : "hregion_blockmultiplier",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.useMulti",
+      "StackConfigurations" : {
+        "property_description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n    This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).\n    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will\n    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n    ",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase.zookeeper.useMulti",
+        "service_name" : "HBASE",
+        "type" : "hbase-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_conf_dir",
+      "StackConfigurations" : {
+        "property_description" : "Config Directory for HBase.",
+        "property_value" : "/etc/hbase",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_conf_dir",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.property.clientPort",
+      "StackConfigurations" : {
+        "property_description" : "Property from ZooKeeper's config zoo.cfg.\n    The port at which the clients will connect.\n    ",
+        "property_value" : "2181",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase.zookeeper.property.clientPort",
+        "service_name" : "HBASE",
+        "type" : "hbase-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_log_dir",
+      "StackConfigurations" : {
+        "property_description" : "Log Directories for HBase.",
+        "property_value" : "/var/log/hbase",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_log_dir",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_tmp_dir",
+      "StackConfigurations" : {
+        "property_description" : "Hbase temp directory",
+        "property_value" : "/var/log/hbase",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_tmp_dir",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_max_keyvalue_size",
+      "StackConfigurations" : {
+        "property_description" : "HBase Client Maximum key-value Size",
+        "property_value" : "10485760",
+        "stack_version" : "1.3.0",
+        "property_name" : "hfile_max_keyvalue_size",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_regionserver_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "HBase RegionServer Heap Size.",
+        "property_value" : "1024",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_regionserver_heapsize",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_majorcompaction",
+      "StackConfigurations" : {
+        "property_description" : "HBase Major Compaction.",
+        "property_value" : "86400000",
+        "stack_version" : "1.3.0",
+        "property_name" : "hregion_majorcompaction",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/client_scannercaching",
+      "StackConfigurations" : {
+        "property_description" : "Base Client Scanner Caching",
+        "property_value" : "100",
+        "stack_version" : "1.3.0",
+        "property_name" : "client_scannercaching",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.masterregion.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for HMasterRegionInterface protocol implementations\n    (for HRegionServers communicating with HMaster)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.masterregion.protocol.acl",
+        "service_name" : "HBASE",
+        "type" : "hbase-policy.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.cluster.distributed",
+      "StackConfigurations" : {
+        "property_description" : "The mode the cluster will be in. Possible values are\n      false for standalone mode and true for distributed mode.  If\n      false, startup will run all HBase and ZooKeeper daemons together\n      in the one JVM.\n    ",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase.cluster.distributed",
+        "service_name" : "HBASE",
+        "type" : "hbase-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.master.lease.thread.wakefrequency",
+      "StackConfigurations" : {
+        "property_description" : "The interval between checks for expired region server leases.\n    This value has been reduced due to the other reduced values above so that\n    the master will notice a dead region server sooner. The default is 15 seconds.\n    ",
+        "property_value" : "3000",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase.master.lease.thread.wakefrequency",
+        "service_name" : "HBASE",
+        "type" : "hbase-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/regionserver_handlers",
+      "StackConfigurations" : {
+        "property_description" : "HBase RegionServer Handler",
+        "property_value" : "30",
+        "stack_version" : "1.3.0",
+        "property_name" : "regionserver_handlers",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.regionserver.optionalcacheflushinterval",
+      "StackConfigurations" : {
+        "property_description" : "\n      Amount of time to wait since the last time a region was flushed before\n      invoking an optional cache flush. Default 60,000.\n    ",
+        "property_value" : "10000",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase.regionserver.optionalcacheflushinterval",
+        "service_name" : "HBASE",
+        "type" : "hbase-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_pid_dir",
+      "StackConfigurations" : {
+        "property_description" : "PID Directory for HBase.",
+        "property_value" : "/var/run/hbase",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_pid_dir",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstorefile_maxsize",
+      "StackConfigurations" : {
+        "property_description" : "Maximum HStoreFile Size",
+        "property_value" : "1073741824",
+        "stack_version" : "1.3.0",
+        "property_name" : "hstorefile_maxsize",
+        "service_name" : "HBASE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.admin.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for HMasterInterface protocol implementation (ie. \n    clients talking to HMaster for admin operations).\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.admin.protocol.acl",
+        "service_name" : "HBASE",
+        "type" : "hbase-policy.xml",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HCATALOG.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HCATALOG/configurations?fields=*",
+  "items" : [ ]
+}

+ 737 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HDFS.json

@@ -0,0 +1,737 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
+      "StackConfigurations" : {
+        "property_description" : "Delay for first block report in seconds.",
+        "property_value" : "120",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.blockreport.initialDelay",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
+      "StackConfigurations" : {
+        "property_description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
+        "property_value" : "1.0f",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.safemode.threshold.pct",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_dir",
+      "StackConfigurations" : {
+        "property_description" : "Secondary NameNode checkpoint dir.",
+        "property_value" : "/hadoop/hdfs/namesecondary",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs_checkpoint_dir",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.umaskmode",
+      "StackConfigurations" : {
+        "property_description" : "\nThe octal umask used when creating files and directories.\n",
+        "property_value" : "077",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.umaskmode",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
+      "StackConfigurations" : {
+        "property_description" : "The implementation for lzo codec.",
+        "property_value" : "com.hadoop.compression.lzo.LzoCodec",
+        "stack_version" : "1.3.0",
+        "property_name" : "io.compression.codec.lzo.class",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.heartbeat.interval",
+      "StackConfigurations" : {
+        "property_description" : "Determines datanode heartbeat interval in seconds.",
+        "property_value" : "3",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.heartbeat.interval",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_failed_volume_tolerated",
+      "StackConfigurations" : {
+        "property_description" : "DataNode volumes failure toleration",
+        "property_value" : "0",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs_datanode_failed_volume_tolerated",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_name_dir",
+      "StackConfigurations" : {
+        "property_description" : "NameNode Directories.",
+        "property_value" : "/hadoop/hdfs/namenode",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs_name_dir",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_size",
+      "StackConfigurations" : {
+        "property_description" : "FS Checkpoint Size.",
+        "property_value" : "0.5",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs_checkpoint_size",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
+      "StackConfigurations" : {
+        "property_description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
+        "property_value" : "6250000",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.balance.bandwidthPerSec",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxnewsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode maximum new generation size",
+        "property_value" : "640",
+        "stack_version" : "1.3.0",
+        "property_name" : "namenode_opt_maxnewsize",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
+      "StackConfigurations" : {
+        "property_description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
+        "property_value" : "${fs.checkpoint.dir}",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs.checkpoint.edits.dir",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/keytab_path",
+      "StackConfigurations" : {
+        "property_description" : "KeyTab Directory.",
+        "property_value" : "/etc/security/keytabs",
+        "stack_version" : "1.3.0",
+        "property_name" : "keytab_path",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.size",
+      "StackConfigurations" : {
+        "property_description" : "The default block size for new files.",
+        "property_value" : "134217728",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.block.size",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security_enabled",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop Security",
+        "property_value" : "false",
+        "stack_version" : "1.3.0",
+        "property_name" : "security_enabled",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.serializations",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "org.apache.hadoop.io.serializer.WritableSerialization",
+        "stack_version" : "1.3.0",
+        "property_name" : "io.serializations",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for TaskUmbilicalProtocol, used by the map and reduce\n    tasks to communicate with the parent tasktracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.task.umbilical.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/kerberos_domain",
+      "StackConfigurations" : {
+        "property_description" : "Kerberos realm.",
+        "property_value" : "EXAMPLE.COM",
+        "stack_version" : "1.3.0",
+        "property_name" : "kerberos_domain",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_data_dir",
+      "StackConfigurations" : {
+        "property_description" : "Data directories for Data Nodes.",
+        "property_value" : "/hadoop/hdfs/data",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs_data_dir",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_log_dir_prefix",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop Log Dir Prefix",
+        "property_value" : "/var/log/hadoop",
+        "stack_version" : "1.3.0",
+        "property_name" : "hdfs_log_dir_prefix",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for DatanodeProtocol, which is used by datanodes to\n    communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.datanode.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
+      "StackConfigurations" : {
+        "property_description" : "Defines the maximum number of retries for IPC connections.",
+        "property_value" : "50",
+        "stack_version" : "1.3.0",
+        "property_name" : "ipc.client.connect.max.retries",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_period",
+      "StackConfigurations" : {
+        "property_description" : "HDFS Maximum Checkpoint Delay",
+        "property_value" : "21600",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs_checkpoint_period",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "The number of server threads for the namenode.",
+        "property_value" : "40",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.namenode.handler.count",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
+      "StackConfigurations" : {
+        "property_description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+        "property_value" : "0.0.0.0:8010",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.datanode.ipc.address",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_webhdfs_enabled",
+      "StackConfigurations" : {
+        "property_description" : "WebHDFS enabled",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs_webhdfs_enabled",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.trash.interval",
+      "StackConfigurations" : {
+        "property_description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
+        "property_value" : "360",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs.trash.interval",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.https.port",
+      "StackConfigurations" : {
+        "property_description" : "The https port where secondary-namenode binds",
+        "property_value" : "50490",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.secondary.https.port",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/datanode_du_reserved",
+      "StackConfigurations" : {
+        "property_description" : "Reserved space for HDFS",
+        "property_value" : "1",
+        "stack_version" : "1.3.0",
+        "property_name" : "datanode_du_reserved",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.file.buffer.size",
+      "StackConfigurations" : {
+        "property_description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
+        "property_value" : "131072",
+        "stack_version" : "1.3.0",
+        "property_name" : "io.file.buffer.size",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication.max",
+      "StackConfigurations" : {
+        "property_description" : "Maximal block replication.\n  ",
+        "property_value" : "50",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.replication.max",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_pid_dir_prefix",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop PID Dir Prefix",
+        "property_value" : "/var/run/hadoop",
+        "stack_version" : "1.3.0",
+        "property_name" : "hadoop_pid_dir_prefix",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for InterDatanodeProtocol, the inter-datanode protocol\n    for updating generation timestamp.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.inter.datanode.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
+      "StackConfigurations" : {
+        "property_description" : "DFS Client write socket timeout",
+        "property_value" : "0",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.datanode.socket.write.timeout",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
+      "StackConfigurations" : {
+        "property_description" : "PRIVATE CONFIG VARIABLE",
+        "property_value" : "4096",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.datanode.max.xcievers",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.max.response.size",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "5242880",
+        "stack_version" : "1.3.0",
+        "property_name" : "ipc.server.max.response.size",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.size",
+      "StackConfigurations" : {
+        "property_description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
+        "property_value" : "536870912",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs.checkpoint.size",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.namenode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for NamenodeProtocol, the protocol used by the secondary\n    namenode to communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.namenode.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions",
+      "StackConfigurations" : {
+        "property_description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.permissions",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
+      "StackConfigurations" : {
+        "property_description" : "The https port where namenode binds",
+        "property_value" : "50470",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.https.port",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode Java heap size",
+        "property_value" : "1024",
+        "stack_version" : "1.3.0",
+        "property_name" : "namenode_heapsize",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "Added to grow Queue size so that more client connections are allowed",
+        "property_value" : "100",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.namenode.handler.count",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.web.ugi",
+      "StackConfigurations" : {
+        "property_description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+        "property_value" : "gopher,gopher",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.web.ugi",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.pct",
+      "StackConfigurations" : {
+        "property_description" : "When calculating remaining space, only use this percentage of the real available space\n",
+        "property_value" : "0.85f",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.datanode.du.pct",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.period",
+      "StackConfigurations" : {
+        "property_description" : "The number of seconds between two periodic checkpoints.\n  ",
+        "property_value" : "21600",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs.checkpoint.period",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.access.token.enable",
+      "StackConfigurations" : {
+        "property_description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.block.access.token.enable",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.cluster.administrators",
+      "StackConfigurations" : {
+        "property_description" : "ACL for who all can view the default servlets in the HDFS",
+        "property_value" : " hdfs",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.cluster.administrators",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dtnode_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "DataNode maximum Java heap size",
+        "property_value" : "1024",
+        "stack_version" : "1.3.0",
+        "property_name" : "dtnode_heapsize",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for JobSubmissionProtocol, used by job clients to\n    communciate with the jobtracker for job submission, querying job status etc.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.job.submission.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/webinterface.private.actions",
+      "StackConfigurations" : {
+        "property_description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
+        "property_value" : "false",
+        "stack_version" : "1.3.0",
+        "property_name" : "webinterface.private.actions",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
+      "StackConfigurations" : {
+        "property_description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
+        "property_value" : "30000",
+        "stack_version" : "1.3.0",
+        "property_name" : "ipc.client.connection.maxidletime",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions.supergroup",
+      "StackConfigurations" : {
+        "property_description" : "The name of the group of super-users.",
+        "property_value" : "hdfs",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.permissions.supergroup",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_heapsize",
+      "StackConfigurations" : {
+        "property_description" : "Hadoop maximum Java heap size",
+        "property_value" : "1024",
+        "stack_version" : "1.3.0",
+        "property_name" : "hadoop_heapsize",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.idlethreshold",
+      "StackConfigurations" : {
+        "property_description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
+        "property_value" : "8000",
+        "stack_version" : "1.3.0",
+        "property_name" : "ipc.client.idlethreshold",
+        "filename" : "core-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for InterTrackerProtocol, used by the tasktrackers to\n    communicate with the jobtracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.inter.tracker.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
+      "StackConfigurations" : {
+        "property_description" : "Number of failed disks datanode would tolerate",
+        "property_value" : "0",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.datanode.failed.volumes.tolerated",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_newsize",
+      "StackConfigurations" : {
+        "property_description" : "NameNode new generation size",
+        "property_value" : "200",
+        "stack_version" : "1.3.0",
+        "property_name" : "namenode_opt_newsize",
+        "filename" : "global.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for ClientDatanodeProtocol, the client-to-datanode protocol\n    for block recovery.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.client.datanode.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.protocol.acl",
+      "StackConfigurations" : {
+        "property_description" : "ACL for ClientProtocol, which is used by user code\n    via the DistributedFileSystem.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
+        "property_value" : "*",
+        "stack_version" : "1.3.0",
+        "property_name" : "security.client.protocol.acl",
+        "filename" : "hadoop-policy.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "5",
+        "stack_version" : "1.3.0",
+        "property_name" : "ipc.server.read.threadpool.size",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
+      "StackConfigurations" : {
+        "property_description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
+        "property_value" : "0",
+        "stack_version" : "1.3.0",
+        "property_name" : "dfs.access.time.precision",
+        "filename" : "hdfs-site.xml",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 209 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HIVE.json

@@ -0,0 +1,209 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
+      "StackConfigurations" : {
+        "property_description" : "Driver class name for a JDBC metastore",
+        "property_value" : "com.mysql.jdbc.Driver",
+        "stack_version" : "1.3.0",
+        "property_name" : "javax.jdo.option.ConnectionDriverName",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_pid_dir",
+      "StackConfigurations" : {
+        "property_description" : "Hive PID Dir.",
+        "property_value" : "/var/run/hive",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive_pid_dir",
+        "filename" : "global.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.enabled",
+      "StackConfigurations" : {
+        "property_description" : "enable or disable the hive client authorization",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.security.authorization.enabled",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_conf_dir",
+      "StackConfigurations" : {
+        "property_description" : "Hive Conf Dir.",
+        "property_value" : "/etc/hive/conf",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive_conf_dir",
+        "filename" : "global.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hadoop.clientside.fs.operations",
+      "StackConfigurations" : {
+        "property_description" : "FS operations are owned by client",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "hadoop.clientside.fs.operations",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.manager",
+      "StackConfigurations" : {
+        "property_description" : "the hive client authorization manager class name.\n    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
+        "property_value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.security.authorization.manager",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "fs.hdfs.impl.disable.cache",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
+      "StackConfigurations" : {
+        "property_description" : "location of default database for the warehouse",
+        "property_value" : "/apps/hive/warehouse",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.metastore.warehouse.dir",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.semantic.analyzer.factory.impl",
+      "StackConfigurations" : {
+        "property_description" : "controls which SemanticAnalyzerFactory implementation class is used by CLI",
+        "property_value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.semantic.analyzer.factory.impl",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_aux_jars_path",
+      "StackConfigurations" : {
+        "property_description" : "Hive auxiliary jar path.",
+        "property_value" : "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive_aux_jars_path",
+        "filename" : "global.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
+      "StackConfigurations" : {
+        "property_description" : "MetaStore Client socket timeout in seconds",
+        "property_value" : "60",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.metastore.client.socket.timeout",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.server2.enable.doAs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.server2.enable.doAs",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
+      "StackConfigurations" : {
+        "property_description" : "List of comma separated metastore object types that should be pinned in the cache",
+        "property_value" : "Table,Database,Type,FieldSchema,Order",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.metastore.cache.pinobjtypes",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
+      "StackConfigurations" : {
+        "property_description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.metastore.execute.setugi",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/mysql_connector_url",
+      "StackConfigurations" : {
+        "property_description" : "URL for downloading the MySQL JDBC connector used by the Hive metastore.",
+        "property_value" : "${download_url}/mysql-connector-java-5.1.18.zip",
+        "stack_version" : "1.3.0",
+        "property_name" : "mysql_connector_url",
+        "filename" : "global.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.local",
+      "StackConfigurations" : {
+        "property_description" : "controls whether to connect to remote metastore server or\n    open a new metastore server in Hive Client JVM",
+        "property_value" : "false",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive.metastore.local",
+        "filename" : "hive-site.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_log_dir",
+      "StackConfigurations" : {
+        "property_description" : "Directory for Hive Log files.",
+        "property_value" : "/var/log/hive",
+        "stack_version" : "1.3.0",
+        "property_name" : "hive_log_dir",
+        "filename" : "global.xml",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 353 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HUE.json

@@ -0,0 +1,353 @@
+{
+  "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/pig_shell_command",
+      "StackConfigurations" : {
+        "property_description" : "Define and configure a new shell type pig.",
+        "property_value" : "/usr/bin/pig -l /dev/null",
+        "stack_version" : "1.3.0",
+        "property_name" : "pig_shell_command",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_name",
+      "StackConfigurations" : {
+        "property_description" : "Configuration options for specifying the Desktop Database.",
+        "property_value" : "sandbox",
+        "stack_version" : "1.3.0",
+        "property_name" : "db_name",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_user",
+      "StackConfigurations" : {
+        "property_description" : "Configuration options for specifying the Desktop Database.",
+        "property_value" : "sandbox",
+        "stack_version" : "1.3.0",
+        "property_name" : "db_user",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_host",
+      "StackConfigurations" : {
+        "property_description" : "Configuration options for specifying the Desktop Database.",
+        "property_value" : "localhost",
+        "stack_version" : "1.3.0",
+        "property_name" : "db_host",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_password",
+      "StackConfigurations" : {
+        "property_description" : "Configuration options for specifying the Desktop Database.",
+        "property_value" : "1111",
+        "stack_version" : "1.3.0",
+        "property_name" : "db_password",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/time_zone",
+      "StackConfigurations" : {
+        "property_description" : "Time zone name",
+        "property_value" : "America/Los_Angeles",
+        "stack_version" : "1.3.0",
+        "property_name" : "time_zone",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_host",
+      "StackConfigurations" : {
+        "property_description" : "Webserver listens on this address and port",
+        "property_value" : "0.0.0.0",
+        "stack_version" : "1.3.0",
+        "property_name" : "http_host",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hue_pid_dir",
+      "StackConfigurations" : {
+        "property_description" : "Hue Pid Dir.",
+        "property_value" : "/var/run/hue",
+        "stack_version" : "1.3.0",
+        "property_name" : "hue_pid_dir",
+        "service_name" : "HUE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/tls",
+      "StackConfigurations" : {
+        "property_description" : "Whether to use a TLS (secure) connection when talking to the SMTP server.",
+        "property_value" : "no",
+        "stack_version" : "1.3.0",
+        "property_name" : "tls",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hadoop_mapred_home",
+      "StackConfigurations" : {
+        "property_description" : "Path to the Hadoop MapReduce libraries used by Hue.",
+        "property_value" : "/usr/lib/hadoop/lib",
+        "stack_version" : "1.3.0",
+        "property_name" : "hadoop_mapred_home",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/default_from_email",
+      "StackConfigurations" : {
+        "property_description" : "The SMTP server information for email notification delivery.",
+        "property_value" : "sandbox@hortonworks.com",
+        "stack_version" : "1.3.0",
+        "property_name" : "default_from_email",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/backend_auth_policy",
+      "StackConfigurations" : {
+        "property_description" : "Authentication backend.",
+        "property_value" : "desktop.auth.backend.AllowAllBackend",
+        "stack_version" : "1.3.0",
+        "property_name" : "backend_auth_policy",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hue_log_dir",
+      "StackConfigurations" : {
+        "property_description" : "Hue Log Dir.",
+        "property_value" : "/var/log/hue",
+        "stack_version" : "1.3.0",
+        "property_name" : "hue_log_dir",
+        "service_name" : "HUE",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/whitelist",
+      "StackConfigurations" : {
+        "property_description" : "proxy settings",
+        "property_value" : "(localhost|127\\.0\\.0\\.1):(50030|50070|50060|50075|50111)",
+        "stack_version" : "1.3.0",
+        "property_name" : "whitelist",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/jobtracker_port",
+      "StackConfigurations" : {
+        "property_description" : "The port where the JobTracker IPC listens on.",
+        "property_value" : "50030",
+        "stack_version" : "1.3.0",
+        "property_name" : "jobtracker_port",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_port",
+      "StackConfigurations" : {
+        "property_description" : "Configuration options for specifying the Desktop Database.",
+        "property_value" : "3306",
+        "stack_version" : "1.3.0",
+        "property_name" : "db_port",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_port",
+      "StackConfigurations" : {
+        "property_description" : "The SMTP server information for email notification delivery.",
+        "property_value" : "25",
+        "stack_version" : "1.3.0",
+        "property_name" : "smtp_port",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/database_logging",
+      "StackConfigurations" : {
+        "property_description" : "To show database transactions, set database_logging to 1.\n      default, database_logging=0",
+        "property_value" : "0",
+        "stack_version" : "1.3.0",
+        "property_name" : "database_logging",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/send_debug_messages",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "1",
+        "stack_version" : "1.3.0",
+        "property_name" : "send_debug_messages",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_password",
+      "StackConfigurations" : {
+        "property_description" : "The SMTP server information for email notification delivery.",
+        "property_value" : "25",
+        "stack_version" : "1.3.0",
+        "property_name" : "smtp_password",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/django_debug_mode",
+      "StackConfigurations" : {
+        "property_description" : "Turn off debug",
+        "property_value" : "1",
+        "stack_version" : "1.3.0",
+        "property_name" : "django_debug_mode",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/use_cherrypy_server",
+      "StackConfigurations" : {
+        "property_description" : "Set to true to use CherryPy as the webserver, set to false\n      to use Spawning as the webserver. Defaults to Spawning if\n      key is not specified.",
+        "property_value" : "false",
+        "stack_version" : "1.3.0",
+        "property_name" : "use_cherrypy_server",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_shell_command",
+      "StackConfigurations" : {
+        "property_description" : "Define and configure a new shell type hbase.",
+        "property_value" : "/usr/bin/hbase shell",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_shell_command",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/bash_shell_command",
+      "StackConfigurations" : {
+        "property_description" : "Define and configure a new shell type bash for testing only\n      .",
+        "property_value" : "/bin/bash",
+        "stack_version" : "1.3.0",
+        "property_name" : "bash_shell_command",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_nice_name",
+      "StackConfigurations" : {
+        "property_description" : "Define and configure a new shell type hbase",
+        "property_value" : "HBase Shell",
+        "stack_version" : "1.3.0",
+        "property_name" : "hbase_nice_name",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_port",
+      "StackConfigurations" : {
+        "property_description" : "Webserver listens on this address and port",
+        "property_value" : "8000",
+        "stack_version" : "1.3.0",
+        "property_name" : "http_port",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_host",
+      "StackConfigurations" : {
+        "property_description" : "The SMTP server information for email notification delivery.",
+        "property_value" : "localhost",
+        "stack_version" : "1.3.0",
+        "property_name" : "smtp_host",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_engine",
+      "StackConfigurations" : {
+        "property_description" : "Configuration options for specifying the Desktop Database.",
+        "property_value" : "mysql",
+        "stack_version" : "1.3.0",
+        "property_name" : "db_engine",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_500_debug_mode",
+      "StackConfigurations" : {
+        "property_description" : "Turn off backtrace for server error",
+        "property_value" : "1",
+        "stack_version" : "1.3.0",
+        "property_name" : "http_500_debug_mode",
+        "service_name" : "HUE",
+        "type" : "hue-site.xml",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 545 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/MAPREDUCEv2.json

@@ -0,0 +1,545 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.shuffle.merge.percent",
+      "StackConfigurations" : {
+        "property_description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
+        "property_value" : "0.66",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.shuffle.merge.percent",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.inmem.merge.threshold",
+      "StackConfigurations" : {
+        "property_description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
+        "property_value" : "1000",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.inmem.merge.threshold",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
+      "StackConfigurations" : {
+        "property_description" : "\n    15-minute bucket size (value is in minutes)\n  ",
+        "property_value" : "15",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/io.sort.factor",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "100",
+        "stack_version" : "2.0.1",
+        "property_name" : "io.sort.factor",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.queue.default.acl-administer-jobs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "*",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.queue.default.acl-administer-jobs",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-queue-acls.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.queue.default.acl-submit-job",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "*",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.queue.default.acl-submit-job",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-queue-acls.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.jobtracker.completeuserjobs.maximum",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "5",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
+      "StackConfigurations" : {
+        "property_description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
+        "property_value" : "50000000",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.jobtracker.restart.recover",
+      "StackConfigurations" : {
+        "property_description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
+        "property_value" : "false",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.jobtracker.restart.recover",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.healthChecker.interval",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "135000",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.healthChecker.interval",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.shuffle.input.buffer.percent",
+      "StackConfigurations" : {
+        "property_description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
+        "property_value" : "0.7",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.shuffle.input.buffer.percent",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.reuse.jvm.num.tasks",
+      "StackConfigurations" : {
+        "property_description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
+        "property_value" : "1",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.reuse.jvm.num.tasks",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.jobhistory.done-dir",
+      "StackConfigurations" : {
+        "property_description" : "Directory where history files are managed by the MR JobHistory Server.",
+        "property_value" : "/mr-history/done",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.jobhistory.done-dir",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.jobhistory.intermediate-done-dir",
+      "StackConfigurations" : {
+        "property_description" : "Directory where history files are written by MapReduce jobs.",
+        "property_value" : "/mr-history/tmp",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.jobhistory.intermediate-done-dir",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.tracker.persist.jobstatus.hours",
+      "StackConfigurations" : {
+        "property_description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
+        "property_value" : "1",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.system.dir",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "/mapred/system",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.system.dir",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.queue.names",
+      "StackConfigurations" : {
+        "property_description" : " Comma separated list of queues configured for this jobtracker.",
+        "property_value" : "default",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.queue.names",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.tracker.history.completed.location",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "/mapred/history/done",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.tracker.history.completed.location",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.shuffle.port",
+      "StackConfigurations" : {
+        "property_description" : "Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers.",
+        "property_value" : "8081",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.shuffle.port",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.tracker.persist.jobstatus.active",
+      "StackConfigurations" : {
+        "property_description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
+        "property_value" : "false",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.tracker.persist.jobstatus.active",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.map.tasks.speculative.execution",
+      "StackConfigurations" : {
+        "property_description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
+        "property_value" : "false",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.map.tasks.speculative.execution",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "false",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.jobtracker.retirejob.interval",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "21600000",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.jobtracker.retirejob.interval",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.jobtracker.staging.root.dir",
+      "StackConfigurations" : {
+        "property_description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
+        "property_value" : "/user",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.jobtracker.staging.root.dir",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.healthChecker.script.timeout",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "60000",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.healthChecker.script.timeout",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.max.tracker.blacklists",
+      "StackConfigurations" : {
+        "property_description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
+        "property_value" : "16",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.max.tracker.blacklists",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.output.compression.type",
+      "StackConfigurations" : {
+        "property_description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
+        "property_value" : "BLOCK",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.output.compression.type",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.jobtracker.retirejob.check",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "10000",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.jobtracker.retirejob.check",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.child.root.logger",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "INFO,TLA",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.child.root.logger",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/hadoop.job.history.user.location",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "none",
+        "stack_version" : "2.0.1",
+        "property_name" : "hadoop.job.history.user.location",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.task.timeout",
+      "StackConfigurations" : {
+        "property_description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
+        "property_value" : "600000",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.task.timeout",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.reduce.tasks.speculative.execution",
+      "StackConfigurations" : {
+        "property_description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
+        "property_value" : "false",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.reduce.tasks.speculative.execution",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.tracker.handler.count",
+      "StackConfigurations" : {
+        "property_description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
+        "property_value" : "50",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.tracker.handler.count",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.reduce.slowstart.completed.maps",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "0.05",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.reduce.slowstart.completed.maps",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.job.reduce.input.buffer.percent",
+      "StackConfigurations" : {
+        "property_description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
+        "property_value" : "0.0",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.job.reduce.input.buffer.percent",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.reduce.input.limit",
+      "StackConfigurations" : {
+        "property_description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
+        "property_value" : "10737418240",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.reduce.input.limit",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/io.sort.record.percent",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : ".2",
+        "stack_version" : "2.0.1",
+        "property_name" : "io.sort.record.percent",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
+      "StackConfigurations" : {
+        "property_description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
+        "property_value" : "250",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
+      "StackConfigurations" : {
+        "property_description" : "\n    3-hour sliding window (value is in minutes)\n  ",
+        "property_value" : "180",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.history.server.embedded",
+      "StackConfigurations" : {
+        "property_description" : "Should job history server be embedded within Job tracker\nprocess",
+        "property_value" : "false",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.history.server.embedded",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.tasktracker.group",
+      "StackConfigurations" : {
+        "property_description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
+        "property_value" : "hadoop",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.tasktracker.group",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/tasktracker.http.threads",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "50",
+        "stack_version" : "2.0.1",
+        "property_name" : "tasktracker.http.threads",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/jetty.connector",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "org.mortbay.jetty.nio.SelectChannelConnector",
+        "stack_version" : "2.0.1",
+        "property_name" : "jetty.connector",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapred.reduce.parallel.copies",
+      "StackConfigurations" : {
+        "property_description" : "No description",
+        "property_value" : "30",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapred.reduce.parallel.copies",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCEv2/configurations/mapreduce.cluster.administrators",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : " hadoop",
+        "stack_version" : "2.0.1",
+        "property_name" : "mapreduce.cluster.administrators",
+        "service_name" : "MAPREDUCEv2",
+        "stack_name" : "HDP",
+        "type" : "mapred-site.xml"
+      }
+    }
+  ]
+}

+ 41 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/NAGIOS.json

@@ -0,0 +1,41 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations?fields=*&_=1368459065260",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_group",
+      "StackConfigurations" : {
+        "property_description" : "Nagios Group.",
+        "property_value" : "nagios",
+        "stack_version" : "1.3.0",
+        "property_name" : "nagios_group",
+        "service_name" : "NAGIOS",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_web_login",
+      "StackConfigurations" : {
+        "property_description" : "Nagios web user.",
+        "property_value" : "nagiosadmin",
+        "stack_version" : "1.3.0",
+        "property_name" : "nagios_web_login",
+        "service_name" : "NAGIOS",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_user",
+      "StackConfigurations" : {
+        "property_description" : "Nagios Username.",
+        "property_value" : "nagios",
+        "stack_version" : "1.3.0",
+        "property_name" : "nagios_user",
+        "service_name" : "NAGIOS",
+        "type" : "global.xml",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 317 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/OOZIE.json

@@ -0,0 +1,317 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.systemmode",
+      "StackConfigurations" : {
+        "property_description" : "\n     System mode for  Oozie at startup.\n     ",
+        "property_value" : "NORMAL",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.systemmode",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.kerberos.name.rules",
+      "StackConfigurations" : {
+        "property_description" : "The mapping from kerberos principal names to local OS user names.",
+        "property_value" : "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT\n        ",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.authentication.kerberos.name.rules",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.AuthorizationService.security.enabled",
+      "StackConfigurations" : {
+        "property_description" : "\n     Specifies whether security (user name/admin role) is enabled or not.\n     If disabled any user can manage Oozie system and manage any job.\n     ",
+        "property_value" : "true",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.AuthorizationService.security.enabled",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.threads",
+      "StackConfigurations" : {
+        "property_description" : "Number of threads used for executing callables",
+        "property_value" : "10",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.CallableQueueService.threads",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.jobTracker.whitelist",
+      "StackConfigurations" : {
+        "property_description" : "\n      Whitelisted job tracker for Oozie service.\n      ",
+        "property_value" : " ",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.pool.max.active.conn",
+      "StackConfigurations" : {
+        "property_description" : "\n             Max number of connections.\n        ",
+        "property_value" : "10",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.JPAService.pool.max.active.conn",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.base.url",
+      "StackConfigurations" : {
+        "property_description" : "Base Oozie URL.",
+        "property_value" : "http://localhost:11000/oozie",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.base.url",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.SchemaService.wf.ext.schemas",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.SchemaService.wf.ext.schemas",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.older.than",
+      "StackConfigurations" : {
+        "property_description" : "\n     Jobs older than this value, in days, will be purged by the PurgeService.\n     ",
+        "property_value" : "30",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.PurgeService.older.than",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.nameNode.whitelist",
+      "StackConfigurations" : {
+        "property_description" : "\n      ",
+        "property_value" : " ",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.coord.normal.default.timeout",
+      "StackConfigurations" : {
+        "property_description" : "Default timeout for a coordinator action input check (in minutes) for normal job.\n      -1 means infinite timeout",
+        "property_value" : "120",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.coord.normal.default.timeout",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/use.system.libpath.for.mapreduce.and.pig.jobs",
+      "StackConfigurations" : {
+        "property_description" : "\n      If set to true, submissions of MapReduce and Pig jobs will include\n      automatically the system library path, thus not requiring users to\n      specify where the Pig JAR files are. Instead, the ones from the system\n      library path are used.\n      ",
+        "property_value" : "false",
+        "stack_version" : "1.3.0",
+        "property_name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.queue.size",
+      "StackConfigurations" : {
+        "property_description" : "Max callable queue size",
+        "property_value" : "1000",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.CallableQueueService.queue.size",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.ActionService.executor.ext.classes",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor\n        ",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.ActionService.executor.ext.classes",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.password",
+      "StackConfigurations" : {
+        "property_description" : "\n            DB user password.\n\n            IMPORTANT: if password is empty leave a 1 space string, the service trims the value,\n                       if empty Configuration assumes it is NULL.\n        ",
+        "property_value" : " ",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.JPAService.jdbc.password",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.WorkflowAppService.system.libpath",
+      "StackConfigurations" : {
+        "property_description" : "\n      System library path to use for workflow applications.\n      This path is added to workflow application if their job properties sets\n      the property 'oozie.use.system.libpath' to true.\n      ",
+        "property_value" : "/user/${user.name}/share/lib",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.WorkflowAppService.system.libpath",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.driver",
+      "StackConfigurations" : {
+        "property_description" : "\n            JDBC driver class.\n        ",
+        "property_value" : "org.apache.derby.jdbc.EmbeddedDriver",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.JPAService.jdbc.driver",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.hadoop.configurations",
+      "StackConfigurations" : {
+        "property_description" : "\n          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n          the relevant Hadoop *-site.xml files. If the path is relative is looked within\n          the Oozie configuration directory; though the path can be absolute (i.e. to point\n          to Hadoop client conf/ directories in the local filesystem.\n      ",
+        "property_value" : "*=/etc/hadoop/conf",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.purge.interval",
+      "StackConfigurations" : {
+        "property_description" : "\n     Interval at which the purge service will run, in seconds.\n     ",
+        "property_value" : "3600",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.PurgeService.purge.interval",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.create.db.schema",
+      "StackConfigurations" : {
+        "property_description" : "\n            Creates Oozie DB.\n\n            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n        ",
+        "property_value" : "false",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.JPAService.create.db.schema",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.system.id",
+      "StackConfigurations" : {
+        "property_description" : "\n    The Oozie system ID.\n    ",
+        "property_value" : "oozie-${user.name}",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.system.id",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.username",
+      "StackConfigurations" : {
+        "property_description" : "\n            DB user name.\n        ",
+        "property_value" : "sa",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.JPAService.jdbc.username",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.db.schema.name",
+      "StackConfigurations" : {
+        "property_description" : "\n      Oozie DataBase Name\n     ",
+        "property_value" : "oozie",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.db.schema.name",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.type",
+      "StackConfigurations" : {
+        "property_description" : "\n      ",
+        "property_value" : "simple",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.authentication.type",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.url",
+      "StackConfigurations" : {
+        "property_description" : "\n            JDBC URL.\n        ",
+        "property_value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.JPAService.jdbc.url",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.callable.concurrency",
+      "StackConfigurations" : {
+        "property_description" : "\n     Maximum concurrency for a given callable type.\n     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n     All commands that use action executors (action-start, action-end, action-kill and action-check) use\n     the action type as the callable type.\n     ",
+        "property_value" : "3",
+        "stack_version" : "1.3.0",
+        "property_name" : "oozie.service.CallableQueueService.callable.concurrency",
+        "filename" : "oozie-site.xml",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/PIG.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG/configurations?fields=*&_=1368459065432",
+  "items" : [ ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/SQOOP.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG/configurations?fields=*&_=1368459065432",
+  "items" : [ ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/TEZ.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/TEZ/configurations?fields=*",
+  "items" : [ ]
+}

+ 173 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/WEBHCAT.json

@@ -0,0 +1,173 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.streaming.jar",
+      "StackConfigurations" : {
+        "property_description" : "The hdfs path to the Hadoop streaming jar file.",
+        "property_value" : "hdfs:///apps/webhcat/hadoop-streaming.jar",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.streaming.jar",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.override.enabled",
+      "StackConfigurations" : {
+        "property_description" : "\n     Enable the override path in templeton.override.jars\n   ",
+        "property_value" : "false",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.override.enabled",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.archive",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Pig archive.",
+        "property_value" : "hdfs:///apps/webhcat/pig.tar.gz",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.pig.archive",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hadoop executable.",
+        "property_value" : "/usr/bin/hadoop",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.hadoop",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hcat",
+      "StackConfigurations" : {
+        "property_description" : "The path to the hcatalog executable.",
+        "property_value" : "/usr/bin/hcat",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.hcat",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.exec.timeout",
+      "StackConfigurations" : {
+        "property_description" : "Time out for templeton api",
+        "property_value" : "60000",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.exec.timeout",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop.conf.dir",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hadoop configuration.",
+        "property_value" : "/etc/hadoop/conf",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.hadoop.conf.dir",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.jar",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Templeton jar file.",
+        "property_value" : "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.jar",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.libjars",
+      "StackConfigurations" : {
+        "property_description" : "Jars to add the the classpath.",
+        "property_value" : "/usr/lib/zookeeper/zookeeper.jar",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.libjars",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.port",
+      "StackConfigurations" : {
+        "property_description" : "The HTTP port for the main server.",
+        "property_value" : "50111",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.port",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.storage.class",
+      "StackConfigurations" : {
+        "property_description" : "The class to use as storage",
+        "property_value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.storage.class",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.archive",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hive archive.",
+        "property_value" : "hdfs:///apps/webhcat/hive.tar.gz",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.hive.archive",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.path",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Hive executable.",
+        "property_value" : "hive.tar.gz/hive/bin/hive",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.hive.path",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.path",
+      "StackConfigurations" : {
+        "property_description" : "The path to the Pig executable.",
+        "property_value" : "pig.tar.gz/pig/bin/pig",
+        "stack_version" : "1.3.0",
+        "property_name" : "templeton.pig.path",
+        "filename" : "webhcat-site.xml",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      }
+    }
+  ]
+}

+ 461 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/YARN.json

@@ -0,0 +1,461 @@
+{
+  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations?fields=*",
+  "items" : [
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.container-monitor.interval-ms",
+      "StackConfigurations" : {
+        "property_description" : "The interval, in milliseconds, for which the node manager\n    waits  between two cycles of monitoring its containers' memory usage. \n    ",
+        "property_value" : "3000",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.container-monitor.interval-ms",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.resourcemanager.scheduler.class",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.resourcemanager.scheduler.class",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.resourcemanager.admin.address",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "TODO-RMNODE-HOSTNAME:8141",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.resourcemanager.admin.address",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.queues",
+      "StackConfigurations" : {
+        "property_description" : "\n      The queues at the this level (root is the root queue).\n    ",
+        "property_value" : "default",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.queues",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.capacity",
+      "StackConfigurations" : {
+        "property_description" : "\n      The total capacity as a percentage out of 100 for this queue.\n      If it has child queues then this includes their capacity as well.\n      The child queues capacity should add up to their parent queue's capacity\n      or less.\n    ",
+        "property_value" : "100",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.capacity",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.default.acl_submit_jobs",
+      "StackConfigurations" : {
+        "property_description" : "\n      The ACL of who can submit jobs to the default queue.\n    ",
+        "property_value" : "*",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.default.acl_submit_jobs",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.log-aggregation.compression-type",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "gz",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.log-aggregation.compression-type",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.delete.debug-delay-sec",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "36000",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.delete.debug-delay-sec",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.address",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "0.0.0.0:45454",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.address",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.resource.memory-mb",
+      "StackConfigurations" : {
+        "property_description" : "Amount of physical memory, in MB, that can be allocated\n      for containers.",
+        "property_value" : "8192",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.resource.memory-mb",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.default.maximum-capacity",
+      "StackConfigurations" : {
+        "property_description" : "\n      The maximum capacity of the default queue. \n    ",
+        "property_value" : "100",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.default.maximum-capacity",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.resourcemanager.resource-tracker.address",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "TODO-RMNODE-HOSTNAME:8025",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.resourcemanager.resource-tracker.address",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.vmem-pmem-ratio",
+      "StackConfigurations" : {
+        "property_description" : "Ratio between virtual memory to physical memory when\n    setting memory limits for containers. Container allocations are\n    expressed in terms of physical memory, and virtual memory usage\n    is allowed to exceed this allocation by this ratio.\n    ",
+        "property_value" : "2.1",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.vmem-pmem-ratio",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.remote-app-log-dir-suffix",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "logs",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.remote-app-log-dir-suffix",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.health-checker.script.timeout-ms",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "60000",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.health-checker.script.timeout-ms",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.local-dirs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "TODO-YARN-LOCAL-DIR",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.local-dirs",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.default.user-limit-factor",
+      "StackConfigurations" : {
+        "property_description" : "\n      Default queue user limit a percentage from 0.0 to 1.0.\n    ",
+        "property_value" : "1",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.default.user-limit-factor",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.maximum-am-resource-percent",
+      "StackConfigurations" : {
+        "property_description" : "\n      Maximum percent of resources in the cluster which can be used to run \n      application masters i.e. controls number of concurrent running\n      applications.\n    ",
+        "property_value" : "0.1",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.maximum-am-resource-percent",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.default.state",
+      "StackConfigurations" : {
+        "property_description" : "\n      The state of the default queue. State can be one of RUNNING or STOPPED.\n    ",
+        "property_value" : "RUNNING",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.default.state",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.acl_administer_queues",
+      "StackConfigurations" : {
+        "property_description" : "\n      The ACL for who can administer this queue i.e. change sub-queue \n      allocations.\n    ",
+        "property_value" : "*",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.acl_administer_queues",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.log.retain-second",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "604800",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.log.retain-second",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.default.acl_administer_jobs",
+      "StackConfigurations" : {
+        "property_description" : "\n      The ACL of who can administer jobs on the default queue.\n    ",
+        "property_value" : "*",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.default.acl_administer_jobs",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.application.classpath",
+      "StackConfigurations" : {
+        "property_description" : "Classpath for typical applications.",
+        "property_value" : "/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.application.classpath",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.resourcemanager.address",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "TODO-RMNODE-HOSTNAME:8050",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.resourcemanager.address",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.log-dirs",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "TODO-YARN-LOG-DIR",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.log-dirs",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.maximum-allocation-mb",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "8192",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.maximum-allocation-mb",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.container-executor.class",
+      "StackConfigurations" : {
+        "property_description" : "ContainerExecutor for launching containers",
+        "property_value" : "org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.container-executor.class",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.remote-app-log-dir",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "/app-logs",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.remote-app-log-dir",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.unfunded.capacity",
+      "StackConfigurations" : {
+        "property_description" : "\n      No description\n    ",
+        "property_value" : "50",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.unfunded.capacity",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.health-checker.interval-ms",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "135000",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.health-checker.interval-ms",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.minimum-allocation-mb",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "1024",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.minimum-allocation-mb",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.maximum-applications",
+      "StackConfigurations" : {
+        "property_description" : "\n      Maximum number of applications that can be pending and running.\n    ",
+        "property_value" : "10000",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.maximum-applications",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.aux-services",
+      "StackConfigurations" : {
+        "property_description" : "Auxilliary services of NodeManager",
+        "property_value" : "mapreduce.shuffle",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.aux-services",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.health-checker.script.path",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "/etc/hadoop/conf/health_check",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.health-checker.script.path",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.log-aggregation-enable",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "true",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.log-aggregation-enable",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.nodemanager.aux-services.class",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "org.apache.hadoop.mapred.ShuffleHandler",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.nodemanager.aux-services.class",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.resourcemanager.scheduler.address",
+      "StackConfigurations" : {
+        "property_description" : null,
+        "property_value" : "TODO-RMNODE-HOSTNAME:8030",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.resourcemanager.scheduler.address",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "yarn-site.xml"
+      }
+    },
+    {
+      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN/configurations/yarn.scheduler.capacity.root.default.capacity",
+      "StackConfigurations" : {
+        "property_description" : "Default queue target capacity.",
+        "property_value" : "100",
+        "stack_version" : "2.0.1",
+        "property_name" : "yarn.scheduler.capacity.root.default.capacity",
+        "service_name" : "YARN",
+        "stack_name" : "HDP",
+        "type" : "capacity-scheduler.xml"
+      }
+    }
+  ]
+}

+ 4 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/ZOOKEEPER.json

@@ -0,0 +1,4 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/ZOOKEEPER/configurations?fields=*",
+  "items" : [ ]
+}

+ 2490 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/global.json

@@ -0,0 +1,2490 @@
+{
+  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices?fields=configurations/StackConfigurations/filename",
+  "items" : [
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "HIVE",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.metastore.cache.pinobjtypes",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "javax.jdo.option.ConnectionDriverName",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.metastore.execute.setugi",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs.hdfs.impl.disable.cache",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.metastore.client.socket.timeout",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_pid_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive_pid_dir",
+            "filename" : "global.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.semantic.analyzer.factory.impl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.semantic.analyzer.factory.impl",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/mysql_connector_url",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mysql_connector_url",
+            "filename" : "global.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_log_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive_log_dir",
+            "filename" : "global.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.local",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.metastore.local",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_conf_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive_conf_dir",
+            "filename" : "global.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.enabled",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.security.authorization.enabled",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.manager",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.security.authorization.manager",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.server2.enable.doAs",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.server2.enable.doAs",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive.metastore.warehouse.dir",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_aux_jars_path",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hive_aux_jars_path",
+            "filename" : "global.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hadoop.clientside.fs.operations",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hadoop.clientside.fs.operations",
+            "filename" : "hive-site.xml",
+            "service_name" : "HIVE",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "NAGIOS",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [ ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HCATALOG",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "HCATALOG",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [ ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "MAPREDUCE",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.parallel.copies",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.reduce.parallel.copies",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-user-limit-factor",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.default-user-limit-factor",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-supports-priority",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.default-supports-priority",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.handler.count",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.tracker.handler.count",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.max.tracker.blacklists",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.max.tracker.blacklists",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.map.tasks.speculative.execution",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.map.tasks.speculative.execution",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.active",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.tracker.persist.jobstatus.active",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.input.buffer.percent",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.shuffle.input.buffer.percent",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/tasktracker.http.threads",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "tasktracker.http.threads",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.history.server.embedded",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapreduce.history.server.embedded",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.hours",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.completeuserjobs.maximum",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred_system_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred_system_dir",
+            "filename" : "global.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/scheduler_name",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "scheduler_name",
+            "filename" : "global.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-submit-job",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.queue.default.acl-submit-job",
+            "filename" : "mapred-queue-acls.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.slowstart.completed.maps",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.reduce.slowstart.completed.maps",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.capacity",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.capacity",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.merge.percent",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.shuffle.merge.percent",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.interval",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.jobtracker.retirejob.interval",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/io.sort.factor",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "io.sort.factor",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.child.root.logger",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.child.root.logger",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.tracker.history.completed.location",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.tracker.history.completed.location",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jetty.connector",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "jetty.connector",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.reduce.input.buffer.percent",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.reduce.input.buffer.percent",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jtnode_opt_newsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "jtnode_opt_newsize",
+            "filename" : "global.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/hadoop.job.history.user.location",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hadoop.job.history.user.location",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-worker-threads",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.init-worker-threads",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred_local_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred_local_dir",
+            "filename" : "global.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.check",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.jobtracker.retirejob.check",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.tasktracker.group",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapreduce.tasktracker.group",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/io.sort.record.percent",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "io.sort.record.percent",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-administer-jobs",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.queue.default.acl-administer-jobs",
+            "filename" : "mapred-queue-acls.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.user-limit-factor",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.user-limit-factor",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.reduce.tasks.speculative.execution",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.reduce.tasks.speculative.execution",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-capacity",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.maximum-capacity",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.restart.recover",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.jobtracker.restart.recover",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.output.compression.type",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.output.compression.type",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.system.dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.system.dir",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.supports-priority",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.supports-priority",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.job.reuse.jvm.num.tasks",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.job.reuse.jvm.num.tasks",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.healthChecker.interval",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.healthChecker.interval",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.cluster.administrators",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapreduce.cluster.administrators",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.staging.root.dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapreduce.jobtracker.staging.root.dir",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-poll-interval",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.init-poll-interval",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.queue.names",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.queue.names",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapreduce.reduce.input.limit",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapreduce.reduce.input.limit",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.maximum-system-jobs",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.maximum-system-jobs",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-minimum-user-limit-percent",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.default-minimum-user-limit-percent",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.task.timeout",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.task.timeout",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-init-accept-jobs-factor",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.capacity-scheduler.default-init-accept-jobs-factor",
+            "filename" : "capacity-scheduler.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.healthChecker.script.timeout",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.healthChecker.script.timeout",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/jtnode_opt_maxnewsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "jtnode_opt_maxnewsize",
+            "filename" : "global.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.inmem.merge.threshold",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.inmem.merge.threshold",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
+            "filename" : "mapred-site.xml",
+            "service_name" : "MAPREDUCE",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/ZOOKEEPER",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "ZOOKEEPER",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [ ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/SQOOP",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "SQOOP",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [ ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "HBASE",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.useMulti",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase.zookeeper.useMulti",
+            "filename" : "hbase-site.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstore_compactionthreshold",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hstore_compactionthreshold",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_blockcache_size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hfile_blockcache_size",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_pid_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_pid_dir",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_regionserver_heapsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_regionserver_heapsize",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/client_scannercaching",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "client_scannercaching",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/regionserver_handlers",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "regionserver_handlers",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.regionserver.optionalcacheflushinterval",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase.regionserver.optionalcacheflushinterval",
+            "filename" : "hbase-site.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.admin.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.admin.protocol.acl",
+            "filename" : "hbase-policy.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_blockmultiplier",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hregion_blockmultiplier",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_master_heapsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_master_heapsize",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.master.lease.thread.wakefrequency",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase.master.lease.thread.wakefrequency",
+            "filename" : "hbase-site.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_conf_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_conf_dir",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_log_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_log_dir",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.masterregion.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.masterregion.protocol.acl",
+            "filename" : "hbase-policy.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_tmp_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_tmp_dir",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_majorcompaction",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hregion_majorcompaction",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_max_keyvalue_size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hfile_max_keyvalue_size",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/zookeeper_sessiontimeout",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "zookeeper_sessiontimeout",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.cluster.distributed",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase.cluster.distributed",
+            "filename" : "hbase-site.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.property.clientPort",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase.zookeeper.property.clientPort",
+            "filename" : "hbase-site.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstorefile_maxsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hstorefile_maxsize",
+            "filename" : "global.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.client.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.client.protocol.acl",
+            "filename" : "hbase-policy.xml",
+            "service_name" : "HBASE",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "OOZIE",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.driver",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.JPAService.jdbc.driver",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.purge.interval",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.PurgeService.purge.interval",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.username",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.JPAService.jdbc.username",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.type",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.authentication.type",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.hadoop.configurations",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.ActionService.executor.ext.classes",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.ActionService.executor.ext.classes",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.callable.concurrency",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.CallableQueueService.callable.concurrency",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.SchemaService.wf.ext.schemas",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.SchemaService.wf.ext.schemas",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.systemmode",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.systemmode",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.db.schema.name",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.db.schema.name",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.url",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.JPAService.jdbc.url",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.password",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.JPAService.jdbc.password",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.create.db.schema",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.JPAService.create.db.schema",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.nameNode.whitelist",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.system.id",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.system.id",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.jobTracker.whitelist",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.base.url",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.base.url",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/use.system.libpath.for.mapreduce.and.pig.jobs",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.PurgeService.older.than",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.PurgeService.older.than",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.JPAService.pool.max.active.conn",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.JPAService.pool.max.active.conn",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.WorkflowAppService.system.libpath",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.WorkflowAppService.system.libpath",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.AuthorizationService.security.enabled",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.AuthorizationService.security.enabled",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.coord.normal.default.timeout",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.coord.normal.default.timeout",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.authentication.kerberos.name.rules",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.authentication.kerberos.name.rules",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.threads",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.CallableQueueService.threads",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.queue.size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "oozie.service.CallableQueueService.queue.size",
+            "filename" : "oozie-site.xml",
+            "service_name" : "OOZIE",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "PIG",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [ ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "HDFS",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_failed_volume_tolerated",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs_datanode_failed_volume_tolerated",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.trash.interval",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs.trash.interval",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.serializations",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "io.serializations",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.https.port",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.secondary.https.port",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.datanode.ipc.address",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.datanode.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.datanode.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_heapsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hadoop_heapsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.idlethreshold",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "ipc.client.idlethreshold",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.inter.tracker.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.file.buffer.size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "io.file.buffer.size",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs.checkpoint.size",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.heartbeat.interval",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.heartbeat.interval",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.access.token.enable",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.block.access.token.enable",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.web.ugi",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.web.ugi",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/kerberos_domain",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "kerberos_domain",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.balance.bandwidthPerSec",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_name_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs_name_dir",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/datanode_du_reserved",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "datanode_du_reserved",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.pct",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.datanode.du.pct",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.safemode.threshold.pct",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxnewsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "namenode_opt_maxnewsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.inter.datanode.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.max.response.size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "ipc.server.max.response.size",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.datanode.socket.write.timeout",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.client.datanode.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/keytab_path",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "keytab_path",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.datanode.failed.volumes.tolerated",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.permissions",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.datanode.max.xcievers",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "io.compression.codec.lzo.class",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.cluster.administrators",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.cluster.administrators",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_newsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "namenode_opt_newsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.access.time.precision",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "ipc.server.read.threadpool.size",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security_enabled",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security_enabled",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.umaskmode",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.umaskmode",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.namenode.handler.count",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication.max",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.replication.max",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/webinterface.private.actions",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "webinterface.private.actions",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions.supergroup",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.permissions.supergroup",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_pid_dir_prefix",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hadoop_pid_dir_prefix",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs.checkpoint.edits.dir",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.block.size",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.task.umbilical.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.job.submission.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "ipc.client.connect.max.retries",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.https.port",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.client.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.namenode.handler.count",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "ipc.client.connection.maxidletime",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_webhdfs_enabled",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs_webhdfs_enabled",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_log_dir_prefix",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hdfs_log_dir_prefix",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.period",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs.checkpoint.period",
+            "filename" : "core-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_size",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs_checkpoint_size",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.namenode.protocol.acl",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "security.namenode.protocol.acl",
+            "filename" : "hadoop-policy.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs.blockreport.initialDelay",
+            "filename" : "hdfs-site.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_period",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs_checkpoint_period",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "fs_checkpoint_dir",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dtnode_heapsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dtnode_heapsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_data_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "dfs_data_dir",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_heapsize",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "namenode_heapsize",
+            "filename" : "global.xml",
+            "service_name" : "HDFS",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "WEBHCAT",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.port",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.port",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.archive",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.pig.archive",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.archive",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.hive.archive",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.streaming.jar",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.streaming.jar",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.jar",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.jar",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hcat",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.hcat",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.hadoop",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hive.path",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.hive.path",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.hadoop.conf.dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.hadoop.conf.dir",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.storage.class",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.storage.class",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.override.enabled",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.override.enabled",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.pig.path",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.pig.path",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.libjars",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.libjars",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/WEBHCAT/configurations/templeton.exec.timeout",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "templeton.exec.timeout",
+            "filename" : "webhcat-site.xml",
+            "service_name" : "WEBHCAT",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "HUE",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/jobtracker_port",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "jobtracker_port",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_user",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "db_user",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_host",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "http_host",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_port",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "db_port",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/whitelist",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "whitelist",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/django_debug_mode",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "django_debug_mode",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_host",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "smtp_host",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_password",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "smtp_password",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_engine",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "db_engine",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/send_debug_messages",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "send_debug_messages",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/tls",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "tls",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/bash_shell_command",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "bash_shell_command",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/pig_shell_command",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "pig_shell_command",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_500_debug_mode",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "http_500_debug_mode",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hadoop_mapred_home",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hadoop_mapred_home",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/time_zone",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "time_zone",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_host",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "db_host",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_password",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "db_password",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/database_logging",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "database_logging",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_name",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "db_name",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_shell_command",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_shell_command",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/default_from_email",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "default_from_email",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/use_cherrypy_server",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "use_cherrypy_server",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_port",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "http_port",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_nice_name",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "hbase_nice_name",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/backend_auth_policy",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "backend_auth_policy",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_port",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "smtp_port",
+            "filename" : "hue-site.xml",
+            "service_name" : "HUE",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    },
+    {
+      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA",
+      "StackServices" : {
+        "stack_version" : "1.3.0",
+        "service_name" : "GANGLIA",
+        "stack_name" : "HDP"
+      },
+      "configurations" : [
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_runtime_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "ganglia_runtime_dir",
+            "filename" : "global.xml",
+            "service_name" : "GANGLIA",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmetad_user",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "gmetad_user",
+            "filename" : "global.xml",
+            "service_name" : "GANGLIA",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/rrdcached_base_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "rrdcached_base_dir",
+            "filename" : "global.xml",
+            "service_name" : "GANGLIA",
+            "stack_name" : "HDP"
+          }
+        },
+        {
+          "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_conf_dir",
+          "StackConfigurations" : {
+            "stack_version" : "1.3.0",
+            "property_name" : "ganglia_conf_dir",
+            "filename" : "global.xml",
+            "service_name" : "GANGLIA",
+            "stack_name" : "HDP"
+          }
+        }
+      ]
+    }
+  ]
+}

+ 2 - 1
ambari-web/app/controllers/wizard.js

@@ -457,7 +457,8 @@ App.WizardController = Em.Controller.extend({
       name: 'wizard.service_components',
       name: 'wizard.service_components',
       sender: this,
       sender: this,
       data: {
       data: {
-        stackUrl: App.get('stack2VersionURL')
+        stackUrl: App.get('stack2VersionURL'),
+        stackVersion: App.currentStackVersion.replace(/HDP(Local)?-/, '')
       },
       },
       success: 'loadServiceComponentsSuccessCallback',
       success: 'loadServiceComponentsSuccessCallback',
       error: 'loadServiceComponentsErrorCallback'
       error: 'loadServiceComponentsErrorCallback'

+ 3 - 2
ambari-web/app/controllers/wizard/step3_controller.js

@@ -280,8 +280,9 @@ App.WizardStep3Controller = Em.Controller.extend({
       App.router.get('installerController').setLowerStepsDisable(3);
       App.router.get('installerController').setLowerStepsDisable(3);
       this.set('isSubmitDisabled', true);
       this.set('isSubmitDisabled', true);
     } else {
     } else {
-      App.router.get('installerController.isStepDisabled').findProperty('step', 1).set('value', false);
-      App.router.get('installerController.isStepDisabled').findProperty('step', 2).set('value', false);
+      App.router.get('installerController.isStepDisabled').filter(function(step){
+        if(step.step >= 0 && step.step <= 2) return true;
+      }).setEach('value', false);
     }
     }
   }.observes('isInstallInProgress'),
   }.observes('isInstallInProgress'),
 
 

+ 2 - 2
ambari-web/app/utils/ajax.js

@@ -201,7 +201,7 @@ var urls = {
   },
   },
   'config.advanced': {
   'config.advanced': {
     'real': '{stack2VersionUrl}/stackServices/{serviceName}/configurations?fields=*',
     'real': '{stack2VersionUrl}/stackServices/{serviceName}/configurations?fields=*',
-    'mock': '/data/wizard/stack/hdp/version130/{serviceName}.json',
+    'mock': '/data/wizard/stack/hdp/version{stackVersion}/{serviceName}.json',
     'format': function (data) {
     'format': function (data) {
       return {
       return {
         async: false
         async: false
@@ -579,7 +579,7 @@ var urls = {
   },
   },
   'wizard.service_components': {
   'wizard.service_components': {
     'real': '{stackUrl}/stackServices?fields=StackServices',
     'real': '{stackUrl}/stackServices?fields=StackServices',
-    'mock': '/data/wizard/stack/hdp/version/1.3.0.json',
+    'mock': '/data/wizard/stack/hdp/version/{stackVersion}.json',
     'format': function (data, opt) {
     'format': function (data, opt) {
       return {
       return {
         async: false
         async: false

+ 2 - 1
ambari-web/app/utils/config.js

@@ -483,7 +483,8 @@ App.config = Em.Object.create({
       sender: this,
       sender: this,
       data: {
       data: {
         serviceName: serviceName,
         serviceName: serviceName,
-        stack2VersionUrl: App.get('stack2VersionURL')
+        stack2VersionUrl: App.get('stack2VersionURL'),
+        stackVersion: App.currentStackVersion.replace(/HDP(Local)?-/, '')
       },
       },
       success: 'loadAdvancedConfigSuccess'
       success: 'loadAdvancedConfigSuccess'
     });
     });