
Backporting for branch-1.2.4

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/branches/branch-1.2.4@1488039 13f79535-47bb-0310-9956-ffa450edef68
Yusaku Sako 12 years ago
Parent
Commit
37c2c55120
100 changed files with 1545 additions and 7324 deletions
  1. 0 28
      ambari-agent/pom.xml
  2. 2 8
      ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp
  3. 1 1
      ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp
  4. 0 2
      ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb
  5. 0 6
      ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp
  6. 1 3
      ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb
  7. 1 3
      ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb
  8. 0 32
      ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh
  9. 12 62
      ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp
  10. 22 5
      ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp
  11. 2 2
      ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp
  12. 0 26
      ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-smoke.sh.erb
  13. 1 2
      ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_grant_permissions.erb
  14. 1 1
      ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_master_jaas.conf.erb
  15. 1 1
      ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_regionserver_jaas.conf.erb
  16. 5 10
      ambari-agent/src/main/puppet/modules/hdp-hive/manifests/jdbc-connector.pp
  17. 4 26
      ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
  18. 0 3
      ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
  19. 1 5
      ambari-agent/src/main/python/ambari_agent/AmbariConfig.py
  20. 25 30
      ambari-agent/src/main/python/ambari_agent/LiveStatus.py
  21. 0 61
      ambari-server/pom.xml
  22. 1 1
      ambari-server/src/main/conf/ambari.properties
  23. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/Role.java
  24. 0 2
      ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostPropertyProvider.java
  25. 0 3
      ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java
  26. 11 11
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterControllerImpl.java
  27. 0 3
      ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java
  28. 23 355
      ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateBuilder.java
  29. 0 6
      ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java
  30. 236 396
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java
  31. 198 321
      ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java
  32. 5 17
      ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java
  33. 77 126
      ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java
  34. 1 1
      ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql
  35. 0 1058
      ambari-server/src/main/resources/ganglia_properties.json
  36. 1 1
      ambari-server/src/main/resources/mysql-ddl.sql
  37. 1 1
      ambari-server/src/main/resources/oracle-DDL.sql
  38. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml
  39. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.1/services/ZOOKEEPER/metainfo.xml
  40. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/metainfo.xml
  41. 0 22
      ambari-server/src/main/resources/stacks/HDP/2.0.1/metainfo.xml
  42. 0 0
      ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/container-executor.cfg
  43. 0 0
      ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/core-site.xml
  44. 0 0
      ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/mapred-queue-acls.xml
  45. 0 0
      ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/mapred-site.xml
  46. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/metainfo.xml
  47. 1 1
      ambari-server/src/main/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml
  48. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
  49. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml
  50. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/metainfo.xml
  51. 6 40
      ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql
  52. 2 2
      ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
  53. 2 2
      ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaServiceTest.java
  54. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
  55. 2 2
      ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java
  56. 0 8
      ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java
  57. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java
  58. 1 1
      ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationTestModule.java
  59. 1 1
      ambari-server/src/test/resources/stacks/HDP/0.2/services/ZOOKEEPER/metainfo.xml
  60. 1 1
      ambari-server/src/test/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml
  61. 1 4
      ambari-web/app/app.js
  62. 0 137
      ambari-web/app/assets/data/wizard/stack/hdp/version/1.2.1.json
  63. 0 148
      ambari-web/app/assets/data/wizard/stack/hdp/version/2.0.1.json
  64. 0 113
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HBASE.json
  65. 0 4
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HCATALOG.json
  66. 0 533
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HDFS.json
  67. 0 149
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HIVE.json
  68. 0 725
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/MAPREDUCE.json
  69. 0 317
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/OOZIE.json
  70. 0 173
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/WEBHCAT.json
  71. 0 4
      ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/ZOOKEEPER.json
  72. 0 65
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/GANGLIA.json
  73. 0 41
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/NAGIOS.json
  74. 0 4
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/PIG.json
  75. 0 4
      ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/SQOOP.json
  76. 60 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HBASE.json
  77. 20 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HCATALOG.json
  78. 210 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HDFS.json
  79. 95 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/HIVE.json
  80. 230 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/MAPREDUCE.json
  81. 155 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/OOZIE.json
  82. 90 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/WEBHCAT.json
  83. 25 0
      ambari-web/app/assets/data/wizard/stack/hdp/version122/ZOOKEEPER.json
  84. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/HBASE.json
  85. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/HCATALOG.json
  86. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/HDFS.json
  87. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/HIVE.json
  88. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/HUE.json
  89. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/MAPREDUCE.json
  90. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/OOZIE.json
  91. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/WEBHCAT.json
  92. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/ZOOKEEPER.json
  93. 0 0
      ambari-web/app/assets/data/wizard/stack/hdp/version130/global.json
  94. 0 65
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/GANGLIA.json
  95. 0 281
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HBASE.json
  96. 0 4
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HCATALOG.json
  97. 0 737
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HDFS.json
  98. 0 209
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HIVE.json
  99. 0 353
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HUE.json
  100. 0 545
      ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/MAPREDUCE2.json

+ 0 - 28
ambari-agent/pom.xml

@@ -60,33 +60,6 @@
   </profiles>
   <build>
     <plugins>
-      <plugin>
-         <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <version>1.8</version>
-        <executions>
-          <execution>
-            <id>parse-version</id>
-            <phase>validate</phase>
-            <goals>
-              <goal>parse-version</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>regex-property</id>
-            <goals>
-              <goal>regex-property</goal>
-            </goals>
-            <configuration>
-              <name>ambariVersion</name>
-              <value>${project.version}</value>
-              <regex>-SNAPSHOT</regex>
-              <replacement></replacement>
-              <failIfNoMatch>false</failIfNoMatch>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.0</version>
@@ -325,7 +298,6 @@
               <sources>
                 <source>
                   <location>../version</location>
-                  <filter>true</filter>
                 </source>
               </sources>
             </mapping>

+ 2 - 8
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/monitor.pp

@@ -45,7 +45,7 @@ class hdp-ganglia::monitor(
       class { 'hdp-ganglia::config': ganglia_server_host => $ganglia_server_host}
     }
 
-    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-yarn::resourcemanager'] == true) or ($hdp::params::service_exists['hdp-yarn::nodemanager'] == true) or ($hdp::params::service_exists['hdp-yarn::historyserver'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
+    if (($hdp::params::service_exists['hdp-hadoop::datanode'] == true) or ($hdp::params::service_exists['hdp-hadoop::namenode'] == true) or ($hdp::params::service_exists['hdp-hadoop::jobtracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::tasktracker'] == true) or ($hdp::params::service_exists['hdp-hadoop::client'] == true) or ($hdp::params::service_exists['hdp-hadoop::snamenode'] == true)) {
      class { 'hdp-hadoop::enable-ganglia': }
    }
 
@@ -101,12 +101,6 @@ class hdp-ganglia::monitor::config-gen()
   if ($hdp::params::is_jtnode_master) {
     hdp-ganglia::config::generate_monitor { 'HDPJobTracker':}
   }
-  if ($hdp::params::is_rmnode_master) {
-    hdp-ganglia::config::generate_monitor { 'HDPResourceManager':}
-  }
-  if ($hdp::params::is_hsnode_master) {
-    hdp-ganglia::config::generate_monitor { 'HDPHistoryServer':}
-  }
   if ($hdp::params::is_hbase_master) {
     hdp-ganglia::config::generate_monitor { 'HDPHBaseMaster':}
   }
@@ -167,4 +161,4 @@ class hdp-ganglia::server::delete_default_gmond_process() {
     path => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
     require => Class['hdp-ganglia::monitor::gmond']
   }
-}
+}

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-ganglia/manifests/server.pp

@@ -59,7 +59,7 @@ class hdp-ganglia::server(
     }
   }
   
-  hdp-ganglia::config::generate_server { ['HDPJobTracker','HDPNameNode','HDPSlaves','HDPResourceManager','HDPHistoryServer']:
+  hdp-ganglia::config::generate_server { ['HDPJobTracker','HDPNameNode','HDPSlaves']:
     ganglia_service => 'gmond',
     role => 'server'
   }

+ 0 - 2
ambari-agent/src/main/puppet/modules/hdp-ganglia/templates/gangliaClusters.conf.erb

@@ -23,5 +23,3 @@
     HDPNameNode         <%=scope.function_hdp_host("ganglia_server_host")%>  8661
     HDPJobTracker     	<%=scope.function_hdp_host("ganglia_server_host")%>  8662
     HDPHBaseMaster      <%=scope.function_hdp_host("ganglia_server_host")%>  8663
-    HDPResourceManager  <%=scope.function_hdp_host("ganglia_server_host")%>  8664
-    HDPHistoryServer    <%=scope.function_hdp_host("ganglia_server_host")%>  8666

+ 0 - 6
ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/namenode.pp

@@ -140,12 +140,6 @@ define hdp-hadoop::namenode::create_app_directories($service_state)
         owner         => $hdp::params::hbase_user,
         service_state => $service_state
       }
-     $hbase_staging_dir = $hdp::params::hbase_staging_dir
-     hdp-hadoop::hdfs::directory { $hbase_staging_dir:
-       owner         => $hdp::params::hbase_user,
-       service_state => $service_state,
-       mode             => '711',
-     }
     }
 
     if ($hdp::params::hive_server_host != "") {

+ 1 - 3
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties-GANGLIA.erb

@@ -34,6 +34,4 @@ jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host"
 tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
 maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
 reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
+

+ 1 - 3
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-metrics2.properties.erb

@@ -34,6 +34,4 @@ jobtracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host"
 tasktracker.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
 maptask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
 reducetask.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-resourcemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8664
-nodemanager.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8660
-historyserver.sink.ganglia.servers=<%=scope.function_hdp_host("ganglia_server_host")%>:8666
+

+ 0 - 32
ambari-agent/src/main/puppet/modules/hdp-hbase/files/hbaseSmokeVerify.sh

@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify

+ 12 - 62
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/hbase/service_check.pp

@@ -18,53 +18,28 @@
 # under the License.
 #
 #
-class hdp-hbase::hbase::service_check() inherits hdp-hbase::params
+class hdp-hbase::hbase::service_check() 
 {
   $smoke_test_user = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
+
   $output_file = "/apps/hbase/data/ambarismoketest"
   $conf_dir = $hdp::params::hbase_conf_dir
-  $smoke_user_keytab = "${hdp-hbase::params::keytab_path}/${smoke_test_user}.headless.keytab"
-  $hbase_user = $hdp-hbase::params::hbase_user
-  $hbase_keytab = "${hdp-hbase::params::keytab_path}/${hbase_user}.headless.keytab"
-  $test_cmd = "fs -test -e ${output_file}"
-  $serviceCheckData = hdp_unique_id_and_date()
-  $kinit_cmd = "${hdp::params::kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user};"
 
+  $test_cmd = "fs -test -e ${output_file}" 
+  
   anchor { 'hdp-hbase::hbase::service_check::begin':}
 
-  $hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-
-  file { '/tmp/hbaseSmokeVerify.sh':
+  file { '/tmp/hbaseSmoke.sh':
     ensure => present,
-    source => "puppet:///modules/hdp-hbase/hbaseSmokeVerify.sh",
-    mode => '0755',
-  }
-
-  file { $hbase_servicecheck_file:
+    source => "puppet:///modules/hdp-hbase/hbaseSmoke.sh",
     mode => '0755',
-    content => template('hdp-hbase/hbase-smoke.sh.erb'),
-  }
-  if ($security_enabled == true) {
-    $servicecheckcmd = "su - ${smoke_test_user} -c '$kinit_cmd hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '$kinit_cmd /tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
-  } else {
-    $servicecheckcmd = "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell $hbase_servicecheck_file'"
-    $smokeverifycmd = "su - ${smoke_test_user} -c '/tmp/hbaseSmokeVerify.sh $conf_dir ${serviceCheckData}'"
   }
 
-  exec { $hbase_servicecheck_file:
-    command   => $servicecheckcmd,
-    tries     => 3,
-    try_sleep => 5,
-    path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    logoutput => "true"
-  }
-
-  exec { '/tmp/hbaseSmokeVerify.sh':
-    command   => $smokeverifycmd,
+  exec { '/tmp/hbaseSmoke.sh':
+    command   => "su - ${smoke_test_user} -c 'hbase --config $conf_dir  shell /tmp/hbaseSmoke.sh'",
     tries     => 3,
     try_sleep => 5,
+    require   => File['/tmp/hbaseSmoke.sh'],
     path      => '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
     notify    => Hdp-hadoop::Exec-hadoop['hbase::service_check::test'],
     logoutput => "true"
@@ -73,34 +48,9 @@ class hdp-hbase::hbase::service_check() inherits hdp-hbase::params
   hdp-hadoop::exec-hadoop { 'hbase::service_check::test':
     command     => $test_cmd,
     refreshonly => true,
-    require     => Exec['/tmp/hbaseSmokeVerify.sh'],
+    require     => Exec['/tmp/hbaseSmoke.sh'],
     before      => Anchor['hdp-hbase::hbase::service_check::end'] #TODO: remove after testing
   }
-
-  if ($security_enabled == true) {
-    $hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-    $hbase_kinit_cmd = "${hdp::params::kinit_path_local} -kt ${hbase_keytab} ${hbase_user};"
-    $grantprivelegecmd = "$hbase_kinit_cmd hbase shell ${hbase_grant_premissions_file}"
-
-    file { $hbase_grant_premissions_file:
-      owner   => $hbase_user,
-      group   => $hdp::params::user_group,
-      mode => '0644',
-      content => template('hdp-hbase/hbase_grant_permissions.erb')
-      }
-      hdp::exec { '${smokeuser}_grant_privileges' :
-        command => $grantprivelegecmd,
-        require => File[$hbase_grant_premissions_file],
-        user => $hbase_user
-      }
-     Anchor['hdp-hbase::hbase::service_check::begin'] ->  File['/tmp/hbaseSmokeVerify.sh']
-       File[$hbase_servicecheck_file] ->  File[$hbase_grant_premissions_file] ->
-       Hdp::Exec['${smokeuser}_grant_privileges'] -> Exec[$hbase_servicecheck_file] ->
-       Exec['/tmp/hbaseSmokeVerify.sh'] -> Anchor['hdp-hbase::hbase::service_check::end']
-  } else {
-    Anchor['hdp-hbase::hbase::service_check::begin'] ->  File['/tmp/hbaseSmokeVerify.sh']
-    File[$hbase_servicecheck_file] -> Exec[$hbase_servicecheck_file] -> Exec['/tmp/hbaseSmokeVerify.sh']
-    -> Anchor['hdp-hbase::hbase::service_check::end']
-  }
+  
   anchor{ 'hdp-hbase::hbase::service_check::end':}
-}
+}

+ 22 - 5
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/init.pp

@@ -29,7 +29,6 @@ class hdp-hbase(
   
   $hdp::params::component_exists['hdp-hbase'] = true
   $smokeuser = $hdp::params::smokeuser
-  $security_enabled = $hdp::params::security_enabled
 
   #Configs generation  
 
@@ -94,18 +93,36 @@ class hdp-hbase(
       override_owner => true
     }
 
-   hdp-hbase::configfile { ['hbase-env.sh','hadoop-metrics.properties']: 
+   hdp-hbase::configfile { ['hbase-env.sh','log4j.properties','hadoop-metrics.properties']: 
       type => $type
     }
 
     hdp-hbase::configfile { 'regionservers':}
 
     if ($security_enabled == true) {
-      if ($type == 'master' and $service_state == 'running') {
+      if ($type == 'master') {
         hdp-hbase::configfile { 'hbase_master_jaas.conf' : }
-      } elsif ($type == 'regionserver' and $service_state == 'running') {
+
+        $hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
+
+        file { $hbase_grant_premissions_file:
+          owner   => $hbase_user,
+          group   => $hdp::params::user_group,
+          mode => '0644',
+          content => template('hdp-hbase/hbase_grant_permissions.erb')
+        }
+
+        hdp::exec { '${smokeuser}_grant_privileges' :
+          command => "su - ${smoke_test_user} -c 'hbase --config $conf_dir shell ${hbase_grant_premissions_file}'",
+          require => File[$hbase_grant_premissions_file]
+        }
+
+        Hdp-hbase::Configfile<||> -> File[$hbase_grant_premissions_file] ->
+        Hdp::Exec['${smokeuser}_grant_privileges'] -> Anchor['hdp-hbase::end']
+
+      } elsif ($type == 'regionserver') {
         hdp-hbase::configfile { 'hbase_regionserver_jaas.conf' : }
-      } elsif ($type == 'client') {
+      } else {
         hdp-hbase::configfile { 'hbase_client_jaas.conf' : }
       }
     }

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/params.pp

@@ -83,13 +83,13 @@ class hdp-hbase::params() inherits hdp::params
 
   $regionserver_memstore_upperlimit = hdp_default("hbase-site/regionserver.memstore.upperlimit","0.4")
 
-  $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
   $hbase_client_jaas_config_file = hdp_default("hbase_client_jaas_config_file", "${conf_dir}/hbase_client_jaas.conf")
   $hbase_master_jaas_config_file = hdp_default("hbase_master_jaas_config_file", "${conf_dir}/hbase_master_jaas.conf")
   $hbase_regionserver_jaas_config_file = hdp_default("hbase_regionserver_jaas_config_file", "${conf_dir}/hbase_regionserver_jaas.conf")
 
-  $hbase_keytab_path = hdp_default("hbase-site/hbase.master.keytab.file", "${keytab_path}/hbase.service.keytab")
+  $hbase_master_keytab_path = hdp_default("hbase-site/hbase.master.keytab.file", "${keytab_path}/hbase.service.keytab")
   $hbase_master_principal = hdp_default("hbase-site/hbase.master.kerberos.principal", "hbase/_HOST@${kerberos_domain}")
+  $hbase_regionserver_keytab_path = hdp_default("hbase-site/hbase.regionserver.keytab.file", "${keytab_path}/hbase.service.keytab")
   $hbase_regionserver_principal = hdp_default("hbase-site/hbase.regionserver.kerberos.principal", "hbase/_HOST@${kerberos_domain}")
 
   $hbase_primary_name = hdp_default("hbase_primary_name", "hbase")

+ 0 - 26
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase-smoke.sh.erb

@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','<%=scope.function_hdp_template_var("::hdp-hbase::hbase::service_check::serviceCheckData")%>'
-scan 'ambarismoketest'
-exit

+ 1 - 2
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_grant_permissions.erb

@@ -17,5 +17,4 @@
 # under the License.
 #
 #
-grant '<%=scope.function_hdp_template_var("::hdp::params::smokeuser")%>', '<%=scope.function_hdp_template_var("::hdp-hbase::params::smokeuser_permissions")%>'
-exit
+grant '<%=scope.function_hdp_template_var("::hdp::params::smokeuser")%>', '<%=scope.function_hdp_template_var("::hdp-hbase::params::smokeuser_permissions")%>'

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_master_jaas.conf.erb

@@ -3,6 +3,6 @@ com.sun.security.auth.module.Krb5LoginModule required
 useKeyTab=true
 storeKey=true
 useTicketCache=false
-keyTab="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_keytab_path")%>"
+keyTab="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_master_keytab_path")%>"
 principal="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_jaas_princ")%>";
 };

+ 1 - 1
ambari-agent/src/main/puppet/modules/hdp-hbase/templates/hbase_regionserver_jaas.conf.erb

@@ -3,6 +3,6 @@ com.sun.security.auth.module.Krb5LoginModule required
 useKeyTab=true
 storeKey=true
 useTicketCache=false
-keyTab="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_keytab_path")%>"
+keyTab="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_regionserver_keytab_path")%>"
 principal="<%=scope.function_hdp_template_var("::hdp-hbase::params::hbase_jaas_princ")%>";
 };

+ 5 - 10
ambari-agent/src/main/puppet/modules/hdp-hive/manifests/jdbc-connector.pp

@@ -24,13 +24,8 @@ class hdp-hive::jdbc-connector()
 
   $jdbc_jar_name = $hdp-hive::params::jdbc_jar_name
   
-  
-  $java_share_dir = "/usr/share/java"
-  $driver_curl_target = "${java_share_dir}/${jdbc_jar_name}"  
   $hive_lib = $hdp-hive::params::hive_lib
   $target = "${hive_lib}/${jdbc_jar_name}"
-  $jdk_location = $hdp::params::jdk_location
-  $driver_curl_source = "${jdk_location}${jdbc_jar_name}"
   
   anchor { 'hdp-hive::jdbc-connector::begin':}
 
@@ -46,18 +41,18 @@ class hdp-hive::jdbc-connector()
        creates => $target,
        path    => ["/bin","/usr/bin/"],
        require => Hdp::Package['mysql-connector-java'],
-       before  =>  Anchor['hdp-hive::jdbc-connector::end'],
+       notify  =>  Anchor['hdp-hive::jdbc-connector::end'],
    }
   } elsif ($hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver") {
-   hdp::exec { 'hive mkdir -p ${artifact_dir} ; curl -f --retry 10 ${driver_curl_source} -o ${driver_curl_target} &&  cp ${driver_curl_target} ${target}':
-       command => "mkdir -p ${artifact_dir} ; curl -f --retry 10 ${driver_curl_source} -o ${driver_curl_target} &&  cp ${driver_curl_target} ${target}",
+   hdp::exec { 'hive mkdir -p ${artifact_dir} ;  cp /usr/share/java/${jdbc_jar_name}  ${target}':
+       command => "mkdir -p ${artifact_dir} ;  cp /usr/share/java/${jdbc_jar_name}  ${target}",
        unless  => "test -f ${target}",
        path    => ["/bin","/usr/bin/"],
-       before  =>  Anchor['hdp-hive::jdbc-connector::end'],
+       notify  =>  Anchor['hdp-hive::jdbc-connector::end'],
      }  
   }
 
 
    anchor { 'hdp-hive::jdbc-connector::end':}
-   
+
 }

+ 4 - 26
ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp

@@ -39,21 +39,13 @@ class hdp-oozie::service(
   $oozie_keytab = $hdp-oozie::params::oozie_service_keytab
   $oozie_principal = $configuration['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
   
-  $oracle_driver_jar_name = "ojdbc6.jar"
-  $java_share_dir = "/usr/share/java"
-  
-  $artifact_dir = $hdp::params::artifact_dir
-  $driver_location = $hdp::params::jdk_location
-  $driver_curl_target = "${java_share_dir}/${oracle_driver_jar_name}"
-  $curl_cmd = "curl -f --retry 10 ${driver_location}${oracle_driver_jar_name} -o ${driver_curl_target}"
-  
   $jdbc_driver_name = $configuration['oozie-site']['oozie.service.JPAService.jdbc.driver']
   if ($jdbc_driver_name == "com.mysql.jdbc.Driver"){
-    $jdbc_driver_jar = "${java_share_dir}/mysql-connector-java.jar"
+    $jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
     $jdbc_driver_jar_target = "${libext_dir}/mysql-connector-java.jar"
   } elsif($jdbc_driver_name == "oracle.jdbc.driver.OracleDriver") {
-      $jdbc_driver_jar = "${java_share_dir}/${oracle_driver_jar_name}"
-      $jdbc_driver_jar_target = "${libext_dir}/${oracle_driver_jar_name}"
+      $jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+      $jdbc_driver_jar_target = "${libext_dir}/ojdbc6.jar"
   }
   
   
@@ -115,14 +107,13 @@ class hdp-oozie::service(
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_lib_dir : }
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_webapps_dir : }
   hdp-oozie::service::directory { $hdp-oozie::params::libext_dir : }
-  hdp-oozie::service::jdbc-connector-java { $hdp-oozie::params::libext_dir : }
 
   anchor{'hdp-oozie::service::begin':} -> Hdp-oozie::Service::Directory<||> -> anchor{'hdp-oozie::service::end':}
   
   if ($ensure == 'installed_and_configured') {
     hdp-oozie::service::exec_sh{$sh_cmds:}
     hdp-oozie::service::exec_user{$user_cmds:}
-    Anchor['hdp-oozie::service::begin'] -> Hdp-oozie::Service::Jdbc-connector-java[$hdp-oozie::params::libext_dir] -> Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_sh[$cmd1] -> Hdp-oozie::Service::Exec_sh[$cmd2] ->Hdp-oozie::Service::Exec_sh[$cmd3] -> Hdp-oozie::Service::Exec_user[$cmd4] ->Hdp-oozie::Service::Exec_user[$cmd5] -> Anchor['hdp-oozie::service::end']
+    Anchor['hdp-oozie::service::begin'] -> Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_sh[$cmd1] -> Hdp-oozie::Service::Exec_sh[$cmd2] ->Hdp-oozie::Service::Exec_sh[$cmd3] -> Hdp-oozie::Service::Exec_user[$cmd4] ->Hdp-oozie::Service::Exec_user[$cmd5] -> Anchor['hdp-oozie::service::end']
   } elsif ($ensure == 'running') {
     hdp::exec { "exec $cmd6" :
       command => $cmd6,
@@ -144,19 +135,6 @@ class hdp-oozie::service(
   }
 }
 
-define hdp-oozie::service::jdbc-connector-java()
-{
-  if ($jdbc_driver_name == "com.mysql.jdbc.Driver"){
-   hdp::package { 'mysql-connector-java' : }
-  } elsif($jdbc_driver_name == "oracle.jdbc.driver.OracleDriver") {
-    exec{ "${curl_cmd} ${name}":
-      command => $curl_cmd,
-      path    => ["/bin","/usr/bin/"],
-      unless  => "test -e ${java_share_dir}/${oracle_driver_jar_name}",
-    } 
-  }                       
-}                      
-                      
 define hdp-oozie::service::directory()
 {
   hdp::directory_recursive_create { $name: 

+ 0 - 3
ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp

@@ -202,7 +202,6 @@ class hdp::params()
   $hive_apps_whs_dir = hdp_default("hive_apps_whs_dir", "/apps/hive/warehouse")
   $webhcat_apps_dir = hdp_default("webhcat_apps_dir", "/apps/webhcat")
   $hbase_hdfs_root_dir = hdp_default("hbase-site/hbase.hdfs.root.dir","/apps/hbase/data")
-  $hbase_staging_dir = hdp_default("hbase-site/hbase.bulkload.staging.dir","/apps/hbase/staging")
 
   $yarn_nm_app_log_dir = hdp_default("yarn-site/yarn.nodemanager.remote-app-log-dir","/app-logs")
 
@@ -698,7 +697,5 @@ class hdp::params()
 
   $is_namenode_master = $hdp::params::hostname in $namenode_host
   $is_jtnode_master   = $hdp::params::hostname in $jtnode_host
-  $is_rmnode_master   = $hdp::params::hostname in $rm_host
-  $is_hsnode_master   = $hdp::params::hostname in $hs_host
   $is_hbase_master    = $hdp::params::hostname in $hbase_master_hosts
 }

+ 1 - 5
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -88,7 +88,7 @@ rolesToClass = {
   'YARN_CLIENT': 'hdp-yarn::yarn_client',
   'HDFS_CLIENT': 'hdp-hadoop::client',
   'MAPREDUCE_CLIENT': 'hdp-hadoop::client',
-  'MAPREDUCE2_CLIENT': 'hdp-yarn::mapreducev2_client',
+  'MAPREDUCEv2_CLIENT': 'hdp-yarn::mapreducev2_client',
   'ZOOKEEPER_SERVER': 'hdp-zookeeper',
   'ZOOKEEPER_CLIENT': 'hdp-zookeeper::client',
   'HBASE_MASTER': 'hdp-hbase::master',
@@ -186,10 +186,6 @@ pidPathesVars = [
    'defaultValue' : '/var/run/mysqld'},
   {'var' : 'webhcat_pid_dir',
    'defaultValue' : '/var/run/webhcat'},                      
-  {'var' : 'yarn_pid_dir',
-   'defaultValue' : '/var/run/hadoop-yarn'},
-  {'var' : 'mapreduce2_pid_dir',
-   'defaultValue' : '/var/run/hadoop-mapreduce'},
 ]
 
 class AmbariConfig:

+ 25 - 30
ambari-agent/src/main/python/ambari_agent/LiveStatus.py

@@ -32,8 +32,7 @@ class LiveStatus:
   SERVICES = [
     "HDFS", "MAPREDUCE", "GANGLIA", "HBASE",
     "NAGIOS", "ZOOKEEPER", "OOZIE", "HCATALOG",
-    "KERBEROS", "TEMPLETON", "HIVE", "WEBHCAT",
-    "YARN", "MAPREDUCE2"
+    "KERBEROS", "TEMPLETON", "HIVE", "WEBHCAT"
   ]
 
   COMPONENTS = [
@@ -43,54 +42,50 @@ class LiveStatus:
        "componentName" : "NAMENODE"},
       {"serviceName" : "HDFS",
        "componentName" : "SECONDARY_NAMENODE"},
-
+#      {"serviceName" : "HDFS",
+#       "componentName" : "HDFS_CLIENT"},
       {"serviceName" : "MAPREDUCE",
        "componentName" : "JOBTRACKER"},
       {"serviceName" : "MAPREDUCE",
        "componentName" : "TASKTRACKER"},
-
-      {"serviceName" : "GANGLIA",
+#      {"serviceName" : "MAPREDUCE",
+#       "componentName" : "MAPREDUCE_CLIENT"},
+      {"serviceName" : "GANGLIA",             #!
        "componentName" : "GANGLIA_SERVER"},
-      {"serviceName" : "GANGLIA",
+      {"serviceName" : "GANGLIA",             #!
        "componentName" : "GANGLIA_MONITOR"},
-
-      {"serviceName" : "HBASE",
+      {"serviceName" : "HBASE",               #!
        "componentName" : "HBASE_MASTER"},
-      {"serviceName" : "HBASE",
+      {"serviceName" : "HBASE",              #!
        "componentName" : "HBASE_REGIONSERVER"},
-
-      {"serviceName" : "NAGIOS",
+#      {"serviceName" : "HBASE",
+#       "componentName" : "HBASE_CLIENT"},
+      {"serviceName" : "NAGIOS",             #!
        "componentName" : "NAGIOS_SERVER"},
-
       {"serviceName" : "ZOOKEEPER",
        "componentName" : "ZOOKEEPER_SERVER"},
-
+#      {"serviceName" : "ZOOKEEPER",
+#       "componentName" : "ZOOKEEPER_CLIENT"},
       {"serviceName" : "OOZIE",
        "componentName" : "OOZIE_SERVER"},
-
-      {"serviceName" : "HCATALOG",
+#      {"serviceName" : "OOZIE",
+#       "componentName" : "OOZIE_CLIENT"},
+      {"serviceName" : "HCATALOG",            #!
        "componentName" : "HCATALOG_SERVER"},
-
       {"serviceName" : "KERBEROS",
-       "componentName" : "KERBEROS_SERVER"},
-
-      {"serviceName" : "HIVE",
+       "componentName" : "KERBEROS_SERVER"}, #!
+#      {"serviceName" : "TEMPLETON",
+#       "componentName" : "TEMPLETON_SERVER"},
+#      {"serviceName" : "TEMPLETON",
+#       "componentName" : "TEMPLETON_CLIENT"},
+      {"serviceName" : "HIVE",               #!
        "componentName" : "HIVE_SERVER"},
-      {"serviceName" : "HIVE",
+      {"serviceName" : "HIVE",               #!
        "componentName" : "HIVE_METASTORE"},
-      {"serviceName" : "HIVE",
+      {"serviceName" : "HIVE",               #!
        "componentName" : "MYSQL_SERVER"},
-
       {"serviceName" : "WEBHCAT",
        "componentName" : "WEBHCAT_SERVER"},
-
-      {"serviceName" : "YARN",
-       "componentName" : "RESOURCEMANAGER"},
-      {"serviceName" : "YARN",
-       "componentName" : "NODEMANAGER"},
-
-      {"serviceName" : "MAPREDUCE2",
-       "componentName" : "HISTORYSERVER"},
   ]
 
   LIVE_STATUS = "STARTED"

+ 0 - 61
ambari-server/pom.xml

@@ -32,58 +32,6 @@
   </properties>
   <build>
     <plugins>
-      <plugin>
-         <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <version>1.8</version>
-        <executions>
-          <execution>
-            <id>parse-version</id>
-            <phase>validate</phase>
-            <goals>
-              <goal>parse-version</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>regex-property</id>
-            <goals>
-              <goal>regex-property</goal>
-            </goals>
-            <configuration>
-              <name>ambariVersion</name>
-              <value>${project.version}</value>
-              <regex>-SNAPSHOT</regex>
-              <replacement></replacement>
-              <failIfNoMatch>false</failIfNoMatch>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <artifactId>maven-resources-plugin</artifactId>
-        <version>2.6</version>
-        <executions>
-          <execution>
-            <id>copy-resources</id>
-            <phase>generate-test-resources</phase>
-            <goals>
-              <goal>copy-resources</goal>
-            </goals>
-            <configuration>
-              <outputDirectory>${basedir}/target/</outputDirectory>
-              <resources>
-                <resource>
-                  <directory>${basedir}/../</directory>
-                  <includes>
-                      <include>**/version</include>
-                  </includes>
-                  <filtering>true</filtering>
-                </resource>
-              </resources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <artifactId>maven-compiler-plugin</artifactId>
         <version>3.0</version>
@@ -308,7 +256,6 @@
               <sources>
                 <source>
                   <location>src/main/resources/Ambari-DDL-Postgres-CREATE.sql</location>
-                  <filter>true</filter>
                 </source>
                 <source>
                   <location>src/main/resources/Ambari-DDL-Postgres-DROP.sql</location>
@@ -329,7 +276,6 @@
               <sources>
                 <source>
                   <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql</location>
-                  <filter>true</filter>
                 </source>
                 <source>
                   <location>src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.Fix.sql</location>
@@ -415,7 +361,6 @@
               <sources>
                 <source>
                   <location>../version</location>
-                  <filter>true</filter>
                 </source>
               </sources>
             </mapping>
@@ -474,12 +419,6 @@
         -->
       </plugin>
     </plugins>
-    <resources>
-      <resource>
-        <directory>src/main/resources</directory>
-        <filtering>true</filtering>
-      </resource>
-    </resources>
   </build>
   <profiles>
   </profiles>

+ 1 - 1
ambari-server/src/main/conf/ambari.properties

@@ -16,4 +16,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 metadata.path=src/main/resources/stacks
-server.version.file=target/version
+server.version.file=../version

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/Role.java

@@ -35,7 +35,7 @@ public enum Role {
   JOBTRACKER,
   TASKTRACKER,
   MAPREDUCE_CLIENT,
-  MAPREDUCE2_CLIENT,
+  MAPREDUCEv2_CLIENT,
   JAVA_JCE,
   HADOOP_CLIENT,
   JOBTRACKER_SERVICE_CHECK,

+ 0 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaHostPropertyProvider.java

@@ -41,9 +41,7 @@ public class GangliaHostPropertyProvider extends GangliaPropertyProvider{
     GANGLIA_CLUSTER_NAMES.add("HDPNameNode");
     GANGLIA_CLUSTER_NAMES.add("HDPSlaves");
     GANGLIA_CLUSTER_NAMES.add("HDPJobTracker");
-    GANGLIA_CLUSTER_NAMES.add("HDPResourceManager");
     GANGLIA_CLUSTER_NAMES.add("HDPHBaseMaster");
-    GANGLIA_CLUSTER_NAMES.add("HDPHistoryServer");
   }
 
   // ----- Constructors ------------------------------------------------------

+ 0 - 3
ambari-server/src/main/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProvider.java

@@ -55,9 +55,6 @@ public abstract class GangliaPropertyProvider extends AbstractPropertyProvider {
     GANGLIA_CLUSTER_NAME_MAP.put("DATANODE", "HDPSlaves");
     GANGLIA_CLUSTER_NAME_MAP.put("JOBTRACKER", "HDPJobTracker");
     GANGLIA_CLUSTER_NAME_MAP.put("TASKTRACKER", "HDPSlaves");
-    GANGLIA_CLUSTER_NAME_MAP.put("RESOURCEMANAGER", "HDPResourceManager");
-    GANGLIA_CLUSTER_NAME_MAP.put("NODEMANAGER", "HDPSlaves");
-    GANGLIA_CLUSTER_NAME_MAP.put("HISTORYSERVER", "HDPHistoryServer");
     GANGLIA_CLUSTER_NAME_MAP.put("HBASE_MASTER", "HDPHBaseMaster");
     GANGLIA_CLUSTER_NAME_MAP.put("HBASE_CLIENT", "HDPSlaves");
     GANGLIA_CLUSTER_NAME_MAP.put("HBASE_REGIONSERVER", "HDPSlaves");

+ 11 - 11
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterControllerImpl.java

@@ -260,28 +260,28 @@ public class ClusterControllerImpl implements ClusterController {
     Iterable<Resource> resources = getResources(type, readRequest, predicate);
 
     PredicateBuilder pb = new PredicateBuilder();
-    PredicateBuilder.PredicateBuilderPredicate pbPredicate = null;
+    PredicateBuilder.PredicateBuilderWithPredicate pbWithPredicate = null;
 
     for (Resource resource : resources) {
-      if (pbPredicate != null) {
-        pb = pbPredicate.or();
+      if (pbWithPredicate != null) {
+        pb = pbWithPredicate.or();
       }
 
-      pb          = pb.begin();
-      pbPredicate = null;
+      pb              = pb.begin();
+      pbWithPredicate = null;
 
       for (String keyPropertyId : keyPropertyIds) {
-        if (pbPredicate != null) {
-          pb = pbPredicate.and();
+        if (pbWithPredicate != null) {
+          pb = pbWithPredicate.and();
         }
-        pbPredicate =
+        pbWithPredicate =
             pb.property(keyPropertyId).equals((Comparable) resource.getPropertyValue(keyPropertyId));
       }
-      if (pbPredicate != null) {
-        pbPredicate = pbPredicate.end();
+      if (pbWithPredicate != null) {
+        pbWithPredicate = pbWithPredicate.end();
       }
     }
-    return pbPredicate == null ? null : pbPredicate.toPredicate();
+    return pbWithPredicate == null ? null : pbWithPredicate.toPredicate();
   }
 
   /**

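The ClusterControllerImpl change above follows a rename in PredicateBuilder: the intermediate builder types become PredicateBuilderWithProperty and PredicateBuilderWithPredicate (see the PredicateBuilder.java diff below). A minimal sketch of the chained DSL, adapted from the Javadoc examples removed in that diff; the import path for Predicate is assumed here:

import org.apache.ambari.server.controller.spi.Predicate;  // assumed package for Predicate
import org.apache.ambari.server.controller.utilities.PredicateBuilder;

public class PredicateBuilderSketch {
  public static void main(String[] args) {
    // p1 == "foo" AND p2 == "bar"
    // property() returns PredicateBuilderWithProperty; equals() returns
    // PredicateBuilderWithPredicate, which exposes and()/or()/end()/toPredicate().
    Predicate simple = new PredicateBuilder()
        .property("p1").equals("foo")
        .and().property("p2").equals("bar")
        .toPredicate();

    // p1 == "foo" AND (p2 == "bar" OR p3 == "cat"), grouped with begin()/end()
    Predicate grouped = new PredicateBuilder()
        .property("p1").equals("foo")
        .and().begin()
            .property("p2").equals("bar")
            .or().property("p3").equals("cat")
        .end()
        .toPredicate();

    System.out.println(simple);
    System.out.println(grouped);
  }
}

ClusterControllerImpl uses the same chaining inside its loop: for each resource it begin()s a block, and()s together one equals() predicate per key property, end()s the block, and or()s it with the blocks built for the other resources before calling toPredicate().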
+ 0 - 3
ambari-server/src/main/java/org/apache/ambari/server/controller/jmx/JMXPropertyProvider.java

@@ -93,9 +93,6 @@ public class JMXPropertyProvider extends AbstractPropertyProvider {
     DEFAULT_JMX_PORTS.put("TASKTRACKER",        "50060");
     DEFAULT_JMX_PORTS.put("HBASE_MASTER",       "60010");
     DEFAULT_JMX_PORTS.put("HBASE_REGIONSERVER", "60030");
-    DEFAULT_JMX_PORTS.put("RESOURCEMANAGER",     "8088");
-    DEFAULT_JMX_PORTS.put("HISTORYSERVER",      "19888");
-    DEFAULT_JMX_PORTS.put("NODEMANAGER",         "8042");
 
     ObjectMapper objectMapper = new ObjectMapper();
     objectMapper.configure(DeserializationConfig.Feature.USE_ANNOTATIONS, false);

+ 23 - 355
ambari-server/src/main/java/org/apache/ambari/server/controller/utilities/PredicateBuilder.java

@@ -32,65 +32,6 @@ import java.util.List;
 
 /**
  * Builder for predicates.
- * <p/>
- * The builder enforces a domain specific language according to the following
- * grammar :
- * <p/>
- * <predicate> ::= <property_name> <relational_operator> <value>
- * <predicate> ::= NOT <predicate>
- * <predicate> ::= ( <predicate> )
- * <predicate> ::= <predicate> AND <predicate>
- * <predicate> ::= <predicate> OR <predicate>
- *
- * <relational_operator> ::= =|>|<|>=|<=
- * <p/>
- * The predicate builder uses the normal method chaining of the builder pattern
- * along with intermediate objects.  The use of intermediate objects allows
- * for compiler checked constraints.
- * <p/>
- * For example, the predicate builder can be used to build a predicate where
- * property1=="foo" && property2=="bar".
- *
- * <pre>
- * {@code
- * PredicateBuilder builder = new PredicateBuilder();
- *
- * Predicate predicate = builder.property(property1).equals("foo").
- *     and().property(property2).equals("bar").toPredicate();
- * }
- * </pre>
- *
- * In this example, we are starting with an instance of {@link PredicateBuilder}.
- * Calling the method {@link PredicateBuilder#property(String)} returns an
- * instance of {@link PredicateBuilderProperty} which exposes methods for attaching
- * a relational operator to the property to form a simple predicate.
- * <p/>
- * Notice that the method {@link PredicateBuilderProperty#equals(Comparable)}
- * returns an instance of {@link PredicateBuilderPredicate} which exposes methods
- * for using predicates with logical operators to create complex predicates.
- * <p/>
- * Calling the method {@link PredicateBuilderPredicate#and()} returns an instance
- * of {@link PredicateBuilder} which allows us to start over building the predicate
- * for property2.
- * <p/>
- * The reason for having these intermediate return objects is that they only
- * expose the methods that make sense for that point in the building process.
- * In other words, we can use the compiler to check the syntax of our DSL
- * grammar at compile time rather than having a single builder class with a
- * bunch of runtime checks.
- * <p/>
- * For example, if the user tries to make an inappropriate call to the and()
- * method ...
- *
- * <pre>
- * {@code
- *
- * Predicate predicate = builder.property(property1).and().
- *     property(property2).equals("bar").toPredicate();
- * }
- * </pre>
- *
- * ... the compiler will flag it as an error and the code will simply not compile.
  */
 public class PredicateBuilder {
 
@@ -101,122 +42,47 @@ public class PredicateBuilder {
   private boolean done = false;
   private boolean not = false;
 
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Construct a predicate builder.
-   */
   public PredicateBuilder() {
     this.outer = null;
   }
 
-  /**
-   * Construct a predicate builder within another predicate builder.
-   *
-   * @param outer  the outer predicate builder
-   */
   private PredicateBuilder(PredicateBuilder outer) {
     this.outer = outer;
   }
 
-
-  // ----- enums ------------------------------------------------------
-
-  /**
-   * Logical operators
-   */
   private enum Operator {
     And,
     Or
   }
 
-
-  // ----- PredicateBuilder --------------------------------------------------
-
-  /**
-   * Create a property from the given property id.  This supports method
-   * chaining by returning an instance of {@link PredicateBuilderProperty}
-   * which is an intermediate object that represents the property in the DSL.
-   *
-   * @param id  the property id
-   *
-   * @return a property that can be used in the building of the predicate.
-   *
-   * @throws IllegalStateException if an attempt is made to reuse a predicate builder
-   */
-  public PredicateBuilderProperty property(String id) {
+  public PredicateBuilderWithProperty property(String id) {
     checkDone();
     propertyId = id;
-    return new PredicateBuilderProperty();
+    return new PredicateBuilderWithProperty();
   }
 
-  /**
-   * Set the builder to negate the predicate being built.  This supports method
-   * chaining by returning an instance of {@link PredicateBuilder} which can be
-   * used to continue building the predicate.
-   *
-   * For example, the following shows a usage of the not() method to
-   * produce a predicate where property "p1" does not equal "foo".
-   *
-   * <pre>
-   * {@code
-   * Predicate predicate = builder.not().property("p1").equals("foo").toPredicate();
-   * }
-   * </pre>
-   *
-   * @return a builder to be used to continue building the predicate
-   */
   public PredicateBuilder not() {
     not = true;
     return this;
   }
 
-  /**
-   * Set the builder to begin a block around the predicate being built.  Calling this
-   * method is the equivalent of using a left parenthesis.  This supports method
-   * chaining by returning an instance of {@link PredicateBuilder} which can be
-   * used to continue building the predicate.
-   *
-   * For example, the following shows a usage of the begin() method to
-   * produce a predicate where p1==foo && (p2==bar || p3 == cat).
-   *
-   * <pre>
-   * {@code
-   * Predicate predicate = builder.property("p1").equals("foo").and().
-   *     begin().property("p2").equals("bar").or().property("p3").equals("cat").end().
-   *     toPredicate();
-   * }
-   * </pre>
-   *
-   * @return a builder to be used to continue building the predicate
-   *
-   * @throws IllegalStateException if an attempt is made to reuse a predicate builder
-   */
   public PredicateBuilder begin() {
     checkDone();
     return new PredicateBuilder(this);
   }
 
-  /**
-   * Produce a {@link Predicate} object from the builder.
-   *
-   * @return the predicate object
-   */
   public Predicate toPredicate() {
     return getPredicate();
   }
 
-  // ----- helper methods ----------------------------------------------------
-
   private void checkDone() {
     if (done) {
       throw new IllegalStateException("Can't reuse a predicate builder.");
     }
   }
 
-  private PredicateBuilderPredicate getPredicateBuilderWithPredicate() {
-    return new PredicateBuilderPredicate();
+  private PredicateBuilderWithPredicate getPredicateBuilderWithPredicate() {
+    return new PredicateBuilderWithPredicate();
   }
 
   private void addPredicate(Predicate predicate) {
@@ -261,212 +127,60 @@ public class PredicateBuilder {
     throw new IllegalStateException("Can't return a predicate.");
   }
 
-  // ----- inner classes -----------------------------------------------------
-
-  // ----- PredicateBuilderProperty ------------------------------------------
-
-  /**
-   * A builder object that represents the property portion of the predicate being built.
-   * The PredicateBuilderProperty is itself a builder object that may be returned for
-   * method chaining of the predicate builder methods.
-   */
-  public class PredicateBuilderProperty {
-
-    /**
-     * Create a {@link PredicateBuilderPredicate} representing an equals 
-     * predicate for the property represented by this builder for the given
-     * value.  This supports method chaining by returning an instance of 
-     * {@link PredicateBuilderPredicate} which can be used to continue building 
-     * the predicate.
-     *
-     * For example, the following shows a usage of the equals() method to
-     * produce a predicate where p1==foo.
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property("p1").equals("foo").
-     *     toPredicate();
-     * }
-     * </pre>
-     * 
-     * @param value  the right operand (value) of the = operator
-     * @param <T>    the type of the property
-     *           
-     * @return a new builder representing an equals predicate
-     *
-     * @throws IllegalStateException if no property name was specified on this builder
-     */
-    public <T>PredicateBuilderPredicate equals(Comparable<T> value) {
+  public class PredicateBuilderWithProperty {
+
+    // ----- Equals -----
+    public <T>PredicateBuilderWithPredicate equals(Comparable<T> value) {
       if (propertyId == null) {
         throw new IllegalStateException("No property.");
       }
       addPredicate(new EqualsPredicate<T>(propertyId, value));
 
-      return new PredicateBuilderPredicate();
+      return new PredicateBuilderWithPredicate();
     }
 
-    /**
-     * Create a {@link PredicateBuilderPredicate} representing an greater than 
-     * predicate for the property represented by this builder for the given
-     * value.  This supports method chaining by returning an instance of 
-     * {@link PredicateBuilderPredicate} which can be used to continue building 
-     * the predicate.
-     *
-     * For example, the following shows a usage of the greaterThan() method to
-     * produce a predicate where p1 > 5.
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property("p1").greaterThan(5).
-     *     toPredicate();
-     * }
-     * </pre>
-     *
-     * @param value  the right operand (value) of the > operator
-     * @param <T>    the type of the property
-     *
-     * @return a new builder representing a greater than predicate
-     *
-     * @throws IllegalStateException if no property name was specified on this builder
-     */
-    public <T>PredicateBuilderPredicate greaterThan(Comparable<T> value) {
+    // ----- Greater than -----
+    public <T>PredicateBuilderWithPredicate greaterThan(Comparable<T> value) {
       if (propertyId == null) {
         throw new IllegalStateException("No property.");
       }
       addPredicate(new GreaterPredicate<T>(propertyId, value));
 
-      return new PredicateBuilderPredicate();
+      return new PredicateBuilderWithPredicate();
     }
 
-    /**
-     * Create a {@link PredicateBuilderPredicate} representing a 
-     * greater than or equals predicate for the property represented by this 
-     * builder for the given value.  This supports method chaining by returning 
-     * an instance of {@link PredicateBuilderPredicate} which can be used to 
-     * continue building the predicate.
-     *
-     * For example, the following shows a usage of the greaterThanEqualTo()
-     * method to produce a predicate where p1 >= 5.
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property("p1").greaterThanEqualTo(5).
-     *     toPredicate();
-     * }
-     * </pre>
-     *
-     * @param value  the right operand (value) of the >= operator
-     * @param <T>    the type of the property
-     *
-     * @return a new builder representing a greater than or equals predicate
-     *
-     * @throws IllegalStateException if no property name was specified on this builder
-     */
-    public <T>PredicateBuilderPredicate greaterThanEqualTo(Comparable<T> value) {
+    // ----- Greater than equal to -----
+    public <T>PredicateBuilderWithPredicate greaterThanEqualTo(Comparable<T> value) {
       if (propertyId == null) {
         throw new IllegalStateException("No property.");
       }
       addPredicate(new GreaterEqualsPredicate<T>(propertyId, value));
 
-      return new PredicateBuilderPredicate();
+      return new PredicateBuilderWithPredicate();
     }
 
-    /**
-     * Create a {@link PredicateBuilderPredicate} representing a 
-     * less than predicate for the property represented by this builder 
-     * for the given value.  This supports method chaining by returning 
-     * an instance of {@link PredicateBuilderPredicate} which can be used to 
-     * continue building the predicate.
-     *
-     * For example, the following shows a usage of the lessThan()
-     * method to produce a predicate where p1 < 5.
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property("p1").lessThan(5).
-     *     toPredicate();
-     * }
-     * </pre>
-     *
-     * @param value  the right operand (value) of the < operator
-     * @param <T>    the type of the property
-     *
-     * @return a new builder representing a less than predicate
-     *
-     * @throws IllegalStateException if no property name was specified on this builder
-     */
-    public <T>PredicateBuilderPredicate lessThan(Comparable<T> value) {
+    // ----- Less than -----
+    public <T>PredicateBuilderWithPredicate lessThan(Comparable<T> value) {
       if (propertyId == null) {
         throw new IllegalStateException("No property.");
       }
       addPredicate(new LessPredicate<T>(propertyId, value));
 
-      return new PredicateBuilderPredicate();
+      return new PredicateBuilderWithPredicate();
     }
 
-    /**
-     * Create a {@link PredicateBuilderPredicate} representing a 
-     * less than or equals predicate for the property represented by this 
-     * builder for the given value.  This supports method chaining by returning 
-     * an instance of {@link PredicateBuilderPredicate} which can be used to 
-     * continue building the predicate.
-     *
-     * For example, the following shows a usage of the lessThanEqualTo()
-     * method to produce a predicate where p1 <= 5.
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property("p1").lessThanEqualTo(5).
-     *     toPredicate();
-     * }
-     * </pre>
-     *
-     * @param value  the right operand (value) of the <= operator
-     * @param <T>    the type of the property
-     *
-     * @return a new builder representing a less than or equals predicate
-     *
-     * @throws IllegalStateException if no property name was specified on this builder
-     */
-    public <T>PredicateBuilderPredicate lessThanEqualTo(Comparable<T> value) {
+    // ----- Less than equal to -----
+    public <T>PredicateBuilderWithPredicate lessThanEqualTo(Comparable<T> value) {
       if (propertyId == null) {
         throw new IllegalStateException("No property.");
       }
       addPredicate(new LessEqualsPredicate<T>(propertyId, value));
 
-      return new PredicateBuilderPredicate();
+      return new PredicateBuilderWithPredicate();
     }
   }
 
-  // ----- PredicateBuilderPredicate -----------------------------------------
-  
-  /**
-   * A builder object that represents an inner predicate portion of the predicate being built.
-   * Note that the predicate represented by an instance of PredicateBuilderPredicate may be
-   * part of a larger complex predicate being built by the predicate builder.  The
-   * PredicateBuilderPredicate is itself a builder object that may be returned for method
-   * chaining of the predicate builder methods.
-   */
-  public class PredicateBuilderPredicate {
-
-    /**
-     * Get a {@link PredicateBuilder} object that can be used to build a 
-     * predicate that will be ANDed with the predicate represented by this 
-     * PredicateBuilderPredicate.
-     *
-     * For example, the following shows a usage of the and() method to
-     * produce a predicate where p1==foo && p2==bar.
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property(p1).equals("foo").
-     *     and().property(p2).equals("bar").toPredicate();
-     * }
-     * </pre>
-     * 
-     * @return a new predicate builder that should be used to build the predicate
-     *         being ANDed with the predicate from this builder
-     */
+  public class PredicateBuilderWithPredicate {
     public PredicateBuilder and() {
 
       if (operator != Operator.And) {
@@ -476,24 +190,6 @@ public class PredicateBuilder {
       return PredicateBuilder.this;
     }
 
-    /**
-     * Get a {@link PredicateBuilder} object that can be used to build a 
-     * predicate that will be ORed with the predicate represented by this 
-     * PredicateBuilderPredicate.
-     *
-     * For example, the following shows a usage of the or() method to
-     * produce a predicate where p1==foo || p2==bar.
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property(p1).equals("foo").
-     *     or().property(p2).equals("bar").toPredicate();
-     * }
-     * </pre>
-     *
-     * @return a new predicate builder that should be used to build the predicate
-     *         being ORed with the predicate from this builder
-     */
     public PredicateBuilder or() {
 
       if (operator != Operator.Or) {
@@ -503,13 +199,6 @@ public class PredicateBuilder {
       return PredicateBuilder.this;
     }
 
-    /**
-     * Produce a {@link Predicate} object from the builder.
-     *
-     * @return the predicate object
-     *
-     * @throws IllegalStateException if the block is unbalanced (missing end call)
-     */
     public Predicate toPredicate() {
       if (outer != null) {
         throw new IllegalStateException("Unbalanced block - missing end.");
@@ -518,28 +207,7 @@ public class PredicateBuilder {
       return getPredicate();
     }
 
-    /**
-     * Set the builder to end a block around the predicate being built.  Calling this
-     * method is the equivalent of using a right parenthesis.  This supports method
-     * chaining by returning an instance of {@link PredicateBuilderPredicate} which can 
-     * be used to continue building the predicate.
-     *
-     * For example, the following shows a usage of the end() method to
-     * produce a predicate where p1==foo && (p2==bar || p3 == cat).
-     *
-     * <pre>
-     * {@code
-     * Predicate predicate = builder.property("p1").equals("foo").and().
-     *     begin().property("p2").equals("bar").or().property("p3").equals("cat").end().
-     *     toPredicate();
-     * }
-     * </pre>
-     *
-     * @return a builder to be used to continue building the predicate
-     *
-     * @throws IllegalStateException if the block is unbalanced (missing begin call)
-     */
-    public PredicateBuilderPredicate end() {
+    public PredicateBuilderWithPredicate end() {
       if (outer == null) {
         throw new IllegalStateException("Unbalanced block - missing begin.");
       }
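
The PredicateBuilder hunk above renames the chaining type PredicateBuilderPredicate to
PredicateBuilderWithPredicate and drops its usage Javadoc; the fluent API itself keeps the
same shape. A minimal sketch of that usage, restating the examples from the deleted
comments (p1 == "foo" && (p2 == "bar" || p3 > 5)); the wrapper class name is illustrative,
and the result is assigned to Object rather than assuming the package of the Predicate
return type:

    import org.apache.ambari.server.controller.utilities.PredicateBuilder;

    public class PredicateBuilderUsageSketch {
      public static void main(String[] args) {
        // property(), equals(), greaterThan(), and(), or(), begin(), end() and
        // toPredicate() are the chaining methods shown in this diff.
        Object predicate = new PredicateBuilder()
            .property("p1").equals("foo")
            .and()
            .begin()
                .property("p2").equals("bar")
                .or()
                .property("p3").greaterThan(5)
            .end()
            .toPredicate();
        System.out.println(predicate);
      }
    }

Only the returned chaining type changes name; call sites keep the same shape.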

+ 0 - 6
ambari-server/src/main/java/org/apache/ambari/server/metadata/RoleCommandOrder.java

@@ -155,8 +155,6 @@ public class RoleCommandOrder {
         RoleCommand.START);
     addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.DATANODE,
         RoleCommand.START);
-    addDependency(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE, Role.SECONDARY_NAMENODE,
-        RoleCommand.START);
     addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
         Role.JOBTRACKER, RoleCommand.START);
     addDependency(Role.MAPREDUCE_SERVICE_CHECK, RoleCommand.EXECUTE,
@@ -181,10 +179,6 @@ public class RoleCommandOrder {
         Role.JOBTRACKER, RoleCommand.START);
     addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
         Role.TASKTRACKER, RoleCommand.START);
-    addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.RESOURCEMANAGER, RoleCommand.START);
-    addDependency(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE,
-        Role.NODEMANAGER, RoleCommand.START);
     addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
         Role.JOBTRACKER, RoleCommand.START);
     addDependency(Role.SQOOP_SERVICE_CHECK, RoleCommand.EXECUTE,
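
The RoleCommandOrder change drops three service-check dependencies: HDFS_SERVICE_CHECK no
longer waits for SECONDARY_NAMENODE START, and PIG_SERVICE_CHECK no longer waits for
RESOURCEMANAGER or NODEMANAGER START. Each addDependency(...) call in effect records that
one (role, command) pair must wait for another; a toy sketch of that bookkeeping
(illustrative only, not the actual RoleCommandOrder internals):

    import java.util.*;

    public class RoleOrderSketch {
      // blocked "ROLE/COMMAND" -> the "ROLE/COMMAND" pairs that must complete first
      private final Map<String, Set<String>> dependencies = new HashMap<>();

      void addDependency(String blocked, String blocker) {
        dependencies.computeIfAbsent(blocked, k -> new HashSet<>()).add(blocker);
      }

      public static void main(String[] args) {
        RoleOrderSketch order = new RoleOrderSketch();
        // Still present after this hunk: the HDFS service check waits for DATANODE START.
        order.addDependency("HDFS_SERVICE_CHECK/EXECUTE", "DATANODE/START");
        System.out.println(order.dependencies);
      }
    }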

+ 236 - 396
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceComponentImpl.java

@@ -20,8 +20,6 @@ package org.apache.ambari.server.state;
 
 import java.util.*;
 import java.util.Map.Entry;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.google.gson.Gson;
 import com.google.inject.Inject;
@@ -43,8 +41,6 @@ public class ServiceComponentImpl implements ServiceComponent {
 
   private final static Logger LOG =
       LoggerFactory.getLogger(ServiceComponentImpl.class);
-  
-  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
 
   private final Service service;
 
@@ -158,374 +154,260 @@ public class ServiceComponentImpl implements ServiceComponent {
   }
 
   @Override
-  public String getName() {
-    readWriteLock.readLock().lock();
-    try {
-      return desiredStateEntity.getComponentName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized String getName() {
+    return desiredStateEntity.getComponentName();
   }
 
   @Override
-  public String getServiceName() {
-    readWriteLock.readLock().lock();
-    try {
-      return service.getName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized String getServiceName() {
+    return service.getName();
   }
 
   @Override
-  public long getClusterId() {
-    readWriteLock.readLock().lock();
-    try {
-      return this.service.getClusterId();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized long getClusterId() {
+    return this.service.getClusterId();
   }
 
   @Override
-  public Map<String, ServiceComponentHost>
+  public synchronized Map<String, ServiceComponentHost>
       getServiceComponentHosts() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(hostComponents);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return Collections.unmodifiableMap(hostComponents);
   }
 
   @Override
-  public void addServiceComponentHosts(
+  public synchronized void addServiceComponentHosts(
       Map<String, ServiceComponentHost> hostComponents) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      // TODO validation
-      for (Entry<String, ServiceComponentHost> entry :
-          hostComponents.entrySet()) {
-        if (!entry.getKey().equals(entry.getValue().getHostName())) {
-          throw new AmbariException("Invalid arguments in map"
-              + ", hostname does not match the key in map");
-        }
+    // TODO validation
+    for (Entry<String, ServiceComponentHost> entry :
+      hostComponents.entrySet()) {
+      if (!entry.getKey().equals(entry.getValue().getHostName())) {
+        throw new AmbariException("Invalid arguments in map"
+            + ", hostname does not match the key in map");
       }
-      for (ServiceComponentHost sch : hostComponents.values()) {
-        addServiceComponentHost(sch);
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
     }
-
+    for (ServiceComponentHost sch : hostComponents.values()) {
+      addServiceComponentHost(sch);
+    }
   }
 
   @Override
-  public void addServiceComponentHost(
+  public synchronized void addServiceComponentHost(
       ServiceComponentHost hostComponent) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      // TODO validation
-      // TODO ensure host belongs to cluster
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-            + ", clusterName=" + service.getCluster().getClusterName()
-            + ", clusterId=" + service.getCluster().getClusterId()
-            + ", serviceName=" + service.getName()
-            + ", serviceComponentName=" + getName()
-            + ", hostname=" + hostComponent.getHostName());
-      }
-      if (hostComponents.containsKey(hostComponent.getHostName())) {
-        throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-            + ", clusterName=" + service.getCluster().getClusterName()
-            + ", clusterId=" + service.getCluster().getClusterId()
-            + ", serviceName=" + service.getName()
-            + ", serviceComponentName=" + getName()
-            + ", hostname=" + hostComponent.getHostName());
-      }
-      // FIXME need a better approach of caching components by host
-      ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-      clusterImpl.addServiceComponentHost(hostComponent);
-      this.hostComponents.put(hostComponent.getHostName(), hostComponent);
-    } finally {
-      readWriteLock.writeLock().unlock();
+    // TODO validation
+    // TODO ensure host belongs to cluster
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
+          + ", clusterName=" + service.getCluster().getClusterName()
+          + ", clusterId=" + service.getCluster().getClusterId()
+          + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName()
+          + ", hostname=" + hostComponent.getHostName());
     }
-
+    if (hostComponents.containsKey(hostComponent.getHostName())) {
+      throw new AmbariException("Cannot add duplicate ServiceComponentHost"
+          + ", clusterName=" + service.getCluster().getClusterName()
+          + ", clusterId=" + service.getCluster().getClusterId()
+          + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName()
+          + ", hostname=" + hostComponent.getHostName());
+    }
+    // FIXME need a better approach of caching components by host
+    ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
+    clusterImpl.addServiceComponentHost(hostComponent);
+    this.hostComponents.put(hostComponent.getHostName(), hostComponent);
   }
 
   @Override
-  public ServiceComponentHost addServiceComponentHost(
+  public synchronized ServiceComponentHost addServiceComponentHost(
       String hostName) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      // TODO validation
-      // TODO ensure host belongs to cluster
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
-            + ", clusterName=" + service.getCluster().getClusterName()
-            + ", clusterId=" + service.getCluster().getClusterId()
-            + ", serviceName=" + service.getName()
-            + ", serviceComponentName=" + getName()
-            + ", hostname=" + hostName);
-      }
-      if (hostComponents.containsKey(hostName)) {
-        throw new AmbariException("Cannot add duplicate ServiceComponentHost"
-            + ", clusterName=" + service.getCluster().getClusterName()
-            + ", clusterId=" + service.getCluster().getClusterId()
-            + ", serviceName=" + service.getName()
-            + ", serviceComponentName=" + getName()
-            + ", hostname=" + hostName);
-      }
-      ServiceComponentHost hostComponent =
-          serviceComponentHostFactory.createNew(this, hostName, this.isClientComponent());
-      // FIXME need a better approach of caching components by host
-      ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-      clusterImpl.addServiceComponentHost(hostComponent);
-
-      this.hostComponents.put(hostComponent.getHostName(), hostComponent);
-
-      return hostComponent;
-    } finally {
-      readWriteLock.writeLock().unlock();
+    // TODO validation
+    // TODO ensure host belongs to cluster
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding a ServiceComponentHost to ServiceComponent"
+          + ", clusterName=" + service.getCluster().getClusterName()
+          + ", clusterId=" + service.getCluster().getClusterId()
+          + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName()
+          + ", hostname=" + hostName);
     }
+    if (hostComponents.containsKey(hostName)) {
+      throw new AmbariException("Cannot add duplicate ServiceComponentHost"
+          + ", clusterName=" + service.getCluster().getClusterName()
+          + ", clusterId=" + service.getCluster().getClusterId()
+          + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName()
+          + ", hostname=" + hostName);
+    }
+    ServiceComponentHost hostComponent =
+        serviceComponentHostFactory.createNew(this, hostName, this.isClientComponent());
+    // FIXME need a better approach of caching components by host
+    ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
+    clusterImpl.addServiceComponentHost(hostComponent);
+
+    this.hostComponents.put(hostComponent.getHostName(), hostComponent);
 
+    return hostComponent;
   }
 
   @Override
   public ServiceComponentHost getServiceComponentHost(String hostname)
     throws AmbariException {
-    readWriteLock.readLock().lock();
-    try {
-      if (!hostComponents.containsKey(hostname)) {
-        throw new ServiceComponentHostNotFoundException(getClusterName(),
-            getServiceName(), getName(), hostname);
-      }
-      return this.hostComponents.get(hostname);
-    } finally {
-      readWriteLock.readLock().unlock();
+    if (!hostComponents.containsKey(hostname)) {
+      throw new ServiceComponentHostNotFoundException(getClusterName(),
+          getServiceName(), getName(), hostname);
     }
-
+    return this.hostComponents.get(hostname);
   }
 
   @Override
-  public State getDesiredState() {
-    readWriteLock.readLock().lock();
-    try {
-      return desiredStateEntity.getDesiredState();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized State getDesiredState() {
+    return desiredStateEntity.getDesiredState();
   }
 
   @Override
-  public void setDesiredState(State state) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredState of Service"
-            + ", clusterName=" + service.getCluster().getClusterName()
-            + ", clusterId=" + service.getCluster().getClusterId()
-            + ", serviceName=" + service.getName()
-            + ", serviceComponentName=" + getName()
-            + ", oldDesiredState=" + getDesiredState()
-            + ", newDesiredState=" + state);
-      }
-      desiredStateEntity.setDesiredState(state);
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void setDesiredState(State state) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredState of Service"
+          + ", clusterName=" + service.getCluster().getClusterName()
+          + ", clusterId=" + service.getCluster().getClusterId()
+          + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName()
+          + ", oldDesiredState=" + getDesiredState()
+          + ", newDesiredState=" + state);
     }
-
+    desiredStateEntity.setDesiredState(state);
+    saveIfPersisted();
   }
 
   @Override
-  public Map<String, Config> getDesiredConfigs() {
-    readWriteLock.readLock().lock();
-    try {
-      Map<String, Config> map = new HashMap<String, Config>();
-      for (Entry<String, String> entry : desiredConfigs.entrySet()) {
-        Config config = service.getCluster().getConfig(entry.getKey(), entry.getValue());
-        if (null != config) {
-          map.put(entry.getKey(), config);
-        }
+  public synchronized Map<String, Config> getDesiredConfigs() {
+    Map<String, Config> map = new HashMap<String, Config>();
+    for (Entry<String, String> entry : desiredConfigs.entrySet()) {
+      Config config = service.getCluster().getConfig(entry.getKey(), entry.getValue());
+      if (null != config) {
+        map.put(entry.getKey(), config);
       }
+    }
 
-      Map<String, Config> svcConfigs = service.getDesiredConfigs();
-      for (Entry<String, Config> entry : svcConfigs.entrySet()) {
-        if (!map.containsKey(entry.getKey())) {
-          map.put(entry.getKey(), entry.getValue());
-        }
+    Map<String, Config> svcConfigs = service.getDesiredConfigs();
+    for (Entry<String, Config> entry : svcConfigs.entrySet()) {
+      if (!map.containsKey(entry.getKey())) {
+        map.put(entry.getKey(), entry.getValue());
       }
-
-      return Collections.unmodifiableMap(map);
-    } finally {
-      readWriteLock.readLock().unlock();
     }
 
+    return Collections.unmodifiableMap(map);
   }
 
   @Override
-  public void updateDesiredConfigs(Map<String, Config> configs) {
-
-    readWriteLock.writeLock().lock();
-    try {
-      for (Entry<String, Config> entry : configs.entrySet()) {
-        boolean contains = false;
-
-        for (ComponentConfigMappingEntity componentConfigMappingEntity : desiredStateEntity.getComponentConfigMappingEntities()) {
-          if (entry.getKey().equals(componentConfigMappingEntity.getConfigType())) {
-            contains = true;
-            componentConfigMappingEntity.setTimestamp(new Date().getTime());
-            componentConfigMappingEntity.setVersionTag(entry.getValue().getVersionTag());
-            if (persisted) {
-              componentConfigMappingDAO.merge(componentConfigMappingEntity);
-            }
+  public synchronized void updateDesiredConfigs(Map<String, Config> configs) {
+
+    for (Entry<String,Config> entry : configs.entrySet()) {
+      boolean contains = false;
+
+      for (ComponentConfigMappingEntity componentConfigMappingEntity : desiredStateEntity.getComponentConfigMappingEntities()) {
+        if (entry.getKey().equals(componentConfigMappingEntity.getConfigType())) {
+          contains = true;
+          componentConfigMappingEntity.setTimestamp(new Date().getTime());
+          componentConfigMappingEntity.setVersionTag(entry.getValue().getVersionTag());
+          if (persisted) {
+            componentConfigMappingDAO.merge(componentConfigMappingEntity);
           }
         }
+      }
 
-        if (!contains) {
-          ComponentConfigMappingEntity newEntity = new ComponentConfigMappingEntity();
-          newEntity.setClusterId(desiredStateEntity.getClusterId());
-          newEntity.setServiceName(desiredStateEntity.getServiceName());
-          newEntity.setComponentName(desiredStateEntity.getComponentName());
-          newEntity.setConfigType(entry.getKey());
-          newEntity.setVersionTag(entry.getValue().getVersionTag());
-          newEntity.setTimestamp(new Date().getTime());
-          newEntity.setServiceComponentDesiredStateEntity(desiredStateEntity);
-          desiredStateEntity.getComponentConfigMappingEntities().add(newEntity);
-
-        }
-
+      if (!contains) {
+        ComponentConfigMappingEntity newEntity = new ComponentConfigMappingEntity();
+        newEntity.setClusterId(desiredStateEntity.getClusterId());
+        newEntity.setServiceName(desiredStateEntity.getServiceName());
+        newEntity.setComponentName(desiredStateEntity.getComponentName());
+        newEntity.setConfigType(entry.getKey());
+        newEntity.setVersionTag(entry.getValue().getVersionTag());
+        newEntity.setTimestamp(new Date().getTime());
+        newEntity.setServiceComponentDesiredStateEntity(desiredStateEntity);
+        desiredStateEntity.getComponentConfigMappingEntities().add(newEntity);
 
-        this.desiredConfigs.put(entry.getKey(), entry.getValue().getVersionTag());
       }
 
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+
+      this.desiredConfigs.put(entry.getKey(), entry.getValue().getVersionTag());
     }
 
+    saveIfPersisted();
   }
 
   @Override
-  public StackId getDesiredStackVersion() {
-    readWriteLock.readLock().lock();
-    try {
-      return gson.fromJson(desiredStateEntity.getDesiredStackVersion(), StackId.class);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized StackId getDesiredStackVersion() {
+    return gson.fromJson(desiredStateEntity.getDesiredStackVersion(), StackId.class);
   }
 
   @Override
-  public void setDesiredStackVersion(StackId stackVersion) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredStackVersion of Service"
-            + ", clusterName=" + service.getCluster().getClusterName()
-            + ", clusterId=" + service.getCluster().getClusterId()
-            + ", serviceName=" + service.getName()
-            + ", serviceComponentName=" + getName()
-            + ", oldDesiredStackVersion=" + getDesiredStackVersion()
-            + ", newDesiredStackVersion=" + stackVersion);
-      }
-      desiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void setDesiredStackVersion(StackId stackVersion) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredStackVersion of Service"
+          + ", clusterName=" + service.getCluster().getClusterName()
+          + ", clusterId=" + service.getCluster().getClusterId()
+          + ", serviceName=" + service.getName()
+          + ", serviceComponentName=" + getName()
+          + ", oldDesiredStackVersion=" + getDesiredStackVersion()
+          + ", newDesiredStackVersion=" + stackVersion);
     }
-
+    desiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
+    saveIfPersisted();
   }
 
   @Override
-  public ServiceComponentResponse convertToResponse() {
-    readWriteLock.readLock().lock();
-    try {
-      ServiceComponentResponse r = new ServiceComponentResponse(
-          getClusterId(), service.getCluster().getClusterName(),
-          service.getName(), getName(), this.desiredConfigs,
-          getDesiredStackVersion().getStackId(),
-          getDesiredState().toString());
-      return r;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized ServiceComponentResponse convertToResponse() {
+    ServiceComponentResponse r  = new ServiceComponentResponse(
+        getClusterId(), service.getCluster().getClusterName(),
+        service.getName(), getName(), this.desiredConfigs,
+        getDesiredStackVersion().getStackId(),
+        getDesiredState().toString());
+    return r;
   }
 
   @Override
   public String getClusterName() {
-    readWriteLock.readLock().lock();
-    try {
-      return service.getCluster().getClusterName();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+    return service.getCluster().getClusterName();
   }
 
   @Override
-  public void debugDump(StringBuilder sb) {
-    readWriteLock.readLock().lock();
-    try {
-      sb.append("ServiceComponent={ serviceComponentName=" + getName()
-          + ", clusterName=" + service.getCluster().getClusterName()
-          + ", clusterId=" + service.getCluster().getClusterId()
-          + ", serviceName=" + service.getName()
-          + ", desiredStackVersion=" + getDesiredStackVersion()
-          + ", desiredState=" + getDesiredState().toString()
-          + ", hostcomponents=[ ");
-      boolean first = true;
-      for (ServiceComponentHost sch : hostComponents.values()) {
-        if (!first) {
-          sb.append(" , ");
-          first = false;
-        }
-        sb.append("\n        ");
-        sch.debugDump(sb);
-        sb.append(" ");
+  public synchronized void debugDump(StringBuilder sb) {
+    sb.append("ServiceComponent={ serviceComponentName=" + getName()
+        + ", clusterName=" + service.getCluster().getClusterName()
+        + ", clusterId=" + service.getCluster().getClusterId()
+        + ", serviceName=" + service.getName()
+        + ", desiredStackVersion=" + getDesiredStackVersion()
+        + ", desiredState=" + getDesiredState().toString()
+        + ", hostcomponents=[ ");
+    boolean first = true;
+    for(ServiceComponentHost sch : hostComponents.values()) {
+      if (!first) {
+        sb.append(" , ");
+        first = false;
       }
-      sb.append(" ] }");
-    } finally {
-      readWriteLock.readLock().unlock();
+      sb.append("\n        ");
+      sch.debugDump(sb);
+      sb.append(" ");
     }
-
+    sb.append(" ] }");
   }
 
   @Override
-  public boolean isPersisted() {
-    readWriteLock.readLock().lock();
-    try {
+  public synchronized boolean isPersisted() {
       return persisted;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
   }
 
   @Override
-  public void persist() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (!persisted) {
-        persistEntities();
-        refresh();
-        service.refresh();
-        persisted = true;
-      } else {
-        saveIfPersisted();
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void persist() {
+    if (!persisted) {
+      persistEntities();
+      refresh();
+      service.refresh();
+      persisted = true;
+    } else {
+      saveIfPersisted();
     }
-
   }
 
   @Transactional
@@ -542,35 +424,23 @@ public class ServiceComponentImpl implements ServiceComponent {
 
   @Override
   @Transactional
-  public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted()) {
-        ServiceComponentDesiredStateEntityPK pk = new ServiceComponentDesiredStateEntityPK();
-        pk.setComponentName(getName());
-        pk.setClusterId(getClusterId());
-        pk.setServiceName(getServiceName());
-        // TODO: desiredStateEntity is assigned in unsynchronized way, may be a bug
-        desiredStateEntity = serviceComponentDesiredStateDAO.findByPK(pk);
-        serviceComponentDesiredStateDAO.refresh(desiredStateEntity);
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void refresh() {
+    if (isPersisted()) {
+      ServiceComponentDesiredStateEntityPK pk = new ServiceComponentDesiredStateEntityPK();
+      pk.setComponentName(getName());
+      pk.setClusterId(getClusterId());
+      pk.setServiceName(getServiceName());
+      // TODO: desiredStateEntity is assigned in unsynchronized way, may be a bug
+      desiredStateEntity = serviceComponentDesiredStateDAO.findByPK(pk);
+      serviceComponentDesiredStateDAO.refresh(desiredStateEntity);
     }
-
   }
 
   @Transactional
-  private void saveIfPersisted() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted()) {
-        serviceComponentDesiredStateDAO.merge(desiredStateEntity);
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+  private synchronized void saveIfPersisted() {
+    if (isPersisted()) {
+      serviceComponentDesiredStateDAO.merge(desiredStateEntity);
     }
-
   }
 
   @Override
@@ -579,125 +449,95 @@ public class ServiceComponentImpl implements ServiceComponent {
   }
 
   @Override
-  public boolean canBeRemoved() {
-    readWriteLock.readLock().lock();
-    try {
-      if (!getDesiredState().isRemovableState()) {
-        return false;
-      }
+  public synchronized boolean canBeRemoved() {
+    if (!getDesiredState().isRemovableState()) {
+      return false;
+    }
 
-      for (ServiceComponentHost sch : hostComponents.values()) {
-        if (!sch.canBeRemoved()) {
-          LOG.warn("Found non removable hostcomponent when trying to"
-              + " delete service component"
-              + ", clusterName=" + getClusterName()
-              + ", serviceName=" + getServiceName()
-              + ", componentName=" + getName()
-              + ", hostname=" + sch.getHostName());
-          return false;
-        }
+    for (ServiceComponentHost sch : hostComponents.values()) {
+      if (!sch.canBeRemoved()) {
+        LOG.warn("Found non removable hostcomponent when trying to"
+            + " delete service component"
+            + ", clusterName=" + getClusterName()
+            + ", serviceName=" + getServiceName()
+            + ", componentName=" + getName()
+            + ", hostname=" + sch.getHostName());
+        return false;
       }
-      return true;
-    } finally {
-      readWriteLock.readLock().unlock();
     }
-
+    return true;
   }
 
   @Override
   @Transactional
-  public void deleteAllServiceComponentHosts()
+  public synchronized void deleteAllServiceComponentHosts()
       throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      LOG.info("Deleting all servicecomponenthosts for component"
-          + ", clusterName=" + getClusterName()
-          + ", serviceName=" + getServiceName()
-          + ", componentName=" + getName());
-      for (ServiceComponentHost sch : hostComponents.values()) {
-        if (!sch.canBeRemoved()) {
-          throw new AmbariException("Found non removable hostcomponent "
-              + " when trying to delete"
-              + " all hostcomponents from servicecomponent"
-              + ", clusterName=" + getClusterName()
-              + ", serviceName=" + getServiceName()
-              + ", componentName=" + getName()
-              + ", hostname=" + sch.getHostName());
-        }
-      }
-
-      for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
-        serviceComponentHost.delete();
+    LOG.info("Deleting all servicecomponenthosts for component"
+        + ", clusterName=" + getClusterName()
+        + ", serviceName=" + getServiceName()
+        + ", componentName=" + getName());
+    for (ServiceComponentHost sch : hostComponents.values()) {
+      if (!sch.canBeRemoved()) {
+        throw new AmbariException("Found non removable hostcomponent "
+            + " when trying to delete"
+            + " all hostcomponents from servicecomponent"
+            + ", clusterName=" + getClusterName()
+            + ", serviceName=" + getServiceName()
+            + ", componentName=" + getName()
+            + ", hostname=" + sch.getHostName());
       }
+    }
 
-      hostComponents.clear();
-    } finally {
-      readWriteLock.writeLock().unlock();
+    for (ServiceComponentHost serviceComponentHost : hostComponents.values()) {
+      serviceComponentHost.delete();
     }
 
+    hostComponents.clear();
   }
 
   @Override
-  public void deleteServiceComponentHosts(String hostname)
+  public synchronized void deleteServiceComponentHosts(String hostname)
       throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      ServiceComponentHost sch = getServiceComponentHost(hostname);
-      LOG.info("Deleting servicecomponenthost for cluster"
+    ServiceComponentHost sch = getServiceComponentHost(hostname);
+    LOG.info("Deleting servicecomponenthost for cluster"
+        + ", clusterName=" + getClusterName()
+        + ", serviceName=" + getServiceName()
+        + ", componentName=" + getName()
+        + ", hostname=" + sch.getHostName());
+    if (!sch.canBeRemoved()) {
+      throw new AmbariException("Could not delete hostcomponent from cluster"
           + ", clusterName=" + getClusterName()
           + ", serviceName=" + getServiceName()
           + ", componentName=" + getName()
           + ", hostname=" + sch.getHostName());
-      if (!sch.canBeRemoved()) {
-        throw new AmbariException("Could not delete hostcomponent from cluster"
-            + ", clusterName=" + getClusterName()
-            + ", serviceName=" + getServiceName()
-            + ", componentName=" + getName()
-            + ", hostname=" + sch.getHostName());
-      }
-      sch.delete();
-      hostComponents.remove(hostname);
-
-      // FIXME need a better approach of caching components by host
-      ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
-      clusterImpl.removeServiceComponentHost(sch);
-    } finally {
-      readWriteLock.writeLock().unlock();
     }
+    sch.delete();
+    hostComponents.remove(hostname);
 
+    // FIXME need a better approach of caching components by host
+    ClusterImpl clusterImpl = (ClusterImpl) service.getCluster();
+    clusterImpl.removeServiceComponentHost(sch);
   }
 
   @Override
-  public void deleteDesiredConfigs(Set<String> configTypes) {
-    readWriteLock.writeLock().lock();
-    try {
-      componentConfigMappingDAO.removeByType(configTypes);
-      for (String configType : configTypes) {
-        desiredConfigs.remove(configType);
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void deleteDesiredConfigs(Set<String> configTypes) {
+    componentConfigMappingDAO.removeByType(configTypes);
+    for (String configType : configTypes) {
+      desiredConfigs.remove(configType);
     }
-
   }
 
   @Override
   @Transactional
-  public void delete() throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      deleteAllServiceComponentHosts();
-
-      if (persisted) {
-        removeEntities();
-        persisted = false;
-      }
+  public synchronized void delete() throws AmbariException {
+    deleteAllServiceComponentHosts();
 
-      desiredConfigs.clear();
-    } finally {
-      readWriteLock.writeLock().unlock();
+    if (persisted) {
+      removeEntities();
+      persisted = false;
     }
 
+    desiredConfigs.clear();
   }
 
   @Transactional
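
The ServiceComponentImpl hunk (and the ServiceImpl hunk that follows) applies one pattern
throughout: the explicit ReentrantReadWriteLock guards are removed and the methods become
synchronized. A toy holder class sketching the two styles side by side (illustrative only,
not Ambari code):

    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class LockingStyleSketch {
      private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
      private String desiredState = "INIT";

      // Style removed by this backport: explicit locks, concurrent readers allowed.
      public String getDesiredStateWithRwLock() {
        readWriteLock.readLock().lock();
        try {
          return desiredState;
        } finally {
          readWriteLock.readLock().unlock();
        }
      }

      // Style this backport adopts: one intrinsic lock, readers and writers serialize.
      public synchronized String getDesiredStateSynchronized() {
        return desiredState;
      }

      public synchronized void setDesiredState(String state) {
        desiredState = state;
      }

      public static void main(String[] args) {
        LockingStyleSketch holder = new LockingStyleSketch();
        holder.setDesiredState("INSTALLED");
        System.out.println(holder.getDesiredStateWithRwLock());
        System.out.println(holder.getDesiredStateSynchronized());
      }
    }

The synchronized form is simpler, at the cost of serializing readers that the read lock
would have allowed to proceed concurrently.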

+ 198 - 321
ambari-server/src/main/java/org/apache/ambari/server/state/ServiceImpl.java

@@ -20,8 +20,6 @@ package org.apache.ambari.server.state;
 
 import java.util.*;
 import java.util.Map.Entry;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
@@ -41,7 +39,6 @@ import com.google.inject.persist.Transactional;
 
 
 public class ServiceImpl implements Service {
-  private ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
 
   private ClusterServiceEntity serviceEntity;
   private ServiceDesiredStateEntity serviceDesiredStateEntity;
@@ -144,7 +141,7 @@ public class ServiceImpl implements Service {
 
   @Override
   public String getName() {
-    return serviceEntity.getServiceName();
+      return serviceEntity.getServiceName();
   }
 
   @Override
@@ -153,244 +150,172 @@ public class ServiceImpl implements Service {
   }
 
   @Override
-  public Map<String, ServiceComponent> getServiceComponents() {
-    readWriteLock.readLock().lock();
-    try {
-      return Collections.unmodifiableMap(components);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized Map<String, ServiceComponent> getServiceComponents() {
+    return Collections.unmodifiableMap(components);
   }
 
   @Override
-  public void addServiceComponents(
+  public synchronized void addServiceComponents(
       Map<String, ServiceComponent> components) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      for (ServiceComponent sc : components.values()) {
-        addServiceComponent(sc);
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+    for (ServiceComponent sc : components.values()) {
+      addServiceComponent(sc);
     }
-
   }
 
   @Override
-  public void addServiceComponent(ServiceComponent component)
+  public synchronized void addServiceComponent(ServiceComponent component)
       throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      // TODO validation
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a ServiceComponent to Service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", clusterId=" + cluster.getClusterId()
-            + ", serviceName=" + getName()
-            + ", serviceComponentName=" + component.getName());
-      }
-      if (components.containsKey(component.getName())) {
-        throw new AmbariException("Cannot add duplicate ServiceComponent"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", clusterId=" + cluster.getClusterId()
-            + ", serviceName=" + getName()
-            + ", serviceComponentName=" + component.getName());
-      }
-      this.components.put(component.getName(), component);
-    } finally {
-      readWriteLock.writeLock().unlock();
+    // TODO validation
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding a ServiceComponent to Service"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", serviceComponentName=" + component.getName());
     }
-
+    if (components.containsKey(component.getName())) {
+      throw new AmbariException("Cannot add duplicate ServiceComponent"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", serviceComponentName=" + component.getName());
+    }
+    this.components.put(component.getName(), component);
   }
 
   @Override
-  public ServiceComponent addServiceComponent(
+  public synchronized ServiceComponent addServiceComponent(
       String serviceComponentName) throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding a ServiceComponent to Service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", clusterId=" + cluster.getClusterId()
-            + ", serviceName=" + getName()
-            + ", serviceComponentName=" + serviceComponentName);
-      }
-      if (components.containsKey(serviceComponentName)) {
-        throw new AmbariException("Cannot add duplicate ServiceComponent"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", clusterId=" + cluster.getClusterId()
-            + ", serviceName=" + getName()
-            + ", serviceComponentName=" + serviceComponentName);
-      }
-      ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
-      this.components.put(component.getName(), component);
-      return component;
-    } finally {
-      readWriteLock.writeLock().unlock();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Adding a ServiceComponent to Service"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", serviceComponentName=" + serviceComponentName);
     }
-
+    if (components.containsKey(serviceComponentName)) {
+      throw new AmbariException("Cannot add duplicate ServiceComponent"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", serviceComponentName=" + serviceComponentName);
+    }
+    ServiceComponent component = serviceComponentFactory.createNew(this, serviceComponentName);
+    this.components.put(component.getName(), component);
+    return component;
   }
 
   @Override
   public ServiceComponent getServiceComponent(String componentName)
       throws AmbariException {
-    readWriteLock.readLock().lock();
-    try {
-      if (!components.containsKey(componentName)) {
-        throw new ServiceComponentNotFoundException(cluster.getClusterName(),
-            getName(),
-            componentName);
-      }
-      return this.components.get(componentName);
-    } finally {
-      readWriteLock.readLock().unlock();
+    if (!components.containsKey(componentName)) {
+      throw new ServiceComponentNotFoundException(cluster.getClusterName(),
+          getName(),
+          componentName);
     }
-
+    return this.components.get(componentName);
   }
 
   @Override
-  public State getDesiredState() {
-    readWriteLock.readLock().lock();
-    try {
-      return this.serviceDesiredStateEntity.getDesiredState();
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized State getDesiredState() {
+    return this.serviceDesiredStateEntity.getDesiredState();
   }
 
   @Override
-  public void setDesiredState(State state) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredState of Service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", clusterId=" + cluster.getClusterId()
-            + ", serviceName=" + getName()
-            + ", oldDesiredState=" + this.getDesiredState()
-            + ", newDesiredState=" + state);
-      }
-      this.serviceDesiredStateEntity.setDesiredState(state);
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void setDesiredState(State state) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredState of Service"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", oldDesiredState=" + this.getDesiredState()
+          + ", newDesiredState=" + state);
     }
-
+    this.serviceDesiredStateEntity.setDesiredState(state);
+    saveIfPersisted();
   }
 
   @Override
-  public Map<String, Config> getDesiredConfigs() {
-    readWriteLock.readLock().lock();
-    try {
-      Map<String, Config> map = new HashMap<String, Config>();
-      for (Entry<String, String> entry : desiredConfigs.entrySet()) {
-        Config config = cluster.getConfig(entry.getKey(), entry.getValue());
-        if (null != config) {
-          map.put(entry.getKey(), config);
-        } else {
-          // FIXME this is an error - should throw a proper exception
-          throw new RuntimeException("Found an invalid config"
-              + ", clusterName=" + getCluster().getClusterName()
-              + ", serviceName=" + getName()
-              + ", configType=" + entry.getKey()
-              + ", configVersionTag=" + entry.getValue());
-        }
+  public synchronized Map<String, Config> getDesiredConfigs() {
+    Map<String, Config> map = new HashMap<String, Config>();
+    for (Entry<String, String> entry : desiredConfigs.entrySet()) {
+      Config config = cluster.getConfig(entry.getKey(), entry.getValue());
+      if (null != config) {
+        map.put(entry.getKey(), config);
+      } else {
+        // FIXME this is an error - should throw a proper exception
+        throw new RuntimeException("Found an invalid config"
+            + ", clusterName=" + getCluster().getClusterName()
+            + ", serviceName=" + getName()
+            + ", configType=" + entry.getKey()
+            + ", configVersionTag=" + entry.getValue());
       }
-      return Collections.unmodifiableMap(map);
-    } finally {
-      readWriteLock.readLock().unlock();
     }
-
+    return Collections.unmodifiableMap(map);
   }
 
   @Override
-  public void updateDesiredConfigs(Map<String, Config> configs) {
-
-    readWriteLock.writeLock().lock();
-    try {
-      for (Entry<String, Config> entry : configs.entrySet()) {
-        boolean contains = false;
-
-        for (ServiceConfigMappingEntity serviceConfigMappingEntity : serviceEntity.getServiceConfigMappings()) {
-          if (entry.getKey().equals(serviceConfigMappingEntity.getConfigType())) {
-            contains = true;
-            serviceConfigMappingEntity.setTimestamp(new Date().getTime());
-            serviceConfigMappingEntity.setVersionTag(entry.getValue().getVersionTag());
-          }
-        }
+  public synchronized void updateDesiredConfigs(Map<String, Config> configs) {
 
-        if (!contains) {
-          ServiceConfigMappingEntity newEntity = new ServiceConfigMappingEntity();
-          newEntity.setClusterId(serviceEntity.getClusterId());
-          newEntity.setServiceName(serviceEntity.getServiceName());
-          newEntity.setConfigType(entry.getKey());
-          newEntity.setVersionTag(entry.getValue().getVersionTag());
-          newEntity.setTimestamp(new Date().getTime());
-          newEntity.setServiceEntity(serviceEntity);
-          serviceEntity.getServiceConfigMappings().add(newEntity);
+    for (Entry<String,Config> entry : configs.entrySet()) {
+      boolean contains = false;
 
+      for (ServiceConfigMappingEntity serviceConfigMappingEntity : serviceEntity.getServiceConfigMappings()) {
+        if (entry.getKey().equals(serviceConfigMappingEntity.getConfigType())) {
+          contains = true;
+          serviceConfigMappingEntity.setTimestamp(new Date().getTime());
+          serviceConfigMappingEntity.setVersionTag(entry.getValue().getVersionTag());
         }
+      }
 
+      if (!contains) {
+        ServiceConfigMappingEntity newEntity = new ServiceConfigMappingEntity();
+        newEntity.setClusterId(serviceEntity.getClusterId());
+        newEntity.setServiceName(serviceEntity.getServiceName());
+        newEntity.setConfigType(entry.getKey());
+        newEntity.setVersionTag(entry.getValue().getVersionTag());
+        newEntity.setTimestamp(new Date().getTime());
+        newEntity.setServiceEntity(serviceEntity);
+        serviceEntity.getServiceConfigMappings().add(newEntity);
 
-        this.desiredConfigs.put(entry.getKey(), entry.getValue().getVersionTag());
       }
 
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+
+      this.desiredConfigs.put(entry.getKey(), entry.getValue().getVersionTag());
     }
 
+    saveIfPersisted();
 
   }
 
   @Override
-  public StackId getDesiredStackVersion() {
-    readWriteLock.readLock().lock();
-    try {
-      return gson.fromJson(serviceDesiredStateEntity.getDesiredStackVersion(), StackId.class);
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized StackId getDesiredStackVersion() {
+    return gson.fromJson(serviceDesiredStateEntity.getDesiredStackVersion(), StackId.class);
   }
 
   @Override
-  public void setDesiredStackVersion(StackId stackVersion) {
-    readWriteLock.writeLock().lock();
-    try {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Setting DesiredStackVersion of Service"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", clusterId=" + cluster.getClusterId()
-            + ", serviceName=" + getName()
-            + ", oldDesiredStackVersion=" + getDesiredStackVersion()
-            + ", newDesiredStackVersion=" + stackVersion);
-      }
-      serviceDesiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
-      saveIfPersisted();
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void setDesiredStackVersion(StackId stackVersion) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Setting DesiredStackVersion of Service"
+          + ", clusterName=" + cluster.getClusterName()
+          + ", clusterId=" + cluster.getClusterId()
+          + ", serviceName=" + getName()
+          + ", oldDesiredStackVersion=" + getDesiredStackVersion()
+          + ", newDesiredStackVersion=" + stackVersion);
     }
-
+    serviceDesiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
+    saveIfPersisted();
   }
 
   @Override
-  public ServiceResponse convertToResponse() {
-    readWriteLock.readLock().lock();
-    try {
-      ServiceResponse r = new ServiceResponse(cluster.getClusterId(),
-          cluster.getClusterName(),
-          getName(),
-          desiredConfigs,
-          getDesiredStackVersion().getStackId(),
-          getDesiredState().toString());
-      return r;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
+  public synchronized ServiceResponse convertToResponse() {
+    ServiceResponse r = new ServiceResponse(cluster.getClusterId(),
+        cluster.getClusterName(),
+        getName(),
+        desiredConfigs,
+        getDesiredStackVersion().getStackId(),
+        getDesiredState().toString());
+    return r;
   }
 
   @Override
@@ -399,72 +324,54 @@ public class ServiceImpl implements Service {
   }
 
   @Override
-  public void debugDump(StringBuilder sb) {
-    readWriteLock.readLock().lock();
-    try {
-      sb.append("Service={ serviceName=" + getName()
-          + ", clusterName=" + cluster.getClusterName()
-          + ", clusterId=" + cluster.getClusterId()
-          + ", desiredStackVersion=" + getDesiredStackVersion()
-          + ", desiredState=" + getDesiredState().toString()
-          + ", configs=[");
-      boolean first = true;
-      if (desiredConfigs != null) {
-        for (Entry<String, String> entry : desiredConfigs.entrySet()) {
-          if (!first) {
-            sb.append(" , ");
-          }
-          first = false;
-          sb.append("{ Config type=" + entry.getKey()
-              + ", versionTag=" + entry.getValue() + "}");
-        }
-      }
-      sb.append("], components=[ ");
-
-      first = true;
-      for (ServiceComponent sc : components.values()) {
+  public synchronized void debugDump(StringBuilder sb) {
+    sb.append("Service={ serviceName=" + getName()
+        + ", clusterName=" + cluster.getClusterName()
+        + ", clusterId=" + cluster.getClusterId()
+        + ", desiredStackVersion=" + getDesiredStackVersion()
+        + ", desiredState=" + getDesiredState().toString()
+        + ", configs=[");
+    boolean first = true;
+    if (desiredConfigs != null) {
+      for (Entry<String, String> entry : desiredConfigs.entrySet()) {
         if (!first) {
           sb.append(" , ");
         }
         first = false;
-        sb.append("\n      ");
-        sc.debugDump(sb);
-        sb.append(" ");
+        sb.append("{ Config type=" + entry.getKey()
+            + ", versionTag=" + entry.getValue() + "}");
       }
-      sb.append(" ] }");
-    } finally {
-      readWriteLock.readLock().unlock();
     }
+    sb.append("], components=[ ");
 
+    first = true;
+    for(ServiceComponent sc : components.values()) {
+      if (!first) {
+        sb.append(" , ");
+      }
+      first = false;
+      sb.append("\n      ");
+      sc.debugDump(sb);
+      sb.append(" ");
+    }
+    sb.append(" ] }");
   }
 
   @Override
-  public boolean isPersisted() {
-    readWriteLock.readLock().lock();
-    try {
+  public synchronized boolean isPersisted() {
       return persisted;
-    } finally {
-      readWriteLock.readLock().unlock();
-    }
-
   }
 
   @Override
-  public void persist() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (!persisted) {
-        persistEntities();
-        refresh();
-        cluster.refresh();
-        persisted = true;
-      } else {
-        saveIfPersisted();
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void persist() {
+    if (!persisted) {
+      persistEntities();
+      refresh();
+      cluster.refresh();
+      persisted = true;
+    } else {
+      saveIfPersisted();
     }
-
   }
 
   @Transactional
@@ -491,102 +398,78 @@ public class ServiceImpl implements Service {
 
   @Override
   @Transactional
-  public void refresh() {
-    readWriteLock.writeLock().lock();
-    try {
-      if (isPersisted()) {
-        ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
-        pk.setClusterId(getClusterId());
-        pk.setServiceName(getName());
-        serviceEntity = clusterServiceDAO.findByPK(pk);
-        serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
-        clusterServiceDAO.refresh(serviceEntity);
-        serviceDesiredStateDAO.refresh(serviceDesiredStateEntity);
-      }
-    } finally {
-      readWriteLock.writeLock().unlock();
+  public synchronized void refresh() {
+    if (isPersisted()) {
+      ClusterServiceEntityPK pk = new ClusterServiceEntityPK();
+      pk.setClusterId(getClusterId());
+      pk.setServiceName(getName());
+      serviceEntity = clusterServiceDAO.findByPK(pk);
+      serviceDesiredStateEntity = serviceEntity.getServiceDesiredStateEntity();
+      clusterServiceDAO.refresh(serviceEntity);
+      serviceDesiredStateDAO.refresh(serviceDesiredStateEntity);
     }
-
   }
 
   @Override
-  public boolean canBeRemoved() {
-    readWriteLock.readLock().lock();
-    try {
-      if (!getDesiredState().isRemovableState()) {
-        return false;
-      }
+  public synchronized boolean canBeRemoved() {
+    if (!getDesiredState().isRemovableState()) {
+      return false;
+    }
 
-      for (ServiceComponent sc : components.values()) {
-        if (!sc.canBeRemoved()) {
-          LOG.warn("Found non removable component when trying to delete service"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", serviceName=" + getName()
-              + ", componentName=" + sc.getName());
-          return false;
-        }
+    for (ServiceComponent sc : components.values()) {
+      if (!sc.canBeRemoved()) {
+        LOG.warn("Found non removable component when trying to delete service"
+            + ", clusterName=" + cluster.getClusterName()
+            + ", serviceName=" + getName()
+            + ", componentName=" + sc.getName());
+        return false;
       }
-      return true;
-    } finally {
-      readWriteLock.readLock().unlock();
     }
-
+    return true;
   }
 
   @Override
   @Transactional
-  public void deleteAllComponents() throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      LOG.info("Deleting all components for service"
-          + ", clusterName=" + cluster.getClusterName()
-          + ", serviceName=" + getName());
-      // FIXME check dependencies from meta layer
-      for (ServiceComponent component : components.values()) {
-        if (!component.canBeRemoved()) {
-          throw new AmbariException("Found non removable component when trying to"
-              + " delete all components from service"
-              + ", clusterName=" + cluster.getClusterName()
-              + ", serviceName=" + getName()
-              + ", componentName=" + component.getName());
-        }
-      }
-
-      for (ServiceComponent serviceComponent : components.values()) {
-        serviceComponent.delete();
+  public synchronized void deleteAllComponents() throws AmbariException {
+    LOG.info("Deleting all components for service"
+        + ", clusterName=" + cluster.getClusterName()
+        + ", serviceName=" + getName());
+    // FIXME check dependencies from meta layer
+    for (ServiceComponent component : components.values()) {
+      if (!component.canBeRemoved()) {
+        throw new AmbariException("Found non removable component when trying to"
+            + " delete all components from service"
+            + ", clusterName=" + cluster.getClusterName()
+            + ", serviceName=" + getName()
+            + ", componentName=" + component.getName());
       }
+    }
 
-      components.clear();
-    } finally {
-      readWriteLock.writeLock().unlock();
+    for (ServiceComponent serviceComponent : components.values()) {
+      serviceComponent.delete();
     }
 
+    components.clear();
   }
 
   @Override
-  public void deleteServiceComponent(String componentName)
+  public synchronized void deleteServiceComponent(String componentName)
       throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      ServiceComponent component = getServiceComponent(componentName);
-      LOG.info("Deleting servicecomponent for cluster"
+    ServiceComponent component = getServiceComponent(componentName);
+    LOG.info("Deleting servicecomponent for cluster"
+        + ", clusterName=" + cluster.getClusterName()
+        + ", serviceName=" + getName()
+        + ", componentName=" + componentName);
+    // FIXME check dependencies from meta layer
+    if (!component.canBeRemoved()) {
+      throw new AmbariException("Could not delete component from cluster"
           + ", clusterName=" + cluster.getClusterName()
           + ", serviceName=" + getName()
           + ", componentName=" + componentName);
-      // FIXME check dependencies from meta layer
-      if (!component.canBeRemoved()) {
-        throw new AmbariException("Could not delete component from cluster"
-            + ", clusterName=" + cluster.getClusterName()
-            + ", serviceName=" + getName()
-            + ", componentName=" + componentName);
-      }
-
-      component.delete();
-      components.remove(componentName);
-    } finally {
-      readWriteLock.writeLock().unlock();
     }
 
+    component.delete();
+    components.remove(componentName);
   }
 
   @Override
@@ -596,21 +479,15 @@ public class ServiceImpl implements Service {
 
   @Override
   @Transactional
-  public void delete() throws AmbariException {
-    readWriteLock.writeLock().lock();
-    try {
-      deleteAllComponents();
-
-      if (persisted) {
-        removeEntities();
-        persisted = false;
-      }
+  public synchronized void delete() throws AmbariException {
+    deleteAllComponents();
 
-      desiredConfigs.clear();
-    } finally {
-      readWriteLock.writeLock().unlock();
+    if (persisted) {
+      removeEntities();
+      persisted = false;
     }
 
+    desiredConfigs.clear();
   }
 
   @Transactional

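For orientation, the ServiceImpl hunks above swap an explicit ReentrantReadWriteLock guard for plain synchronized methods. Below is a minimal, self-contained sketch (not taken from the patch; class and field names are illustrative only) contrasting the two guarding styles:

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch only; not part of the Ambari sources.
class GuardedFlagExample {
  private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
  private boolean persisted = false;

  // Style 1: explicit read/write locks; readers may proceed concurrently.
  boolean isPersistedWithLock() {
    rwLock.readLock().lock();
    try {
      return persisted;
    } finally {
      rwLock.readLock().unlock();
    }
  }

  void markPersistedWithLock() {
    rwLock.writeLock().lock();
    try {
      persisted = true;
    } finally {
      rwLock.writeLock().unlock();
    }
  }

  // Style 2: intrinsic monitor; simpler, but readers and writers
  // all serialize on the same object monitor.
  synchronized boolean isPersistedSynchronized() {
    return persisted;
  }

  synchronized void markPersistedSynchronized() {
    persisted = true;
  }
}
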
+ 5 - 17
ambari-server/src/main/java/org/apache/ambari/server/state/host/HostImpl.java

@@ -355,9 +355,6 @@ public class HostImpl implements Host {
     }
   }
 
-  /**
-   * @param hostInfo
-   */
   @Override
   public void importHostInfo(HostInfo hostInfo) {
     try {
@@ -460,26 +457,17 @@ public class HostImpl implements Host {
     }
   }
 
+  /**
+   * @param hostInfo
+   */
   @Override
   public void setLastAgentEnv(AgentEnv env) {
-    writeLock.lock();
-    try {
-      lastAgentEnv = env;
-    } finally {
-      writeLock.unlock();
-    }
-
+    lastAgentEnv = env;
   }
   
   @Override
   public AgentEnv getLastAgentEnv() {
-    readLock.lock();
-    try {
-      return lastAgentEnv;
-    } finally {
-      readLock.unlock();
-    }
-
+    return lastAgentEnv;
   }
 
   @Override

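The HostImpl hunks above remove the read/write lock around a plain getter/setter pair. As a point of reference only: a field read and written from multiple threads with no synchronization at all has no visibility guarantee, and one common lightweight alternative is a volatile field. The sketch below is illustrative and not taken from the patch; AgentEnvSketch stands in for the real type.

// Illustrative sketch only; not part of the Ambari sources.
class AgentEnvSketch {
}

class LastAgentEnvHolder {
  // volatile provides cross-thread visibility for a single reference
  // that is read and written without any locking.
  private volatile AgentEnvSketch lastAgentEnv;

  void setLastAgentEnv(AgentEnvSketch env) {
    lastAgentEnv = env;
  }

  AgentEnvSketch getLastAgentEnv() {
    return lastAgentEnv;
  }
}
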
+ 77 - 126
ambari-server/src/main/java/org/apache/ambari/server/state/svccomphost/ServiceComponentHostImpl.java

@@ -64,9 +64,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   // FIXME need more debug logs
 
-  private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
-  private final Lock readLock = readWriteLock.readLock();
-  private final Lock writeLock = readWriteLock.writeLock();
+  private final Lock readLock;
+  private final Lock writeLock;
 
   private final ServiceComponent serviceComponent;
   private final Host host;
@@ -601,6 +600,9 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
       this.stateMachine = daemonStateMachineFactory.make(this);
     }
 
+    ReadWriteLock rwLock = new ReentrantReadWriteLock();
+    this.readLock = rwLock.readLock();
+    this.writeLock = rwLock.writeLock();
     this.serviceComponent = serviceComponent;
 
     stateEntity = new HostComponentStateEntity();
@@ -639,6 +641,9 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
                                   @Assisted HostComponentDesiredStateEntity desiredStateEntity,
                                   Injector injector) {
     injector.injectMembers(this);
+    ReadWriteLock rwLock = new ReentrantReadWriteLock();
+    this.readLock = rwLock.readLock();
+    this.writeLock = rwLock.writeLock();
     this.serviceComponent = serviceComponent;
 
 
@@ -673,8 +678,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public State getState() {
-    readLock.lock();
     try {
+      readLock.lock();
       return stateMachine.getCurrentState();
     }
     finally {
@@ -684,8 +689,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public void setState(State state) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       stateMachine.setCurrentState(state);
       stateEntity.setCurrentState(state);
       saveIfPersisted();
@@ -741,32 +746,20 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public String getServiceComponentName() {
-    readLock.lock();
-    try {
-      return serviceComponent.getName();
-    } finally {
-      readLock.unlock();
-    }
-
+    return serviceComponent.getName();
   }
 
   @Override
   public String getHostName() {
-    readLock.lock();
-    try {
-      return host.getHostName();
-    } finally {
-      readLock.unlock();
-    }
-
+    return host.getHostName();
   }
 
   /**
    * @return the lastOpStartTime
    */
   public long getLastOpStartTime() {
-    readLock.lock();
     try {
+      readLock.lock();
       return lastOpStartTime;
     }
     finally {
@@ -778,8 +771,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
    * @param lastOpStartTime the lastOpStartTime to set
    */
   public void setLastOpStartTime(long lastOpStartTime) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       this.lastOpStartTime = lastOpStartTime;
     }
     finally {
@@ -791,8 +784,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
    * @return the lastOpEndTime
    */
   public long getLastOpEndTime() {
-    readLock.lock();
     try {
+      readLock.lock();
       return lastOpEndTime;
     }
     finally {
@@ -804,8 +797,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
    * @param lastOpEndTime the lastOpEndTime to set
    */
   public void setLastOpEndTime(long lastOpEndTime) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       this.lastOpEndTime = lastOpEndTime;
     }
     finally {
@@ -817,8 +810,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
    * @return the lastOpLastUpdateTime
    */
   public long getLastOpLastUpdateTime() {
-    readLock.lock();
     try {
+      readLock.lock();
       return lastOpLastUpdateTime;
     }
     finally {
@@ -830,8 +823,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
    * @param lastOpLastUpdateTime the lastOpLastUpdateTime to set
    */
   public void setLastOpLastUpdateTime(long lastOpLastUpdateTime) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       this.lastOpLastUpdateTime = lastOpLastUpdateTime;
     }
     finally {
@@ -841,29 +834,17 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public long getClusterId() {
-    readLock.lock();
-    try {
-      return serviceComponent.getClusterId();
-    } finally {
-      readLock.unlock();
-    }
-
+    return serviceComponent.getClusterId();
   }
 
   @Override
   public String getServiceName() {
-    readLock.lock();
-    try {
-      return serviceComponent.getServiceName();
-    } finally {
-      readLock.unlock();
-    }
-
+    return serviceComponent.getServiceName();
   }
 
   Map<String, String> getConfigVersions() {
-    readLock.lock();
     try {
+      readLock.lock();
       if (this.configs != null) {
         return Collections.unmodifiableMap(configs);
       } else {
@@ -878,8 +859,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public Map<String, Config> getConfigs() throws AmbariException {
-    readLock.lock();
     try {
+      readLock.lock();
       Map<String, Config> map = new HashMap<String, Config>();
       Cluster cluster = clusters.getClusterById(getClusterId());
       for (Entry<String, String> entry : configs.entrySet()) {
@@ -898,8 +879,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Transactional
   void setConfigs(Map<String, String> configs) {
-    writeLock.lock();
     try {
+      writeLock.lock();
 
       Set<String> deletedTypes = new HashSet<String>();
       for (String type : this.configs.keySet()) {
@@ -990,8 +971,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public StackId getStackVersion() {
-    readLock.lock();
     try {
+      readLock.lock();
       return gson.fromJson(stateEntity.getCurrentStackVersion(), StackId.class);
     }
     finally {
@@ -1001,8 +982,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public void setStackVersion(StackId stackVersion) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       stateEntity.setCurrentStackVersion(gson.toJson(stackVersion));
       saveIfPersisted();
     }
@@ -1014,8 +995,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public State getDesiredState() {
-    readLock.lock();
     try {
+      readLock.lock();
       return desiredStateEntity.getDesiredState();
     }
     finally {
@@ -1025,8 +1006,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public void setDesiredState(State state) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       desiredStateEntity.setDesiredState(state);
       saveIfPersisted();
     }
@@ -1037,8 +1018,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public Map<String, String> getDesiredConfigVersionsRecursive() {
-    readLock.lock();
     try {
+      readLock.lock();
       Map<String, String> fullDesiredConfigVersions =
           new HashMap<String, String>();
       Map<String, Config> desiredConfs = getDesiredConfigs();
@@ -1056,8 +1037,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   @Override
   public Map<String, Config> getDesiredConfigs() {
     Map<String, Config> map = new HashMap<String, Config>();
-    readLock.lock();
     try {
+      readLock.lock();
       for (Entry<String, String> entry : desiredConfigs.entrySet()) {
         Config config = clusters.getClusterById(getClusterId()).getConfig(
             entry.getKey(), entry.getValue());
@@ -1086,8 +1067,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   @Override
   @Transactional
   public void updateDesiredConfigs(Map<String, Config> configs) {
-    writeLock.lock();
     try {
+      writeLock.lock();
 
       for (Entry<String,Config> entry : configs.entrySet()) {
 
@@ -1128,8 +1109,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public StackId getDesiredStackVersion() {
-    readLock.lock();
     try {
+      readLock.lock();
       return gson.fromJson(desiredStateEntity.getDesiredStackVersion(), StackId.class);
     }
     finally {
@@ -1139,8 +1120,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public void setDesiredStackVersion(StackId stackVersion) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       desiredStateEntity.setDesiredStackVersion(gson.toJson(stackVersion));
       saveIfPersisted();
     }
@@ -1151,8 +1132,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public ServiceComponentHostResponse convertToResponse() {
-    readLock.lock();
     try {
+      readLock.lock();
       ServiceComponentHostResponse r = new ServiceComponentHostResponse(
           serviceComponent.getClusterName(),
           serviceComponent.getServiceName(),
@@ -1176,19 +1157,13 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public String getClusterName() {
-    readLock.lock();
-    try {
-      return serviceComponent.getClusterName();
-    } finally {
-      readLock.unlock();
-    }
-
+    return serviceComponent.getClusterName();
   }
 
   @Override
   public void debugDump(StringBuilder sb) {
-    readLock.lock();
     try {
+      readLock.lock();
       sb.append("ServiceComponentHost={ hostname=" + getHostName()
           + ", serviceComponentName=" + serviceComponent.getName()
           + ", clusterName=" + serviceComponent.getClusterName()
@@ -1206,8 +1181,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public boolean isPersisted() {
-    readLock.lock();
     try {
+      readLock.lock();
       return persisted;
     } finally {
       readLock.unlock();
@@ -1216,8 +1191,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public void persist() {
-    writeLock.lock();
     try {
+      writeLock.lock();
       if (!persisted) {
         persistEntities();
         refresh();
@@ -1260,29 +1235,23 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   @Transactional
-  public void refresh() {
-    writeLock.lock();
-    try {
-      if (isPersisted()) {
-        HostComponentStateEntityPK pk = new HostComponentStateEntityPK();
-        HostComponentDesiredStateEntityPK dpk = new HostComponentDesiredStateEntityPK();
-        pk.setClusterId(getClusterId());
-        pk.setComponentName(getServiceComponentName());
-        pk.setServiceName(getServiceName());
-        pk.setHostName(getHostName());
-        dpk.setClusterId(getClusterId());
-        dpk.setComponentName(getServiceComponentName());
-        dpk.setServiceName(getServiceName());
-        dpk.setHostName(getHostName());
-        stateEntity = hostComponentStateDAO.findByPK(pk);
-        desiredStateEntity = hostComponentDesiredStateDAO.findByPK(dpk);
-        hostComponentStateDAO.refresh(stateEntity);
-        hostComponentDesiredStateDAO.refresh(desiredStateEntity);
-      }
-    } finally {
-      writeLock.unlock();
+  public synchronized void refresh() {
+    if (isPersisted()) {
+      HostComponentStateEntityPK pk = new HostComponentStateEntityPK();
+      HostComponentDesiredStateEntityPK dpk = new HostComponentDesiredStateEntityPK();
+      pk.setClusterId(getClusterId());
+      pk.setComponentName(getServiceComponentName());
+      pk.setServiceName(getServiceName());
+      pk.setHostName(getHostName());
+      dpk.setClusterId(getClusterId());
+      dpk.setComponentName(getServiceComponentName());
+      dpk.setServiceName(getServiceName());
+      dpk.setHostName(getHostName());
+      stateEntity = hostComponentStateDAO.findByPK(pk);
+      desiredStateEntity = hostComponentDesiredStateDAO.findByPK(dpk);
+      hostComponentStateDAO.refresh(stateEntity);
+      hostComponentDesiredStateDAO.refresh(desiredStateEntity);
     }
-
   }
 
   @Transactional
@@ -1294,9 +1263,9 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   }
 
   @Override
-  public boolean canBeRemoved() {
-    readLock.lock();
+  public synchronized boolean canBeRemoved() {
     try {
+      readLock.lock();
 
       return (getDesiredState().isRemovableState() &&
               getState().isRemovableState());
@@ -1308,8 +1277,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public void deleteDesiredConfigs(Set<String> configTypes) {
-    writeLock.lock();
     try {
+      writeLock.lock();
       hostComponentDesiredConfigMappingDAO.removeByType(configTypes);
       for (String configType : configTypes) {
         desiredConfigs.remove(configType);
@@ -1321,8 +1290,8 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
 
   @Override
   public void delete() {
-    writeLock.lock();
     try {
+      writeLock.lock();
       if (persisted) {
         removeEntities();
         persisted = false;
@@ -1355,53 +1324,35 @@ public class ServiceComponentHostImpl implements ServiceComponentHost {
   
   @Override
   public void updateActualConfigs(Map<String, Map<String, String>> configTags) {
-    writeLock.lock();
-    try {
-      actualConfigs = new HashMap<String, DesiredConfig>();
-
-      String hostName = getHostName();
-
-      for (Entry<String, Map<String, String>> entry : configTags.entrySet()) {
-        String type = entry.getKey();
-        Map<String, String> values = entry.getValue();
-
-        String tag = values.get("tag");
-        String hostTag = values.get("host_override_tag");
-
-        DesiredConfig dc = new DesiredConfig();
-        dc.setVersion(tag);
-        actualConfigs.put(type, dc);
-        if (null != hostTag && null != hostName) {
-          List<HostOverride> list = new ArrayList<HostOverride>();
-          list.add(new HostOverride(hostName, hostTag));
-          dc.setHostOverrides(list);
-        }
+    actualConfigs = new HashMap<String, DesiredConfig>();
+    
+    String hostName = getHostName();
+    
+    for (Entry<String, Map<String,String>> entry : configTags.entrySet()) {
+      String type = entry.getKey();
+      Map<String, String> values = entry.getValue();
+      
+      String tag = values.get("tag");
+      String hostTag = values.get("host_override_tag");
+      
+      DesiredConfig dc = new DesiredConfig();
+      dc.setVersion(tag);
+      actualConfigs.put(type, dc);
+      if (null != hostTag && null != hostName) {
+        List<HostOverride> list = new ArrayList<HostOverride>();
+        list.add (new HostOverride(hostName, hostTag));
+        dc.setHostOverrides(list);
       }
-    } finally {
-      writeLock.unlock();
     }
-
   }
   
   @Override
   public Map<String, DesiredConfig> getActualConfigs() {
-    readLock.lock();
-    try {
-      return actualConfigs;
-    } finally {
-      readLock.unlock();
-    }
-
+    return actualConfigs;
   }
 
   @Override
   public HostState getHostState() {
-    readLock.lock();
-    try {
-      return host.getState();
-    } finally {
-      readLock.unlock();
-    }
-
+    return host.getState();
   }
 }

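Several ServiceComponentHostImpl hunks above move readLock.lock()/writeLock.lock() from just before the try block to just inside it. For reference, the usage documented for java.util.concurrent.locks.Lock acquires the lock before entering try, so the unlock() in finally only runs once the lock is actually held. A minimal sketch follows; the field names are illustrative and not taken from the patch.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative sketch only; not part of the Ambari sources.
class LockIdiomExample {
  private final Lock lock = new ReentrantLock();
  private long lastOpStartTime;

  long getLastOpStartTime() {
    lock.lock();        // acquire before try: if lock() itself fails, finally never calls unlock()
    try {
      return lastOpStartTime;
    } finally {
      lock.unlock();    // always released once the guarded body has run
    }
  }

  void setLastOpStartTime(long value) {
    lock.lock();
    try {
      lastOpStartTime = value;
    } finally {
      lock.unlock();
    }
  }
}
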
+ 1 - 1
ambari-server/src/main/resources/Ambari-DDL-Postgres-CREATE.sql

@@ -177,7 +177,7 @@ insert into ambari.user_roles(role_name, user_id)
 select 'admin',1;
 
 insert into ambari.metainfo(metainfo_key, metainfo_value)
-select 'version','${ambariVersion}';
+select 'version','1.3.0';
 
 COMMIT;
 

+ 0 - 1058
ambari-server/src/main/resources/ganglia_properties.json

@@ -6223,1064 +6223,6 @@
       }
     },
 
-    "RESOURCEMANAGER":{
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/yarn/Queue/running_0":{
-        "metric":"yarn.Queue.running_0",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/running_60":{
-        "metric":"yarn.Queue.running_60",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/running_300":{
-        "metric":"yarn.Queue.running_300",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/running_1440":{
-        "metric":"yarn.Queue.running_1440",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AppsSubmitted":{
-        "metric":"yarn.Queue.AppsSubmitted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AppsRunning":{
-        "metric":"yarn.Queue.AppsRunning",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AppsPending":{
-        "metric":"yarn.Queue.AppsPending",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AppsCompleted":{
-        "metric":"yarn.Queue.AppsCompleted",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AppsKilled":{
-        "metric":"yarn.Queue.AppsKilled",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AppsFailed":{
-        "metric":"yarn.Queue.AppsFailed",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AllocatedMB":{
-        "metric":"yarn.Queue.AllocatedMB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AllocatedContainers":{
-        "metric":"yarn.Queue.AllocatedContainers",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AggregateContainersAllocated":{
-        "metric":"yarn.Queue.AggregateContainersAllocated",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AggregateContainersReleased":{
-        "metric":"yarn.Queue.AggregateContainersReleased",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/AvailableMB":{
-        "metric":"yarn.Queue.AvailableMB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/PendingMB":{
-        "metric":"yarn.Queue.PendingMB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/PendingContainers":{
-        "metric":"yarn.Queue.PendingContainers",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/ReservedMB":{
-        "metric":"yarn.Queue.ReservedMB",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/ReservedContainers":{
-        "metric":"yarn.Queue.ReservedContainers",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/ActiveUsers":{
-        "metric":"yarn.Queue.ActiveUsers",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/yarn/Queue/ActiveApplications":{
-        "metric":"yarn.Queue.ActiveApplications",
-        "pointInTime":false,
-        "temporal":true
-      },
-
-      "metrics/ugi/LoginSuccessNumOps":{
-        "metric":"ugi.ugi.LoginSuccessNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginSuccessAvgTime":{
-        "metric":"ugi.ugi.LoginSuccessAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginFailureNumOps":{
-        "metric":"ugi.ugi.LoginFailureNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginFailureAvgTime":{
-        "metric":"ugi.ugi.LoginFailureAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.metrics.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.metrics.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTimeNumOps":{
-        "metric":"rpc.metrics.RpcQueueTimeNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTimeAvgTime":{
-        "metric":"rpc.metrics.RpcQueueTimeAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTimeNumOps":{
-        "metric":"rpc.metrics.RpcProcessingTimeNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTimeAvgTime":{
-        "metric":"rpc.metrics.RpcProcessingTimeAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthenticationFailures":{
-        "metric":"rpc.metrics.RpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthenticationSuccesses":{
-        "metric":"rpc.metrics.RpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthorizationFailures":{
-        "metric":"rpc.metrics.RpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthorizationSuccesses":{
-        "metric":"rpc.metrics.RpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.metrics.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/CallQueueLength":{
-        "metric":"rpc.metrics.CallQueueLength",
-        "pointInTime":false,
-        "temporal":true
-      },
-
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/jvm/MemNonHeapUsedM":{
-        "metric":"jvm.metrics.MemNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemNonHeapCommittedM":{
-        "metric":"jvm.metrics.MemNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemHeapUsedM":{
-        "metric":"jvm.metrics.MemHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemHeapCommittedM":{
-        "metric":"jvm.metrics.MemHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/GcCount":{
-        "metric":"jvm.metrics.GcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/GcTimeMillis":{
-        "metric":"jvm.metrics.GcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogFatal":{
-        "metric":"jvm.metrics.LogFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogError":{
-        "metric":"jvm.metrics.LogError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogWarn":{
-        "metric":"jvm.metrics.LogWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogInfo":{
-        "metric":"jvm.metrics.LogInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsNew":{
-        "metric":"jvm.metrics.ThreadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsRunnable":{
-        "metric":"jvm.metrics.ThreadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsBlocked":{
-        "metric":"jvm.metrics.ThreadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsWaiting":{
-        "metric":"jvm.metrics.ThreadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsTimedWaiting":{
-        "metric":"jvm.metrics.ThreadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsTerminated":{
-        "metric":"jvm.metrics.ThreadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      }
-
-    },
-
-    "NODEMANAGER":{
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/mapred/ShuffleOutputBytes":{
-        "metric":"mapred.ShuffleOutputBytes",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/ShuffleOutputsFailed":{
-        "metric":"mapred.ShuffleOutputsFailed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/ShuffleOutputsOK":{
-        "metric":"mapred.ShuffleOutputsOK",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/mapred/ShuffleConnections":{
-        "metric":"mapred.ShuffleConnections",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/yarn/ContainersLaunched":{
-        "metric":"yarn.ContainersLaunched",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/ContainersCompleted":{
-        "metric":"yarn.ContainersCompleted",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/ContainersFailed":{
-        "metric":"yarn.ContainersFailed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/ContainersKilled":{
-        "metric":"yarn.ContainersKilled",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/ContainersIniting":{
-        "metric":"yarn.ContainersIniting",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/ContainersRunning":{
-        "metric":"yarn.ContainersRunning",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/AllocatedGB":{
-        "metric":"yarn.AllocatedGB",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/AllocatedContainers":{
-        "metric":"yarn.AllocatedContainers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/yarn/AvailableGB":{
-        "metric":"yarn.AvailableGB",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/jvm/MemNonHeapUsedM":{
-        "metric":"jvm.metrics.MemNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemNonHeapCommittedM":{
-        "metric":"jvm.metrics.MemNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemHeapUsedM":{
-        "metric":"jvm.metrics.MemHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemHeapCommittedM":{
-        "metric":"jvm.metrics.MemHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/GcCount":{
-        "metric":"jvm.metrics.GcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/GcTimeMillis":{
-        "metric":"jvm.metrics.GcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogFatal":{
-        "metric":"jvm.metrics.LogFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogError":{
-        "metric":"jvm.metrics.LogError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogWarn":{
-        "metric":"jvm.metrics.LogWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogInfo":{
-        "metric":"jvm.metrics.LogInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsNew":{
-        "metric":"jvm.metrics.ThreadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsRunnable":{
-        "metric":"jvm.metrics.ThreadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsBlocked":{
-        "metric":"jvm.metrics.ThreadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsWaiting":{
-        "metric":"jvm.metrics.ThreadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsTimedWaiting":{
-        "metric":"jvm.metrics.ThreadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsTerminated":{
-        "metric":"jvm.metrics.ThreadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.metrics.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.metrics.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTimeNumOps":{
-        "metric":"rpc.metrics.RpcQueueTimeNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTimeAvgTime":{
-        "metric":"rpc.metrics.RpcQueueTimeAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTimeNumOps":{
-        "metric":"rpc.metrics.RpcProcessingTimeNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTimeAvgTime":{
-        "metric":"rpc.metrics.RpcProcessingTimeAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthenticationFailures":{
-        "metric":"rpc.metrics.RpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthenticationSuccesses":{
-        "metric":"rpc.metrics.RpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthorizationFailures":{
-        "metric":"rpc.metrics.RpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthorizationSuccesses":{
-        "metric":"rpc.metrics.RpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.metrics.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/CallQueueLength":{
-        "metric":"rpc.metrics.CallQueueLength",
-        "pointInTime":false,
-        "temporal":true
-      },
-
-      "metrics/ugi/LoginSuccessNumOps":{
-        "metric":"ugi.ugi.LoginSuccessNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginSuccessAvgTime":{
-        "metric":"ugi.ugi.LoginSuccessAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginFailureNumOps":{
-        "metric":"ugi.ugi.LoginFailureNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginFailureAvgTime":{
-        "metric":"ugi.ugi.LoginFailureAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      }
-
-    },
-
-    "HISTORYSERVER":{
-      "metrics/boottime":{
-        "metric":"boottime",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_aidle":{
-        "metric":"cpu_aidle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_idle":{
-        "metric":"cpu_idle",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_nice":{
-        "metric":"cpu_nice",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_num":{
-        "metric":"cpu_num",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_speed":{
-        "metric":"cpu_speed",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_system":{
-        "metric":"cpu_system",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_user":{
-        "metric":"cpu_user",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/cpu/cpu_wio":{
-        "metric":"cpu_wio",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_free":{
-        "metric":"disk_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/disk_total":{
-        "metric":"disk_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/disk/part_max_used":{
-        "metric":"part_max_used",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/memory/mem_buffers":{
-        "metric":"mem_buffers",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_cached":{
-        "metric":"mem_cached",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_free":{
-        "metric":"mem_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_shared":{
-        "metric":"mem_shared",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/mem_total":{
-        "metric":"mem_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_free":{
-        "metric":"swap_free",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/memory/swap_total":{
-        "metric":"swap_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/network/bytes_in":{
-        "metric":"bytes_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/bytes_out":{
-        "metric":"bytes_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_in":{
-        "metric":"pkts_in",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/network/pkts_out":{
-        "metric":"pkts_out",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/process/proc_run":{
-        "metric":"proc_run",
-        "pointInTime":true,
-        "temporal":true
-      },
-      "metrics/process/proc_total":{
-        "metric":"proc_total",
-        "pointInTime":true,
-        "temporal":true
-      },
-
-      "metrics/jvm/MemNonHeapUsedM":{
-        "metric":"jvm.metrics.MemNonHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemNonHeapCommittedM":{
-        "metric":"jvm.metrics.MemNonHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemHeapUsedM":{
-        "metric":"jvm.metrics.MemHeapUsedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/MemHeapCommittedM":{
-        "metric":"jvm.metrics.MemHeapCommittedM",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/GcCount":{
-        "metric":"jvm.metrics.GcCount",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/GcTimeMillis":{
-        "metric":"jvm.metrics.GcTimeMillis",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogFatal":{
-        "metric":"jvm.metrics.LogFatal",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogError":{
-        "metric":"jvm.metrics.LogError",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogWarn":{
-        "metric":"jvm.metrics.LogWarn",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/LogInfo":{
-        "metric":"jvm.metrics.LogInfo",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsNew":{
-        "metric":"jvm.metrics.ThreadsNew",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsRunnable":{
-        "metric":"jvm.metrics.ThreadsRunnable",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsBlocked":{
-        "metric":"jvm.metrics.ThreadsBlocked",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsWaiting":{
-        "metric":"jvm.metrics.ThreadsWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsTimedWaiting":{
-        "metric":"jvm.metrics.ThreadsTimedWaiting",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/jvm/ThreadsTerminated":{
-        "metric":"jvm.metrics.ThreadsTerminated",
-        "pointInTime":false,
-        "temporal":true
-      },
-
-      "metrics/rpc/ReceivedBytes":{
-        "metric":"rpc.metrics.ReceivedBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/SentBytes":{
-        "metric":"rpc.metrics.SentBytes",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTimeNumOps":{
-        "metric":"rpc.metrics.RpcQueueTimeNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcQueueTimeAvgTime":{
-        "metric":"rpc.metrics.RpcQueueTimeAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTimeNumOps":{
-        "metric":"rpc.metrics.RpcProcessingTimeNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcProcessingTimeAvgTime":{
-        "metric":"rpc.metrics.RpcProcessingTimeAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthenticationFailures":{
-        "metric":"rpc.metrics.RpcAuthenticationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthenticationSuccesses":{
-        "metric":"rpc.metrics.RpcAuthenticationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthorizationFailures":{
-        "metric":"rpc.metrics.RpcAuthorizationFailures",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/RpcAuthorizationSuccesses":{
-        "metric":"rpc.metrics.RpcAuthorizationSuccesses",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/NumOpenConnections":{
-        "metric":"rpc.metrics.NumOpenConnections",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/rpc/CallQueueLength":{
-        "metric":"rpc.metrics.CallQueueLength",
-        "pointInTime":false,
-        "temporal":true
-      },
-
-      "metrics/ugi/LoginSuccessNumOps":{
-        "metric":"ugi.ugi.LoginSuccessNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginSuccessAvgTime":{
-        "metric":"ugi.ugi.LoginSuccessAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginFailureNumOps":{
-        "metric":"ugi.ugi.LoginFailureNumOps",
-        "pointInTime":false,
-        "temporal":true
-      },
-      "metrics/ugi/LoginFailureAvgTime":{
-        "metric":"ugi.ugi.LoginFailureAvgTime",
-        "pointInTime":false,
-        "temporal":true
-      }
-
-    },
-
     "HBASE_MASTER":{
 
       "metrics/boottime":{

+ 1 - 1
ambari-server/src/main/resources/mysql-ddl.sql

@@ -102,7 +102,7 @@ insert into ambari.user_roles(role_name, user_id)
   select 'admin',1;
 
 insert into ambari.metainfo(`metainfo_key`, `metainfo_value`)
-  select 'version','${ambariVersion}';
+  select 'version','1.3.0';
 
 
 

+ 1 - 1
ambari-server/src/main/resources/oracle-DDL.sql

@@ -117,7 +117,7 @@ GRANT ALL ON metainfo TO &1;
 INSERT INTO ambari_sequences(sequence_name, value) values ('host_role_command_id_seq', 0);
 INSERT INTO ambari_sequences(sequence_name, value) values ('user_id_seq', 1);
 INSERT INTO ambari_sequences(sequence_name, value) values ('cluster_id_seq', 0);
-INSERT INTO metainfo("metainfo_key", "metainfo_value") values ('version', '${ambariVersion}');
+INSERT INTO metainfo("metainfo_key", "metainfo_value") values ('version', '1.3.0');
 
 insert into Roles(role_name)
 select 'admin' from dual

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5</version>
 
     <components>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.1/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5</version>
 
     <components>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.0/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5.1.3.0.0</version>
 
     <components>

+ 0 - 22
ambari-server/src/main/resources/stacks/HDP/2.0.1/metainfo.xml

@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>true</active>
-    </versions>
-</metainfo>

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/container-executor.cfg → ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/container-executor.cfg


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/core-site.xml → ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/core-site.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-queue-acls.xml → ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/mapred-queue-acls.xml


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/configuration/mapred-site.xml → ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/configuration/mapred-site.xml


+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCE2/metainfo.xml → ambari-server/src/main/resources/stacks/HDP/2.0.1/services/MAPREDUCEv2/metainfo.xml

@@ -25,7 +25,7 @@
             <category>MASTER</category>
         </component>
         <component>
-            <name>MAPREDUCE2_CLIENT</name>
+            <name>MAPREDUCEv2_CLIENT</name>
             <category>CLIENT</category>
         </component>
     </components>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.1/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5.22-1</version>
 
     <components>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5</version>
 
     <components>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.1/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5</version>
 
     <components>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.3.0/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5.1.3.0.0</version>
 
     <components>

+ 6 - 40
ambari-server/src/main/resources/upgrade/ddl/Ambari-DDL-Postgres-UPGRADE-1.3.0.sql

@@ -26,6 +26,8 @@ ALTER TABLE ambari.hosts
 ALTER TABLE ambari.clusterstate
   ADD COLUMN current_stack_version VARCHAR(255) NOT NULL;
 
+CREATE TABLE ambari.metainfo ("metainfo_key" VARCHAR(255), "metainfo_value" VARCHAR, PRIMARY KEY("metainfo_key"));
+GRANT ALL PRIVILEGES ON TABLE ambari.metainfo TO :username;
 
 CREATE TABLE ambari.hostconfigmapping (cluster_id bigint NOT NULL, host_name VARCHAR(255) NOT NULL, type_name VARCHAR(255) NOT NULL, version_tag VARCHAR(255) NOT NULL, service_name VARCHAR(255), create_timestamp BIGINT NOT NULL, selected INTEGER NOT NULL DEFAULT 0, user_name VARCHAR(255) NOT NULL DEFAULT '_db', PRIMARY KEY (cluster_id, host_name, type_name, create_timestamp));
 GRANT ALL PRIVILEGES ON TABLE ambari.hostconfigmapping TO :username;
@@ -55,48 +57,10 @@ drop sequence ambari.host_role_command_task_id_seq;
 drop sequence ambari.users_user_id_seq;
 drop sequence ambari.clusters_cluster_id_seq;
 
-CREATE LANGUAGE plpgsql;
-
 BEGIN;
 
-CREATE OR REPLACE FUNCTION create_or_update_metainfo_table(ambariVersion VARCHAR, userName TEXT) RETURNS text AS
-$$
-DECLARE
-    version VARCHAR(255) := 'version';
-    fqtn text := 'ambari.metainfo';
-BEGIN
-    IF NOT EXISTS (SELECT * FROM pg_tables WHERE tablename = 'metainfo') THEN
-        EXECUTE 'CREATE TABLE '
-        || fqtn
-        || ' (metainfo_key VARCHAR(255), metainfo_value VARCHAR, PRIMARY KEY(metainfo_key));';
-        EXECUTE 'GRANT ALL PRIVILEGES ON TABLE '
-        || fqtn
-        || ' TO '
-        || $2
-        || ';';
-        EXECUTE 'INSERT INTO '
-        || fqtn
-        || '(metainfo_key, metainfo_value) select '
-        || quote_literal(version)
-        || ','
-        || quote_literal($1)
-        || ';';
-        RETURN 'INFO: metainfo was created';
-    ELSE
-        EXECUTE 'UPDATE '
-        || fqtn
-        || ' SET metainfo_value = '
-        || quote_literal($1)
-        || ' WHERE metainfo_key = '
-        || quote_literal(version)
-        || ';';
-        RETURN 'INFO: metainfo was updated';
-    END IF;
-END;
-$$
-LANGUAGE 'plpgsql';
-
-SELECT create_or_update_metainfo_table('${ambariVersion}',:username);
+insert into ambari.metainfo(metainfo_key, metainfo_value)
+select 'version','1.3.0';
 
 COMMIT;
 
@@ -105,6 +69,8 @@ UPDATE ambari.hostcomponentstate SET current_state = 'INSTALLED' WHERE current_s
 
 -- service to cluster level config mappings move. idempotent update
 
+CREATE LANGUAGE plpgsql;
+
 CREATE OR REPLACE FUNCTION update_clusterconfigmapping()
   RETURNS void AS
 $_$
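
The hunks above drop the conditional create_or_update_metainfo_table() plpgsql helper and instead create ambari.metainfo unconditionally near the top of the script, recording the target version with a plain INSERT inside the transaction. The following is only a minimal sketch of the resulting statements, not the full upgrade script; the table, column, and :username psql variable names are taken from the hunk, and the hard-coded '1.3.0' mirrors the new INSERT.

    -- assumes the :username psql variable is supplied by the caller, as elsewhere in this script
    CREATE TABLE ambari.metainfo (
      "metainfo_key"   VARCHAR(255),
      "metainfo_value" VARCHAR,
      PRIMARY KEY ("metainfo_key")
    );
    GRANT ALL PRIVILEGES ON TABLE ambari.metainfo TO :username;

    BEGIN;
    -- record the server version this upgrade targets
    INSERT INTO ambari.metainfo (metainfo_key, metainfo_value)
      SELECT 'version', '1.3.0';
    COMMIT;

Per the hunk, the table creation is now unconditional rather than guarded by the pg_tables existence check that the removed helper performed.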

+ 2 - 2
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java

@@ -82,7 +82,7 @@ public class AmbariMetaInfoTest {
   public void before() throws Exception {
     File stackRoot = new File("src/test/resources/stacks");
    LOG.info("Stacks file " + stackRoot.getAbsolutePath());
-    metaInfo = new AmbariMetaInfo(stackRoot, new File("target/version"));
+    metaInfo = new AmbariMetaInfo(stackRoot, new File("../version"));
     try {
       metaInfo.init();
     } catch(Exception e) {
@@ -280,7 +280,7 @@ public class AmbariMetaInfoTest {
     File stackRoot = new File("src/test/resources/stacks");
     File stackRootTmp = new File(buildDir + "/ambari-metaInfo"); stackRootTmp.mkdir();
     FileUtils.copyDirectory(stackRoot, stackRootTmp);
-    AmbariMetaInfo ambariMetaInfo = new AmbariMetaInfo(stackRootTmp, new File("target/version"));
+    AmbariMetaInfo ambariMetaInfo = new AmbariMetaInfo(stackRootTmp, new File("../version"));
     File f1, f2, f3;
     f1 = new File(stackRootTmp.getAbsolutePath() + "/001.svn"); f1.createNewFile();
     f2 = new File(stackRootTmp.getAbsolutePath() + "/abcd.svn/001.svn"); f2.mkdirs(); f2.createNewFile();

+ 2 - 2
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaServiceTest.java

@@ -67,7 +67,7 @@ public class AmbariMetaServiceTest extends JerseyTest {
     AmbariMetaInfo ambariMetaInfo;
     
     public MockModule() throws Exception {
-      this.ambariMetaInfo = new AmbariMetaInfo(stackRoot, new File("target/version"));
+      this.ambariMetaInfo = new AmbariMetaInfo(stackRoot, new File("../version"));
     }
 
     @Override
@@ -108,4 +108,4 @@ public class AmbariMetaServiceTest extends JerseyTest {
     ServiceInfo info = mapper.readValue(output, ServiceInfo.class);
     Assert.assertEquals("HDFS", info.getName());
   }
-}
+}

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java

@@ -1479,7 +1479,7 @@ public class AmbariManagementControllerImplTest {
         properties.setProperty(Configuration.METADETA_DIR_PATH,
             "src/main/resources/stacks");
         properties.setProperty(Configuration.SERVER_VERSION_FILE,
-                "target/version");
+                "../version");
         properties.setProperty(Configuration.OS_VERSION_KEY,
             "centos5");
         try {

+ 2 - 2
ambari-server/src/test/java/org/apache/ambari/server/controller/ganglia/GangliaPropertyProviderTest.java

@@ -149,7 +149,7 @@ public class GangliaPropertyProviderTest {
     Request  request = PropertyHelper.getReadRequest(Collections.singleton(PROPERTY_ID), temporalInfoMap);
 
     Assert.assertEquals(3, propertyProvider.populateResources(resources, request, null).size());
-    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPSlaves,HDPHistoryServer,HDPNameNode&h=domU-12-31-39-0E-34-E3.compute-1.internal,domU-12-31-39-0E-34-E1.compute-1.internal,domU-12-31-39-0E-34-E2.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1",
+    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPJobTracker,HDPHBaseMaster,HDPSlaves,HDPNameNode&h=domU-12-31-39-0E-34-E3.compute-1.internal,domU-12-31-39-0E-34-E1.compute-1.internal,domU-12-31-39-0E-34-E2.compute-1.internal&m=jvm.metrics.gcCount&s=10&e=20&r=1",
         streamProvider.getLastSpec());
 
     for (Resource res : resources) {
@@ -186,7 +186,7 @@ public class GangliaPropertyProviderTest {
 
     Assert.assertEquals(150, propertyProvider.populateResources(resources, request, null).size());
 
-    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPJobTracker,HDPHBaseMaster,HDPResourceManager,HDPSlaves,HDPHistoryServer,HDPNameNode&m=jvm.metrics.gcCount&s=10&e=20&r=1",
+    Assert.assertEquals("http://domU-12-31-39-0E-34-E1.compute-1.internal/cgi-bin/rrd.py?c=HDPJobTracker,HDPHBaseMaster,HDPSlaves,HDPNameNode&m=jvm.metrics.gcCount&s=10&e=20&r=1",
         streamProvider.getLastSpec());
 
   }

+ 0 - 8
ambari-server/src/test/java/org/apache/ambari/server/metadata/RoleGraphTest.java

@@ -63,13 +63,5 @@ public class RoleGraphTest {
     Assert.assertEquals(1, rco.order(nagios_server_install, mapred_client_install));
     Assert.assertEquals(1, rco.order(nagios_server_install, hcat_client_install));
     Assert.assertEquals(1, rco.order(nagios_server_install, oozie_client_install));
-
-    RoleGraphNode pig_service_check = new RoleGraphNode(Role.PIG_SERVICE_CHECK, RoleCommand.EXECUTE);
-    RoleGraphNode resourcemanager_start = new RoleGraphNode(Role.RESOURCEMANAGER, RoleCommand.START);
-    Assert.assertEquals(-1, rco.order(resourcemanager_start, pig_service_check));
-
-    RoleGraphNode hdfs_service_check = new RoleGraphNode(Role.HDFS_SERVICE_CHECK, RoleCommand.EXECUTE);
-    RoleGraphNode snamenode_start = new RoleGraphNode(Role.SECONDARY_NAMENODE, RoleCommand.START);
-    Assert.assertEquals(-1, rco.order(snamenode_start, hdfs_service_check));
   }
 }

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/orm/InMemoryDefaultTestModule.java

@@ -33,7 +33,7 @@ public class InMemoryDefaultTestModule extends AbstractModule {
     properties.setProperty(Configuration.METADETA_DIR_PATH,
         "src/test/resources/stacks");
     properties.setProperty(Configuration.SERVER_VERSION_FILE,
-            "target/version");
+            "../version");
     properties.setProperty(Configuration.OS_VERSION_KEY,
         "centos5");
     try {

+ 1 - 1
ambari-server/src/test/java/org/apache/ambari/server/security/authorization/AuthorizationTestModule.java

@@ -35,7 +35,7 @@ public class AuthorizationTestModule extends AbstractModule {
     properties.setProperty(Configuration.METADETA_DIR_PATH,
         "src/test/resources/stacks");
     properties.setProperty(Configuration.SERVER_VERSION_FILE,
-        "target/version");
+        "../version");
     properties.setProperty(Configuration.OS_VERSION_KEY,
         "centos5");
 

+ 1 - 1
ambari-server/src/test/resources/stacks/HDP/0.2/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>1.0</version>
 
 

+ 1 - 1
ambari-server/src/test/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml

@@ -17,7 +17,7 @@
 -->
 <metainfo>
     <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
+    <comment>This is comment for ZOOKEEPER service</comment>
     <version>3.4.5</version>
 
     <components>

+ 1 - 4
ambari-web/app/app.js

@@ -51,10 +51,7 @@ module.exports = Em.Application.create({
     return '/stacks2/HDP/versions/' + stackVersion.replace(/HDP-/g, '');
   }.property('currentStackVersion'),
   clusterName: null,
-  currentStackVersion: '',
-  currentStackVersionNumber: function(){
-    return this.get('currentStackVersion').replace(/HDP(Local)?-/, '');
-  }.property('currentStackVersion')
+  currentStackVersion: null
 });
 
 /**

+ 0 - 137
ambari-web/app/assets/data/wizard/stack/hdp/version/1.2.1.json

@@ -1,137 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices?fields=StackServices",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "comments" : "This is comment for WEBHCAT service",
-        "service_version" : "0.5.0"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/GANGLIA",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "GANGLIA",
-        "stack_name" : "HDP",
-        "comments" : "Ganglia Metrics Collection system",
-        "service_version" : "3.2.0"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/NAGIOS",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "NAGIOS",
-        "stack_name" : "HDP",
-        "comments" : "Nagios Monitoring and Alerting system",
-        "service_version" : "3.2.3"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE",
-      "StackServices" : {
-        "user_name" : "mapred",
-        "stack_version" : "1.2.1",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "comments" : "Non-relational distributed database and centralized service for configuration management & synchronization",
-        "service_version" : "0.94.5"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/SQOOP",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "SQOOP",
-        "stack_name" : "HDP",
-        "comments" : "Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases",
-        "service_version" : "1.4.2"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "comments" : "Apache Hadoop Distributed File System",
-        "service_version" : "1.1.2"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE",
-      "StackServices" : {
-        "user_name" : "mapred",
-        "stack_version" : "1.2.1",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "comments" : "Apache Hadoop Distributed Processing Framework",
-        "service_version" : "1.1.2"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/PIG",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "PIG",
-        "stack_name" : "HDP",
-        "comments" : "Scripting platform for analyzing large datasets",
-        "service_version" : "0.10.1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/ZOOKEEPER",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "ZOOKEEPER",
-        "stack_name" : "HDP",
-        "comments" : "This is comment for ZOOKEEPER service",
-        "service_version" : "3.4.5"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "comments" : "System for workflow coordination and execution of Apache Hadoop jobs",
-        "service_version" : "3.2.0"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HCATALOG",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "HCATALOG",
-        "stack_name" : "HDP",
-        "comments" : "This is comment for HCATALOG service",
-        "service_version" : "0.5.0"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "1.2.1",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
-        "service_version" : "0.10.0"
-      }
-    }
-  ]
-}

+ 0 - 148
ambari-web/app/assets/data/wizard/stack/hdp/version/2.0.1.json

@@ -1,148 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices?fields=StackServices",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2",
-      "StackServices" : {
-        "user_name" : "mapred",
-        "stack_version" : "2.0.1",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "comments" : "Apache Hadoop NextGen MapReduce (client libraries)",
-        "service_version" : "2.0.3.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/OOZIE",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "comments" : "System for workflow coordination and execution of Apache Hadoop jobs",
-        "service_version" : "3.3.1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/PIG",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "PIG",
-        "stack_name" : "HDP",
-        "comments" : "Scripting platform for analyzing large datasets",
-        "service_version" : "0.10.1.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HCATALOG",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "HCATALOG",
-        "stack_name" : "HDP",
-        "comments" : "This is comment for HCATALOG service",
-        "service_version" : "0.5.0.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/WEBHCAT",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "comments" : "This is comment for WEBHCAT service",
-        "service_version" : "0.5.0"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/ZOOKEEPER",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "ZOOKEEPER",
-        "stack_name" : "HDP",
-        "comments" : "This is comment for ZOOKEEPER service",
-        "service_version" : "3.4.5.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/GANGLIA",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "GANGLIA",
-        "stack_name" : "HDP",
-        "comments" : "Ganglia Metrics Collection system",
-        "service_version" : "3.2.0"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HBASE",
-      "StackServices" : {
-        "user_name" : "mapred",
-        "stack_version" : "2.0.1",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "comments" : "Non-relational distributed database and centralized service for configuration management & synchronization",
-        "service_version" : "0.94.5.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HIVE",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "comments" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
-        "service_version" : "0.10.0.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/YARN",
-      "StackServices" : {
-        "user_name" : "mapred",
-        "stack_version" : "2.0.1",
-        "service_name" : "YARN",
-        "stack_name" : "HDP",
-        "comments" : "Apache Hadoop NextGen MapReduce (YARN)",
-        "service_version" : "2.0.3.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/NAGIOS",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "NAGIOS",
-        "stack_name" : "HDP",
-        "comments" : "Nagios Monitoring and Alerting system",
-        "service_version" : "3.2.3"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/TEZ",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "TEZ",
-        "stack_name" : "HDP",
-        "comments" : "Tez is the next generation Hadoop Query Processing framework written on top of YARN",
-        "service_version" : "0.1.0.22-1"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/HDFS",
-      "StackServices" : {
-        "user_name" : "root",
-        "stack_version" : "2.0.1",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "comments" : "Apache Hadoop Distributed File System",
-        "service_version" : "2.0.3.22-1"
-      }
-    }
-  ]
-}

+ 0 - 113
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HBASE.json

@@ -1,113 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.master.lease.thread.wakefrequency",
-      "StackConfigurations" : {
-        "property_description" : "The interval between checks for expired region server leases.\n    This value has been reduced due to the other reduced values above so that\n    the master will notice a dead region server sooner. The default is 15 seconds.\n    ",
-        "property_value" : "3000",
-        "stack_version" : "1.2.1",
-        "property_name" : "hbase.master.lease.thread.wakefrequency",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.superuser",
-      "StackConfigurations" : {
-        "property_description" : "List of users or groups (comma-separated), who are allowed\n    full privileges, regardless of stored ACLs, across the cluster.\n    Only used when HBase security is enabled.\n    ",
-        "property_value" : "hbase",
-        "stack_version" : "1.2.1",
-        "property_name" : "hbase.superuser",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/security.client.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for HRegionInterface protocol implementations (ie. \n    clients talking to HRegionServers)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.client.protocol.acl",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/security.admin.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for HMasterInterface protocol implementation (ie. \n    clients talking to HMaster for admin operations).\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.admin.protocol.acl",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/security.masterregion.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for HMasterRegionInterface protocol implementations\n    (for HRegionServers communicating with HMaster)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.masterregion.protocol.acl",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.zookeeper.useMulti",
-      "StackConfigurations" : {
-        "property_description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n    This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).·\n    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will\n    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n    ",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "hbase.zookeeper.useMulti",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.zookeeper.property.clientPort",
-      "StackConfigurations" : {
-        "property_description" : "Property from ZooKeeper's config zoo.cfg.\n    The port at which the clients will connect.\n    ",
-        "property_value" : "2181",
-        "stack_version" : "1.2.1",
-        "property_name" : "hbase.zookeeper.property.clientPort",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.cluster.distributed",
-      "StackConfigurations" : {
-        "property_description" : "The mode the cluster will be in. Possible values are\n      false for standalone mode and true for distributed mode.  If\n      false, startup will run all HBase and ZooKeeper daemons together\n      in the one JVM.\n    ",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "hbase.cluster.distributed",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HBASE/configurations/hbase.regionserver.optionalcacheflushinterval",
-      "StackConfigurations" : {
-        "property_description" : "\n      Amount of time to wait since the last time a region was flushed before\n      invoking an optional cache flush. Default 60,000.\n    ",
-        "property_value" : "10000",
-        "stack_version" : "1.2.1",
-        "property_name" : "hbase.regionserver.optionalcacheflushinterval",
-        "service_name" : "HBASE",
-        "stack_name" : "HDP",
-        "type" : "hbase-site.xml"
-      }
-    }
-  ]
-}

+ 0 - 4
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HCATALOG.json

@@ -1,4 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HCATALOG/configurations?fields=*",
-  "items" : [ ]
-}

+ 0 - 533
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HDFS.json

@@ -1,533 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.client.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for ClientProtocol, which is used by user code\n    via the DistributedFileSystem.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.client.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
-      "StackConfigurations" : {
-        "property_description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
-        "property_value" : "6250000",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.balance.bandwidthPerSec",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.block.size",
-      "StackConfigurations" : {
-        "property_description" : "The default block size for new files.",
-        "property_value" : "134217728",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.block.size",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.secondary.https.port",
-      "StackConfigurations" : {
-        "property_description" : "The https port where secondary-namenode binds",
-        "property_value" : "50490",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.secondary.https.port",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.checkpoint.size",
-      "StackConfigurations" : {
-        "property_description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
-        "property_value" : "536870912",
-        "stack_version" : "1.2.1",
-        "property_name" : "fs.checkpoint.size",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.checkpoint.period",
-      "StackConfigurations" : {
-        "property_description" : "The number of seconds between two periodic checkpoints.\n  ",
-        "property_value" : "21600",
-        "stack_version" : "1.2.1",
-        "property_name" : "fs.checkpoint.period",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
-      "StackConfigurations" : {
-        "property_description" : "PRIVATE CONFIG VARIABLE",
-        "property_value" : "4096",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.datanode.max.xcievers",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.permissions.supergroup",
-      "StackConfigurations" : {
-        "property_description" : "The name of the group of super-users.",
-        "property_value" : "hdfs",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.permissions.supergroup",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.access.time.precision",
-      "StackConfigurations" : {
-        "property_description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
-        "property_value" : "0",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.access.time.precision",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/webinterface.private.actions",
-      "StackConfigurations" : {
-        "property_description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "webinterface.private.actions",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.web.ugi",
-      "StackConfigurations" : {
-        "property_description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
-        "property_value" : "gopher,gopher",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.web.ugi",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.umaskmode",
-      "StackConfigurations" : {
-        "property_description" : "\nThe octal umask used when creating files and directories.\n",
-        "property_value" : "077",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.umaskmode",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
-      "StackConfigurations" : {
-        "property_description" : "DFS Client write socket timeout",
-        "property_value" : "0",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.datanode.socket.write.timeout",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.block.access.token.enable",
-      "StackConfigurations" : {
-        "property_description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.block.access.token.enable",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for TaskUmbilicalProtocol, used by the map and reduce\n    tasks to communicate with the parent tasktracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.task.umbilical.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for InterTrackerProtocol, used by the tasktrackers to\n    communicate with the jobtracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.inter.tracker.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.du.pct",
-      "StackConfigurations" : {
-        "property_description" : "When calculating remaining space, only use this percentage of the real available space\n",
-        "property_value" : "0.85f",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.datanode.du.pct",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.file.buffer.size",
-      "StackConfigurations" : {
-        "property_description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
-        "property_value" : "131072",
-        "stack_version" : "1.2.1",
-        "property_name" : "io.file.buffer.size",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for InterDatanodeProtocol, the inter-datanode protocol\n    for updating generation timestamp.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.inter.datanode.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.permissions",
-      "StackConfigurations" : {
-        "property_description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.permissions",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
-      "StackConfigurations" : {
-        "property_description" : "Defines the maximum number of retries for IPC connections.",
-        "property_value" : "50",
-        "stack_version" : "1.2.1",
-        "property_name" : "ipc.client.connect.max.retries",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.namenode.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "Added to grow Queue size so that more client connections are allowed",
-        "property_value" : "100",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.namenode.handler.count",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for JobSubmissionProtocol, used by job clients to\n    communciate with the jobtracker for job submission, querying job status etc.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.job.submission.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
-      "StackConfigurations" : {
-        "property_description" : "Delay for first block report in seconds.",
-        "property_value" : "120",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.blockreport.initialDelay",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.heartbeat.interval",
-      "StackConfigurations" : {
-        "property_description" : "Determines datanode heartbeat interval in seconds.",
-        "property_value" : "3",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.heartbeat.interval",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
-      "StackConfigurations" : {
-        "property_description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
-        "property_value" : "30000",
-        "stack_version" : "1.2.1",
-        "property_name" : "ipc.client.connection.maxidletime",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.compression.codecs",
-      "StackConfigurations" : {
-        "property_description" : "A list of the compression codec classes that can be used\n                 for compression/decompression.",
-        "property_value" : "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec",
-        "stack_version" : "1.2.1",
-        "property_name" : "io.compression.codecs",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.server.max.response.size",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "5242880",
-        "stack_version" : "1.2.1",
-        "property_name" : "ipc.server.max.response.size",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.namenode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for NamenodeProtocol, the protocol used by the secondary\n    namenode to communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.namenode.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "5",
-        "stack_version" : "1.2.1",
-        "property_name" : "ipc.server.read.threadpool.size",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
-      "StackConfigurations" : {
-        "property_description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
-        "property_value" : "0.0.0.0:8010",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.datanode.ipc.address",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.cluster.administrators",
-      "StackConfigurations" : {
-        "property_description" : "ACL for who all can view the default servlets in the HDFS",
-        "property_value" : " hdfs",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.cluster.administrators",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.trash.interval",
-      "StackConfigurations" : {
-        "property_description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
-        "property_value" : "360",
-        "stack_version" : "1.2.1",
-        "property_name" : "fs.trash.interval",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/ipc.client.idlethreshold",
-      "StackConfigurations" : {
-        "property_description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
-        "property_value" : "8000",
-        "stack_version" : "1.2.1",
-        "property_name" : "ipc.client.idlethreshold",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for DatanodeProtocol, which is used by datanodes to\n    communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.datanode.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.namenode.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "The number of server threads for the namenode.",
-        "property_value" : "40",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.namenode.handler.count",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
-      "StackConfigurations" : {
-        "property_description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
-        "property_value" : "1.0f",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.safemode.threshold.pct",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.replication.max",
-      "StackConfigurations" : {
-        "property_description" : "Maximal block replication.\n  ",
-        "property_value" : "50",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.replication.max",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for ClientDatanodeProtocol, the client-to-datanode protocol\n    for block recovery.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "security.client.datanode.protocol.acl",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hadoop-policy.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.serializations",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "org.apache.hadoop.io.serializer.WritableSerialization",
-        "stack_version" : "1.2.1",
-        "property_name" : "io.serializations",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
-      "StackConfigurations" : {
-        "property_description" : "The implementation for lzo codec.",
-        "property_value" : "com.hadoop.compression.lzo.LzoCodec",
-        "stack_version" : "1.2.1",
-        "property_name" : "io.compression.codec.lzo.class",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.https.port",
-      "StackConfigurations" : {
-        "property_description" : "The https port where namenode binds",
-        "property_value" : "50470",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.https.port",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
-      "StackConfigurations" : {
-        "property_description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
-        "property_value" : "${fs.checkpoint.dir}",
-        "stack_version" : "1.2.1",
-        "property_name" : "fs.checkpoint.edits.dir",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "core-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
-      "StackConfigurations" : {
-        "property_description" : "Number of failed disks datanode would tolerate",
-        "property_value" : "0",
-        "stack_version" : "1.2.1",
-        "property_name" : "dfs.datanode.failed.volumes.tolerated",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP",
-        "type" : "hdfs-site.xml"
-      }
-    }
-  ]
-}

+ 0 - 149
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/HIVE.json

@@ -1,149 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
-      "StackConfigurations" : {
-        "property_description" : "MetaStore Client socket timeout in seconds",
-        "property_value" : "60",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.metastore.client.socket.timeout",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.security.authorization.manager",
-      "StackConfigurations" : {
-        "property_description" : "the hive client authorization manager class name.\n    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
-        "property_value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.security.authorization.manager",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.security.authorization.enabled",
-      "StackConfigurations" : {
-        "property_description" : "enable or disable the hive client authorization",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.security.authorization.enabled",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
-      "StackConfigurations" : {
-        "property_description" : "List of comma separated metastore object types that should be pinned in the cache",
-        "property_value" : "Table,Database,Type,FieldSchema,Order",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.metastore.cache.pinobjtypes",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hadoop.clientside.fs.operations",
-      "StackConfigurations" : {
-        "property_description" : "FS operations are owned by client",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "hadoop.clientside.fs.operations",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "fs.hdfs.impl.disable.cache",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.semantic.analyzer.factory.impl",
-      "StackConfigurations" : {
-        "property_description" : "controls which SemanticAnalyzerFactory implemenation class is used by CLI",
-        "property_value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.semantic.analyzer.factory.impl",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.local",
-      "StackConfigurations" : {
-        "property_description" : "controls whether to connect to remove metastore server or\n    open a new metastore server in Hive Client JVM",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.metastore.local",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
-      "StackConfigurations" : {
-        "property_description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.metastore.execute.setugi",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
-      "StackConfigurations" : {
-        "property_description" : "location of default database for the warehouse",
-        "property_value" : "/apps/hive/warehouse",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.metastore.warehouse.dir",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
-      "StackConfigurations" : {
-        "property_description" : "Driver class name for a JDBC metastore",
-        "property_value" : "com.mysql.jdbc.Driver",
-        "stack_version" : "1.2.1",
-        "property_name" : "javax.jdo.option.ConnectionDriverName",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/HIVE/configurations/hive.server2.enable.doAs",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "hive.server2.enable.doAs",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP",
-        "type" : "hive-site.xml"
-      }
-    }
-  ]
-}
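
Note: the fixture removed above simply mirrors the live stack-configurations endpoint named in its "href" fields. As a hedged sketch (the host, port, and admin/admin credentials below are placeholders for illustration, not values taken from this change), the same payload could be pulled from a running Ambari server instead of a static file:

import base64
import json
import urllib.request

AMBARI = "http://192.168.56.101:8080"      # example host reused from the fixture's href fields
USER, PASSWORD = "admin", "admin"          # assumed credentials, illustration only

def fetch_stack_configurations(service, stack="HDP", version="1.2.1"):
    # Same REST path that appears in the deleted JSON above.
    url = (f"{AMBARI}/api/v1/stacks2/{stack}/versions/{version}"
           f"/stackServices/{service}/configurations?fields=*")
    req = urllib.request.Request(url)
    token = base64.b64encode(f"{USER}:{PASSWORD}".encode()).decode()
    req.add_header("Authorization", f"Basic {token}")
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))

if __name__ == "__main__":
    for item in fetch_stack_configurations("HIVE")["items"]:
        cfg = item["StackConfigurations"]
        print(cfg["type"], cfg["property_name"], "=", cfg["property_value"])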

+ 0 - 725
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/MAPREDUCE.json

@@ -1,725 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.maximum-system-jobs",
-      "StackConfigurations" : {
-        "property_description" : "Maximum number of jobs in the system which can be initialized,\n     concurrently, by the CapacityScheduler.\n    ",
-        "property_value" : "3000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.maximum-system-jobs",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
-      "StackConfigurations" : {
-        "property_description" : "The default maximum number of tasks per-user, across all the of \n    the user's jobs in the queue, which can be initialized concurrently. Once \n    the user's jobs exceed this limit they will be queued on disk.  \n    ",
-        "property_value" : "100000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-user",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
-        "property_value" : "50",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.tracker.handler.count",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.healthChecker.interval",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "135000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.healthChecker.interval",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.restart.recover",
-      "StackConfigurations" : {
-        "property_description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.jobtracker.restart.recover",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.hours",
-      "StackConfigurations" : {
-        "property_description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
-        "property_value" : "1",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-poll-interval",
-      "StackConfigurations" : {
-        "property_description" : "The amount of time in miliseconds which is used to poll \n    the job queues for jobs to initialize.\n    ",
-        "property_value" : "5000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.init-poll-interval",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.reduce.parallel.copies",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "30",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.reduce.parallel.copies",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.reuse.jvm.num.tasks",
-      "StackConfigurations" : {
-        "property_description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
-        "property_value" : "1",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.reuse.jvm.num.tasks",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.capacity",
-      "StackConfigurations" : {
-        "property_description" : "Percentage of the number of slots in the cluster that are\n      to be available for jobs in this queue.\n    ",
-        "property_value" : "100",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.capacity",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.history.completed.location",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "/mapred/history/done",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.tracker.history.completed.location",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.supports-priority",
-      "StackConfigurations" : {
-        "property_description" : "If true, priorities of jobs will be taken into \n      account in scheduling decisions.\n    ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.supports-priority",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/tasktracker.http.threads",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "50",
-        "stack_version" : "1.2.1",
-        "property_name" : "tasktracker.http.threads",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
-      "StackConfigurations" : {
-        "property_description" : "The default maximum number of tasks, across all jobs in the \n    queue, which can be initialized concurrently. Once the queue's jobs exceed \n    this limit they will be queued on disk.  \n    ",
-        "property_value" : "200000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.default-maximum-active-tasks-per-queue",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.cluster.administrators",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : " hadoop",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapreduce.cluster.administrators",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
-      "StackConfigurations" : {
-        "property_description" : "The multipe of (maximum-system-jobs * queue-capacity) used to \n    determine the number of jobs which are accepted by the scheduler.  \n    ",
-        "property_value" : "10",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.init-accept-jobs-factor",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
-      "StackConfigurations" : {
-        "property_description" : "\n    3-hour sliding window (value is in minutes)\n  ",
-        "property_value" : "180",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
-      "StackConfigurations" : {
-        "property_description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
-        "property_value" : "250",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-administer-jobs",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.queue.default.acl-administer-jobs",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-queue-acls.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-user-limit-factor",
-      "StackConfigurations" : {
-        "property_description" : "The default multiple of queue-capacity which is used to \n    determine the amount of slots a single user can consume concurrently.\n    ",
-        "property_value" : "1",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.default-user-limit-factor",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.max.tracker.blacklists",
-      "StackConfigurations" : {
-        "property_description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
-        "property_value" : "16",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.max.tracker.blacklists",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.map.output.compression.codec",
-      "StackConfigurations" : {
-        "property_description" : "If the map outputs are compressed, how should they be\n      compressed\n    ",
-        "property_value" : "org.apache.hadoop.io.compress.SnappyCodec",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.map.output.compression.codec",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.healthChecker.script.timeout",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "60000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.healthChecker.script.timeout",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/jetty.connector",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "org.mortbay.jetty.nio.SelectChannelConnector",
-        "stack_version" : "1.2.1",
-        "property_name" : "jetty.connector",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-supports-priority",
-      "StackConfigurations" : {
-        "property_description" : "If true, priorities of jobs will be taken into \n      account in scheduling decisions by default in a job queue.\n    ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.default-supports-priority",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-capacity",
-      "StackConfigurations" : {
-        "property_description" : "\n\tmaximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.\n\tThis provides a means to limit how much excess capacity a queue can use. By default, there is no limit.\n\tThe maximum-capacity of a queue can only be greater than or equal to its minimum capacity.\n        Default value of -1 implies a queue can use complete capacity of the cluster.\n\n        This property could be to curtail certain jobs which are long running in nature from occupying more than a \n        certain percentage of the cluster, which in the absence of pre-emption, could lead to capacity guarantees of \n        other queues being affected.\n        \n        One important thing to note is that maximum-capacity is a percentage , so based on the cluster's capacity\n        the max capacity would change. So if large no of nodes or racks get added to the cluster , max Capacity in \n        absolute terms would increase accordingly.\n    ",
-        "property_value" : "-1",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.maximum-capacity",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.child.root.logger",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "INFO,TLA",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.child.root.logger",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-minimum-user-limit-percent",
-      "StackConfigurations" : {
-        "property_description" : "The percentage of the resources limited to a particular user\n      for the job queue at any given point of time by default.\n    ",
-        "property_value" : "100",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.default-minimum-user-limit-percent",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/hadoop.job.history.user.location",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "none",
-        "stack_version" : "1.2.1",
-        "property_name" : "hadoop.job.history.user.location",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.input.buffer.percent",
-      "StackConfigurations" : {
-        "property_description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
-        "property_value" : "0.7",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.shuffle.input.buffer.percent",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.reduce.slowstart.completed.maps",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "0.05",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.reduce.slowstart.completed.maps",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.history.server.embedded",
-      "StackConfigurations" : {
-        "property_description" : "Should job history server be embedded within Job tracker\nprocess",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapreduce.history.server.embedded",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/io.sort.factor",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "100",
-        "stack_version" : "1.2.1",
-        "property_name" : "io.sort.factor",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.reduce.tasks.speculative.execution",
-      "StackConfigurations" : {
-        "property_description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.reduce.tasks.speculative.execution",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.tracker.persist.jobstatus.active",
-      "StackConfigurations" : {
-        "property_description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.tracker.persist.jobstatus.active",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.inmem.merge.threshold",
-      "StackConfigurations" : {
-        "property_description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
-        "property_value" : "1000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.inmem.merge.threshold",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.init-worker-threads",
-      "StackConfigurations" : {
-        "property_description" : "Number of worker threads which would be used by\n    Initialization poller to initialize jobs in a set of queue.\n    If number mentioned in property is equal to number of job queues\n    then a single thread would initialize jobs in a queue. If lesser\n    then a thread would get a set of queues assigned. If the number\n    is greater then number of threads would be equal to number of \n    job queues.\n    ",
-        "property_value" : "5",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.init-worker-threads",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
-      "StackConfigurations" : {
-        "property_description" : "The maximum number of tasks, across all jobs in the queue, \n    which can be initialized concurrently. Once the queue's jobs exceed this \n    limit they will be queued on disk.  \n    ",
-        "property_value" : "200000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.queue.default.acl-submit-job",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "*",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.queue.default.acl-submit-job",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-queue-acls.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.reduce.input.buffer.percent",
-      "StackConfigurations" : {
-        "property_description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
-        "property_value" : "0.0",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.reduce.input.buffer.percent",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.completeuserjobs.maximum",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "5",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.queue.names",
-      "StackConfigurations" : {
-        "property_description" : " Comma separated list of queues configured for this jobtracker.",
-        "property_value" : "default",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.queue.names",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
-      "StackConfigurations" : {
-        "property_description" : "The maximum number of tasks per-user, across all the of the \n    user's jobs in the queue, which can be initialized concurrently. Once the \n    user's jobs exceed this limit they will be queued on disk.  \n    ",
-        "property_value" : "100000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.map.tasks.speculative.execution",
-      "StackConfigurations" : {
-        "property_description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.map.tasks.speculative.execution",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.default-init-accept-jobs-factor",
-      "StackConfigurations" : {
-        "property_description" : "The default multipe of (maximum-system-jobs * queue-capacity) \n    used to determine the number of jobs which are accepted by the scheduler.  \n    ",
-        "property_value" : "10",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.default-init-accept-jobs-factor",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
-      "StackConfigurations" : {
-        "property_description" : "\n    15-minute bucket size (value is in minutes)\n  ",
-        "property_value" : "15",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.tasktracker.group",
-      "StackConfigurations" : {
-        "property_description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
-        "property_value" : "hadoop",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapreduce.tasktracker.group",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
-      "StackConfigurations" : {
-        "property_description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
-        "property_value" : "50000000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.job.shuffle.merge.percent",
-      "StackConfigurations" : {
-        "property_description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
-        "property_value" : "0.66",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.job.shuffle.merge.percent",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.reduce.input.limit",
-      "StackConfigurations" : {
-        "property_description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
-        "property_value" : "10737418240",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapreduce.reduce.input.limit",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.user-limit-factor",
-      "StackConfigurations" : {
-        "property_description" : "The multiple of the queue capacity which can be configured to \n    allow a single user to acquire more slots. \n    ",
-        "property_value" : "1",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.user-limit-factor",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/io.sort.record.percent",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : ".2",
-        "stack_version" : "1.2.1",
-        "property_name" : "io.sort.record.percent",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.output.compression.type",
-      "StackConfigurations" : {
-        "property_description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
-        "property_value" : "BLOCK",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.output.compression.type",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.task.timeout",
-      "StackConfigurations" : {
-        "property_description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
-        "property_value" : "600000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.task.timeout",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.check",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "10000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.jobtracker.retirejob.check",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
-      "StackConfigurations" : {
-        "property_description" : " Each queue enforces a limit on the percentage of resources \n    allocated to a user at any given time, if there is competition for them. \n    This user limit can vary between a minimum and maximum value. The former\n    depends on the number of users who have submitted jobs, and the latter is\n    set to this property value. For example, suppose the value of this \n    property is 25. If two users have submitted jobs to a queue, no single \n    user can use more than 50% of the queue resources. If a third user submits\n    a job, no single user can use more than 33% of the queue resources. With 4 \n    or more users, no user can use more than 25% of the queue's resources. A \n    value of 100 implies no user limits are imposed. \n    ",
-        "property_value" : "100",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.capacity-scheduler.queue.default.minimum-user-limit-percent",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "capacity-scheduler.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.jobtracker.retirejob.interval",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "21600000",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.jobtracker.retirejob.interval",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapred.system.dir",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "/mapred/system",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapred.system.dir",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/MAPREDUCE/configurations/mapreduce.jobtracker.staging.root.dir",
-      "StackConfigurations" : {
-        "property_description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
-        "property_value" : "/user",
-        "stack_version" : "1.2.1",
-        "property_name" : "mapreduce.jobtracker.staging.root.dir",
-        "service_name" : "MAPREDUCE",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    }
-  ]
-}
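
The MAPREDUCE fixture above mixes properties destined for three different files (mapred-site.xml, capacity-scheduler.xml, mapred-queue-acls.xml), distinguished only by the "type" field. A minimal sketch, assuming a payload of the same shape, that groups the properties back under their target configuration file:

from collections import defaultdict

def group_by_config_file(payload):
    """Return {config_file_name: {property_name: property_value}}."""
    grouped = defaultdict(dict)
    for item in payload.get("items", []):
        cfg = item["StackConfigurations"]
        grouped[cfg["type"]][cfg["property_name"]] = cfg["property_value"]
    return dict(grouped)

# Abbreviated two-item payload in the same shape as the removed fixture:
sample = {"items": [
    {"StackConfigurations": {"type": "mapred-site.xml",
                             "property_name": "mapred.system.dir",
                             "property_value": "/mapred/system"}},
    {"StackConfigurations": {"type": "capacity-scheduler.xml",
                             "property_name": "mapred.capacity-scheduler.queue.default.capacity",
                             "property_value": "100"}},
]}
print(group_by_config_file(sample))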

+ 0 - 317
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/OOZIE.json

@@ -1,317 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.pool.max.active.conn",
-      "StackConfigurations" : {
-        "property_description" : "\n             Max number of connections.\n        ",
-        "property_value" : "10",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.JPAService.pool.max.active.conn",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.PurgeService.older.than",
-      "StackConfigurations" : {
-        "property_description" : "\n     Jobs older than this value, in days, will be purged by the PurgeService.\n     ",
-        "property_value" : "30",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.PurgeService.older.than",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.system.id",
-      "StackConfigurations" : {
-        "property_description" : "\n    The Oozie system ID.\n    ",
-        "property_value" : "oozie-${user.name}",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.system.id",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.authentication.kerberos.name.rules",
-      "StackConfigurations" : {
-        "property_description" : "The mapping from kerberos principal names to local OS user names.",
-        "property_value" : "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT\n        ",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.authentication.kerberos.name.rules",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.base.url",
-      "StackConfigurations" : {
-        "property_description" : "Base Oozie URL.",
-        "property_value" : "http://localhost:11000/oozie",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.base.url",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.SchemaService.wf.ext.schemas",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.SchemaService.wf.ext.schemas",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.password",
-      "StackConfigurations" : {
-        "property_description" : "\n            DB user password.\n\n            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,\n                       if empty Configuration assumes it is NULL.\n        ",
-        "property_value" : " ",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.JPAService.jdbc.password",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.callable.concurrency",
-      "StackConfigurations" : {
-        "property_description" : "\n     Maximum concurrency for a given callable type.\n     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n     All commands that use action executors (action-start, action-end, action-kill and action-check) use\n     the action type as the callable type.\n     ",
-        "property_value" : "3",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.CallableQueueService.callable.concurrency",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.db.schema.name",
-      "StackConfigurations" : {
-        "property_description" : "\n      Oozie DataBase Name\n     ",
-        "property_value" : "oozie",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.db.schema.name",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.username",
-      "StackConfigurations" : {
-        "property_description" : "\n            DB user name.\n        ",
-        "property_value" : "sa",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.JPAService.jdbc.username",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.jobTracker.whitelist",
-      "StackConfigurations" : {
-        "property_description" : "\n      Whitelisted job tracker for Oozie service.\n      ",
-        "property_value" : " ",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.url",
-      "StackConfigurations" : {
-        "property_description" : "\n            JDBC URL.\n        ",
-        "property_value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.JPAService.jdbc.url",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.AuthorizationService.security.enabled",
-      "StackConfigurations" : {
-        "property_description" : "\n     Specifies whether security (user name/admin role) is enabled or not.\n     If disabled any user can manage Oozie system and manage any job.\n     ",
-        "property_value" : "true",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.AuthorizationService.security.enabled",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.create.db.schema",
-      "StackConfigurations" : {
-        "property_description" : "\n            Creates Oozie DB.\n\n            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n        ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.JPAService.create.db.schema",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.JPAService.jdbc.driver",
-      "StackConfigurations" : {
-        "property_description" : "\n            JDBC driver class.\n        ",
-        "property_value" : "org.apache.derby.jdbc.EmbeddedDriver",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.JPAService.jdbc.driver",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.ActionService.executor.ext.classes",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor\n        ",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.ActionService.executor.ext.classes",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.systemmode",
-      "StackConfigurations" : {
-        "property_description" : "\n     System mode for  Oozie at startup.\n     ",
-        "property_value" : "NORMAL",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.systemmode",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.queue.size",
-      "StackConfigurations" : {
-        "property_description" : "Max callable queue size",
-        "property_value" : "1000",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.CallableQueueService.queue.size",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.nameNode.whitelist",
-      "StackConfigurations" : {
-        "property_description" : "\n      ",
-        "property_value" : " ",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/use.system.libpath.for.mapreduce.and.pig.jobs",
-      "StackConfigurations" : {
-        "property_description" : "\n      If set to true, submissions of MapReduce and Pig jobs will include\n      automatically the system library path, thus not requiring users to\n      specify where the Pig JAR files are. Instead, the ones from the system\n      library path are used.\n      ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.PurgeService.purge.interval",
-      "StackConfigurations" : {
-        "property_description" : "\n     Interval at which the purge service will run, in seconds.\n     ",
-        "property_value" : "3600",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.PurgeService.purge.interval",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.authentication.type",
-      "StackConfigurations" : {
-        "property_description" : "\n      ",
-        "property_value" : "simple",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.authentication.type",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.CallableQueueService.threads",
-      "StackConfigurations" : {
-        "property_description" : "Number of threads used for executing callables",
-        "property_value" : "10",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.CallableQueueService.threads",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.coord.normal.default.timeout",
-      "StackConfigurations" : {
-        "property_description" : "Default timeout for a coordinator action input check (in minutes) for normal job.\n      -1 means infinite timeout",
-        "property_value" : "120",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.coord.normal.default.timeout",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.WorkflowAppService.system.libpath",
-      "StackConfigurations" : {
-        "property_description" : "\n      System library path to use for workflow applications.\n      This path is added to workflow application if their job properties sets\n      the property 'oozie.use.system.libpath' to true.\n      ",
-        "property_value" : "/user/${user.name}/share/lib",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.WorkflowAppService.system.libpath",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/OOZIE/configurations/oozie.service.HadoopAccessorService.hadoop.configurations",
-      "StackConfigurations" : {
-        "property_description" : "\n          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n          the relevant Hadoop *-site.xml files. If the path is relative is looked within\n          the Oozie configuration directory; though the path can be absolute (i.e. to point\n          to Hadoop client conf/ directories in the local filesystem.\n      ",
-        "property_value" : "*=/etc/hadoop/conf",
-        "stack_version" : "1.2.1",
-        "property_name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
-        "service_name" : "OOZIE",
-        "stack_name" : "HDP",
-        "type" : "oozie-site.xml"
-      }
-    }
-  ]
-}

+ 0 - 173
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/WEBHCAT.json

@@ -1,173 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.override.enabled",
-      "StackConfigurations" : {
-        "property_description" : "\n     Enable the override path in templeton.override.jars\n   ",
-        "property_value" : "false",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.override.enabled",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hive.archive",
-      "StackConfigurations" : {
-        "property_description" : "The path to the Hive archive.",
-        "property_value" : "hdfs:///apps/webhcat/hive.tar.gz",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.hive.archive",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.jar",
-      "StackConfigurations" : {
-        "property_description" : "The path to the Templeton jar file.",
-        "property_value" : "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.jar",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.streaming.jar",
-      "StackConfigurations" : {
-        "property_description" : "The hdfs path to the Hadoop streaming jar file.",
-        "property_value" : "hdfs:///apps/webhcat/hadoop-streaming.jar",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.streaming.jar",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hadoop",
-      "StackConfigurations" : {
-        "property_description" : "The path to the Hadoop executable.",
-        "property_value" : "/usr/bin/hadoop",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.hadoop",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.pig.path",
-      "StackConfigurations" : {
-        "property_description" : "The path to the Pig executable.",
-        "property_value" : "pig.tar.gz/pig/bin/pig",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.pig.path",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.libjars",
-      "StackConfigurations" : {
-        "property_description" : "Jars to add the the classpath.",
-        "property_value" : "/usr/lib/zookeeper/zookeeper.jar",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.libjars",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.pig.archive",
-      "StackConfigurations" : {
-        "property_description" : "The path to the Pig archive.",
-        "property_value" : "hdfs:///apps/webhcat/pig.tar.gz",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.pig.archive",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hcat",
-      "StackConfigurations" : {
-        "property_description" : "The path to the hcatalog executable.",
-        "property_value" : "/usr/bin/hcat",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.hcat",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hive.path",
-      "StackConfigurations" : {
-        "property_description" : "The path to the Hive executable.",
-        "property_value" : "hive.tar.gz/hive/bin/hive",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.hive.path",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.storage.class",
-      "StackConfigurations" : {
-        "property_description" : "The class to use as storage",
-        "property_value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.storage.class",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.hadoop.conf.dir",
-      "StackConfigurations" : {
-        "property_description" : "The path to the Hadoop configuration.",
-        "property_value" : "/etc/hadoop/conf",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.hadoop.conf.dir",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.port",
-      "StackConfigurations" : {
-        "property_description" : "The HTTP port for the main server.",
-        "property_value" : "50111",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.port",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/WEBHCAT/configurations/templeton.exec.timeout",
-      "StackConfigurations" : {
-        "property_description" : "Time out for templeton api",
-        "property_value" : "60000",
-        "stack_version" : "1.2.1",
-        "property_name" : "templeton.exec.timeout",
-        "service_name" : "WEBHCAT",
-        "stack_name" : "HDP",
-        "type" : "webhcat-site.xml"
-      }
-    }
-  ]
-}

+ 0 - 4
ambari-web/app/assets/data/wizard/stack/hdp/version1.2.1/ZOOKEEPER.json

@@ -1,4 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.2.1/stackServices/ZOOKEEPER/configurations?fields=*",
-  "items" : [ ]
-}

+ 0 - 65
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/GANGLIA.json

@@ -1,65 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations?fields=*&_=1368459065278",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_runtime_dir",
-      "StackConfigurations" : {
-        "property_description" : "Run directories for Ganglia",
-        "property_value" : "/var/run/ganglia/hdp",
-        "stack_version" : "1.3.0",
-        "property_name" : "ganglia_runtime_dir",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmetad_user",
-      "StackConfigurations" : {
-        "property_description" : "User ",
-        "property_value" : "nobody",
-        "stack_version" : "1.3.0",
-        "property_name" : "gmetad_user",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/rrdcached_base_dir",
-      "StackConfigurations" : {
-        "property_description" : "Default directory for saving the rrd files on ganglia server",
-        "property_value" : "/var/lib/ganglia/rrds",
-        "stack_version" : "1.3.0",
-        "property_name" : "rrdcached_base_dir",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmond_user",
-      "StackConfigurations" : {
-        "property_description" : "User ",
-        "property_value" : "nobody",
-        "stack_version" : "1.3.0",
-        "property_name" : "gmond_user",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_conf_dir",
-      "StackConfigurations" : {
-        "property_description" : "Config directory for Ganglia",
-        "property_value" : "/etc/ganglia/hdp",
-        "stack_version" : "1.3.0",
-        "property_name" : "ganglia_conf_dir",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    }
-  ]
-}

+ 0 - 41
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/NAGIOS.json

@@ -1,41 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations?fields=*&_=1368459065260",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_group",
-      "StackConfigurations" : {
-        "property_description" : "Nagios Group.",
-        "property_value" : "nagios",
-        "stack_version" : "1.3.0",
-        "property_name" : "nagios_group",
-        "service_name" : "NAGIOS",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_web_login",
-      "StackConfigurations" : {
-        "property_description" : "Nagios web user.",
-        "property_value" : "nagiosadmin",
-        "stack_version" : "1.3.0",
-        "property_name" : "nagios_web_login",
-        "service_name" : "NAGIOS",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/NAGIOS/configurations/nagios_user",
-      "StackConfigurations" : {
-        "property_description" : "Nagios Username.",
-        "property_value" : "nagios",
-        "stack_version" : "1.3.0",
-        "property_name" : "nagios_user",
-        "service_name" : "NAGIOS",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    }
-  ]
-}

+ 0 - 4
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/PIG.json

@@ -1,4 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG/configurations?fields=*&_=1368459065432",
-  "items" : [ ]
-}

+ 0 - 4
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/SQOOP.json

@@ -1,4 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/PIG/configurations?fields=*&_=1368459065432",
-  "items" : [ ]
-}

+ 60 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/HBASE.json

@@ -0,0 +1,60 @@
+{
+  "name" : "HBASE",
+  "version" : "0.94.5",
+  "user" : "mapred",
+  "comment" : "Non-relational distributed database and centralized service for configuration management & synchronization",
+  "properties" : [ {
+    "name" : "hbase.cluster.distributed",
+    "value" : "true",
+    "description" : "The mode the cluster will be in. Possible values are\n      false for standalone mode and true for distributed mode.  If\n      false, startup will run all HBase and ZooKeeper daemons together\n      in the one JVM.\n    ",
+    "filename" : "hbase-site.xml"
+  }, {
+    "name" : "hbase.master.lease.thread.wakefrequency",
+    "value" : "3000",
+    "description" : "The interval between checks for expired region server leases.\n    This value has been reduced due to the other reduced values above so that\n    the master will notice a dead region server sooner. The default is 15 seconds.\n    ",
+    "filename" : "hbase-site.xml"
+  }, {
+    "name" : "hbase.superuser",
+    "value" : "hbase",
+    "description" : "List of users or groups (comma-separated), who are allowed\n    full privileges, regardless of stored ACLs, across the cluster.\n    Only used when HBase security is enabled.\n    ",
+    "filename" : "hbase-site.xml"
+  }, {
+    "name" : "hbase.zookeeper.property.clientPort",
+    "value" : "2181",
+    "description" : "Property from ZooKeeper's config zoo.cfg.\n    The port at which the clients will connect.\n    ",
+    "filename" : "hbase-site.xml"
+  }, {
+    "name" : "hbase.regionserver.optionalcacheflushinterval",
+    "value" : "10000",
+    "description" : "\n      Amount of time to wait since the last time a region was flushed before\n      invoking an optional cache flush. Default 60,000.\n    ",
+    "filename" : "hbase-site.xml"
+  }, {
+    "name" : "hbase.zookeeper.useMulti",
+    "value" : "true",
+    "description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n    This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).åá\n    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will\n    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n    ",
+    "filename" : "hbase-site.xml"
+  } ],
+  "components" : [ {
+    "name" : "HBASE_MASTER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "HBASE_REGIONSERVER",
+    "category" : "SLAVE",
+    "client" : false,
+    "master" : false
+  }, {
+    "name" : "HBASE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientOnlyService" : false,
+  "clientComponent" : {
+    "name" : "HBASE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}

+ 20 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/HCATALOG.json

@@ -0,0 +1,20 @@
+{
+  "name" : "HCATALOG",
+  "version" : "0.5.0",
+  "user" : "root",
+  "comment" : "This is comment for HCATALOG service",
+  "properties" : [ ],
+  "components" : [ {
+    "name" : "HCAT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientOnlyService" : true,
+  "clientComponent" : {
+    "name" : "HCAT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}

+ 210 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/HDFS.json

@@ -0,0 +1,210 @@
+{
+  "name" : "HDFS",
+  "version" : "1.1.2",
+  "user" : "root",
+  "comment" : "Apache Hadoop Distributed File System",
+  "properties" : [ {
+    "name" : "dfs.datanode.socket.write.timeout",
+    "value" : "0",
+    "description" : "DFS Client write socket timeout",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.replication.max",
+    "value" : "50",
+    "description" : "Maximal block replication.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.heartbeat.interval",
+    "value" : "3",
+    "description" : "Determines datanode heartbeat interval in seconds.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.safemode.threshold.pct",
+    "value" : "1.0f",
+    "description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.balance.bandwidthPerSec",
+    "value" : "6250000",
+    "description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.block.size",
+    "value" : "134217728",
+    "description" : "The default block size for new files.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.ipc.address",
+    "value" : "0.0.0.0:8010",
+    "description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.blockreport.initialDelay",
+    "value" : "120",
+    "description" : "Delay for first block report in seconds.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.du.pct",
+    "value" : "0.85f",
+    "description" : "When calculating remaining space, only use this percentage of the real available space\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.namenode.handler.count",
+    "value" : "40",
+    "description" : "The number of server threads for the namenode.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.max.xcievers",
+    "value" : "4096",
+    "description" : "PRIVATE CONFIG VARIABLE",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.umaskmode",
+    "value" : "077",
+    "description" : "\nThe octal umask used when creating files and directories.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.web.ugi",
+    "value" : "gopher,gopher",
+    "description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.permissions",
+    "value" : "true",
+    "description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.permissions.supergroup",
+    "value" : "hdfs",
+    "description" : "The name of the group of super-users.",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.namenode.handler.count",
+    "value" : "100",
+    "description" : "Added to grow Queue size so that more client connections are allowed",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "ipc.server.max.response.size",
+    "value" : "5242880",
+    "description" : null,
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.block.access.token.enable",
+    "value" : "true",
+    "description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.secondary.https.port",
+    "value" : "50490",
+    "description" : "The https port where secondary-namenode binds",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.https.port",
+    "value" : "50470",
+    "description" : "The https port where namenode binds",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.access.time.precision",
+    "value" : "0",
+    "description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.cluster.administrators",
+    "value" : " hdfs",
+    "description" : "ACL for who all can view the default servlets in the HDFS",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "ipc.server.read.threadpool.size",
+    "value" : "5",
+    "description" : null,
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "dfs.datanode.failed.volumes.tolerated",
+    "value" : "0",
+    "description" : "Number of failed disks datanode would tolerate",
+    "filename" : "hdfs-site.xml"
+  }, {
+    "name" : "io.file.buffer.size",
+    "value" : "131072",
+    "description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.serializations",
+    "value" : "org.apache.hadoop.io.serializer.WritableSerialization",
+    "description" : null,
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "io.compression.codec.lzo.class",
+    "value" : "com.hadoop.compression.lzo.LzoCodec",
+    "description" : "The implementation for lzo codec.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.trash.interval",
+    "value" : "360",
+    "description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.checkpoint.edits.dir",
+    "value" : "${fs.checkpoint.dir}",
+    "description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.checkpoint.period",
+    "value" : "21600",
+    "description" : "The number of seconds between two periodic checkpoints.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "fs.checkpoint.size",
+    "value" : "536870912",
+    "description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.idlethreshold",
+    "value" : "8000",
+    "description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.connection.maxidletime",
+    "value" : "30000",
+    "description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "ipc.client.connect.max.retries",
+    "value" : "50",
+    "description" : "Defines the maximum number of retries for IPC connections.",
+    "filename" : "core-site.xml"
+  }, {
+    "name" : "webinterface.private.actions",
+    "value" : "false",
+    "description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
+    "filename" : "core-site.xml"
+  } ],
+  "components" : [ {
+    "name" : "NAMENODE",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "DATANODE",
+    "category" : "SLAVE",
+    "client" : false,
+    "master" : false
+  }, {
+    "name" : "SECONDARY_NAMENODE",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "HDFS_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientOnlyService" : false,
+  "clientComponent" : {
+    "name" : "HDFS_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}

+ 95 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/HIVE.json

@@ -0,0 +1,95 @@
+{
+  "name" : "HIVE",
+  "version" : "0.10.0",
+  "user" : "root",
+  "comment" : "Data warehouse system for ad-hoc queries & analysis of large datasets and table & storage management service",
+  "properties" : [ {
+    "name" : "hive.metastore.local",
+    "value" : "false",
+    "description" : "controls whether to connect to remove metastore server or\n    open a new metastore server in Hive Client JVM",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "javax.jdo.option.ConnectionDriverName",
+    "value" : "com.mysql.jdbc.Driver",
+    "description" : "Driver class name for a JDBC metastore",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.metastore.warehouse.dir",
+    "value" : "/apps/hive/warehouse",
+    "description" : "location of default database for the warehouse",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.metastore.cache.pinobjtypes",
+    "value" : "Table,Database,Type,FieldSchema,Order",
+    "description" : "List of comma separated metastore object types that should be pinned in the cache",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.semantic.analyzer.factory.impl",
+    "value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
+    "description" : "controls which SemanticAnalyzerFactory implemenation class is used by CLI",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hadoop.clientside.fs.operations",
+    "value" : "true",
+    "description" : "FS operations are owned by client",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.metastore.client.socket.timeout",
+    "value" : "60",
+    "description" : "MetaStore Client socket timeout in seconds",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.metastore.execute.setugi",
+    "value" : "true",
+    "description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.security.authorization.enabled",
+    "value" : "true",
+    "description" : "enable or disable the hive client authorization",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.security.authorization.manager",
+    "value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
+    "description" : "the hive client authorization manager class name.\n    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "hive.server2.enable.doAs",
+    "value" : "true",
+    "description" : null,
+    "filename" : "hive-site.xml"
+  }, {
+    "name" : "fs.hdfs.impl.disable.cache",
+    "value" : "true",
+    "description" : null,
+    "filename" : "hive-site.xml"
+  } ],
+  "components" : [ {
+    "name" : "HIVE_METASTORE",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "HIVE_SERVER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "MYSQL_SERVER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "HIVE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientOnlyService" : false,
+  "clientComponent" : {
+    "name" : "HIVE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}

+ 230 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/MAPREDUCE.json

@@ -0,0 +1,230 @@
+{
+  "name" : "MAPREDUCE",
+  "version" : "1.1.2",
+  "user" : "mapred",
+  "comment" : "Apache Hadoop Distributed Processing Framework",
+  "properties" : [ {
+    "name" : "io.sort.record.percent",
+    "value" : ".2",
+    "description" : "No description",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "io.sort.factor",
+    "value" : "100",
+    "description" : "No description",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
+    "value" : "250",
+    "description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.tracker.handler.count",
+    "value" : "50",
+    "description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.system.dir",
+    "value" : "/mapred/system",
+    "description" : "No description",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapreduce.cluster.administrators",
+    "value" : " hadoop",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.reduce.parallel.copies",
+    "value" : "30",
+    "description" : "No description",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "tasktracker.http.threads",
+    "value" : "50",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.map.tasks.speculative.execution",
+    "value" : "false",
+    "description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.reduce.tasks.speculative.execution",
+    "value" : "false",
+    "description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.reduce.slowstart.completed.maps",
+    "value" : "0.05",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.inmem.merge.threshold",
+    "value" : "1000",
+    "description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.shuffle.merge.percent",
+    "value" : "0.66",
+    "description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.shuffle.input.buffer.percent",
+    "value" : "0.7",
+    "description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.output.compression.type",
+    "value" : "BLOCK",
+    "description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.jobtracker.completeuserjobs.maximum",
+    "value" : "0",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.jobtracker.restart.recover",
+    "value" : "false",
+    "description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.reduce.input.buffer.percent",
+    "value" : "0.0",
+    "description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapreduce.reduce.input.limit",
+    "value" : "10737418240",
+    "description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.task.timeout",
+    "value" : "600000",
+    "description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "jetty.connector",
+    "value" : "org.mortbay.jetty.nio.SelectChannelConnector",
+    "description" : "No description",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.child.root.logger",
+    "value" : "INFO,TLA",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.max.tracker.blacklists",
+    "value" : "16",
+    "description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.healthChecker.interval",
+    "value" : "135000",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.healthChecker.script.timeout",
+    "value" : "60000",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.tracker.persist.jobstatus.active",
+    "value" : "false",
+    "description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.tracker.persist.jobstatus.hours",
+    "value" : "1",
+    "description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.jobtracker.retirejob.check",
+    "value" : "10000",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.jobtracker.retirejob.interval",
+    "value" : "0",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.tracker.history.completed.location",
+    "value" : "/mapred/history/done",
+    "description" : "No description",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
+    "value" : "false",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.job.reuse.jvm.num.tasks",
+    "value" : "1",
+    "description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "hadoop.job.history.user.location",
+    "value" : "none",
+    "description" : null,
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapreduce.jobtracker.staging.root.dir",
+    "value" : "/user",
+    "description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapreduce.tasktracker.group",
+    "value" : "hadoop",
+    "description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapreduce.jobtracker.split.metainfo.maxsize",
+    "value" : "50000000",
+    "description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapreduce.history.server.embedded",
+    "value" : "false",
+    "description" : "Should job history server be embedded within Job tracker\nprocess",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.jobtracker.blacklist.fault-timeout-window",
+    "value" : "180",
+    "description" : "\n    3-hour sliding window (value is in minutes)\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.jobtracker.blacklist.fault-bucket-width",
+    "value" : "15",
+    "description" : "\n    15-minute bucket size (value is in minutes)\n  ",
+    "filename" : "mapred-site.xml"
+  }, {
+    "name" : "mapred.queue.names",
+    "value" : "default",
+    "description" : " Comma separated list of queues configured for this jobtracker.",
+    "filename" : "mapred-site.xml"
+  } ],
+  "components" : [ {
+    "name" : "JOBTRACKER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "TASKTRACKER",
+    "category" : "SLAVE",
+    "client" : false,
+    "master" : false
+  }, {
+    "name" : "MAPREDUCE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientOnlyService" : false,
+  "clientComponent" : {
+    "name" : "MAPREDUCE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}

+ 155 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/OOZIE.json

@@ -0,0 +1,155 @@
+{
+  "name" : "OOZIE",
+  "version" : "3.2.0",
+  "user" : "root",
+  "comment" : "System for workflow coordination and execution of Apache Hadoop jobs",
+  "properties" : [ {
+    "name" : "oozie.base.url",
+    "value" : "http://localhost:11000/oozie",
+    "description" : "Base Oozie URL.",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.system.id",
+    "value" : "oozie-${user.name}",
+    "description" : "\n    The Oozie system ID.\n    ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.systemmode",
+    "value" : "NORMAL",
+    "description" : "\n     System mode for  Oozie at startup.\n     ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.AuthorizationService.security.enabled",
+    "value" : "true",
+    "description" : "\n     Specifies whether security (user name/admin role) is enabled or not.\n     If disabled any user can manage Oozie system and manage any job.\n     ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.PurgeService.older.than",
+    "value" : "30",
+    "description" : "\n     Jobs older than this value, in days, will be purged by the PurgeService.\n     ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.PurgeService.purge.interval",
+    "value" : "3600",
+    "description" : "\n     Interval at which the purge service will run, in seconds.\n     ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.CallableQueueService.queue.size",
+    "value" : "1000",
+    "description" : "Max callable queue size",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.CallableQueueService.threads",
+    "value" : "10",
+    "description" : "Number of threads used for executing callables",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.CallableQueueService.callable.concurrency",
+    "value" : "3",
+    "description" : "\n     Maximum concurrency for a given callable type.\n     Each command is a callable type (submit, start, run, signal, job, jobs, suspend,resume, etc).\n     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).\n     All commands that use action executors (action-start, action-end, action-kill and action-check) use\n     the action type as the callable type.\n     ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.coord.normal.default.timeout",
+    "value" : "120",
+    "description" : "Default timeout for a coordinator action input check (in minutes) for normal job.\n      -1 means infinite timeout",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.db.schema.name",
+    "value" : "oozie",
+    "description" : "\n      Oozie DataBase Name\n     ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.HadoopAccessorService.jobTracker.whitelist",
+    "value" : " ",
+    "description" : "\n      Whitelisted job tracker for Oozie service.\n      ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.authentication.type",
+    "value" : "simple",
+    "description" : "\n      ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.HadoopAccessorService.nameNode.whitelist",
+    "value" : " ",
+    "description" : "\n      ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.WorkflowAppService.system.libpath",
+    "value" : "/user/${user.name}/share/lib",
+    "description" : "\n      System library path to use for workflow applications.\n      This path is added to workflow application if their job properties sets\n      the property 'oozie.use.system.libpath' to true.\n      ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "use.system.libpath.for.mapreduce.and.pig.jobs",
+    "value" : "false",
+    "description" : "\n      If set to true, submissions of MapReduce and Pig jobs will include\n      automatically the system library path, thus not requiring users to\n      specify where the Pig JAR files are. Instead, the ones from the system\n      library path are used.\n      ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.authentication.kerberos.name.rules",
+    "value" : "\n        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/\n        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/\n        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/\n        DEFAULT\n        ",
+    "description" : "The mapping from kerberos principal names to local OS user names.",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.HadoopAccessorService.hadoop.configurations",
+    "value" : "*=/etc/hadoop/conf",
+    "description" : "\n          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of\n          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is\n          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains\n          the relevant Hadoop *-site.xml files. If the path is relative is looked within\n          the Oozie configuration directory; though the path can be absolute (i.e. to point\n          to Hadoop client conf/ directories in the local filesystem.\n      ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.ActionService.executor.ext.classes",
+    "value" : "\n            org.apache.oozie.action.email.EmailActionExecutor,\n            org.apache.oozie.action.hadoop.HiveActionExecutor,\n            org.apache.oozie.action.hadoop.ShellActionExecutor,\n            org.apache.oozie.action.hadoop.SqoopActionExecutor,\n            org.apache.oozie.action.hadoop.DistcpActionExecutor\n        ",
+    "description" : null,
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.SchemaService.wf.ext.schemas",
+    "value" : "shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd",
+    "description" : null,
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.JPAService.create.db.schema",
+    "value" : "false",
+    "description" : "\n            Creates Oozie DB.\n\n            If set to true, it creates the DB schema if it does not exist. If the DB schema exists is a NOP.\n            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.\n        ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.JPAService.jdbc.driver",
+    "value" : "org.apache.derby.jdbc.EmbeddedDriver",
+    "description" : "\n            JDBC driver class.\n        ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.JPAService.jdbc.url",
+    "value" : "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true",
+    "description" : "\n            JDBC URL.\n        ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.JPAService.jdbc.username",
+    "value" : "sa",
+    "description" : "\n            DB user name.\n        ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.JPAService.jdbc.password",
+    "value" : " ",
+    "description" : "\n            DB user password.\n\n            IMPORTANT: if password is emtpy leave a 1 space string, the service trims the value,\n                       if empty Configuration assumes it is NULL.\n        ",
+    "filename" : "oozie-site.xml"
+  }, {
+    "name" : "oozie.service.JPAService.pool.max.active.conn",
+    "value" : "10",
+    "description" : "\n             Max number of connections.\n        ",
+    "filename" : "oozie-site.xml"
+  } ],
+  "components" : [ {
+    "name" : "OOZIE_SERVER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "OOZIE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientOnlyService" : false,
+  "clientComponent" : {
+    "name" : "OOZIE_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}

+ 90 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/WEBHCAT.json

@@ -0,0 +1,90 @@
+{
+  "name" : "WEBHCAT",
+  "version" : "0.5.0",
+  "user" : "root",
+  "comment" : "This is comment for WEBHCAT service",
+  "properties" : [ {
+    "name" : "templeton.port",
+    "value" : "50111",
+    "description" : "The HTTP port for the main server.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.hadoop.conf.dir",
+    "value" : "/etc/hadoop/conf",
+    "description" : "The path to the Hadoop configuration.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.jar",
+    "value" : "/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar",
+    "description" : "The path to the Templeton jar file.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.libjars",
+    "value" : "/usr/lib/zookeeper/zookeeper.jar",
+    "description" : "Jars to add the the classpath.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.hadoop",
+    "value" : "/usr/bin/hadoop",
+    "description" : "The path to the Hadoop executable.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.pig.archive",
+    "value" : "hdfs:///apps/webhcat/pig.tar.gz",
+    "description" : "The path to the Pig archive.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.pig.path",
+    "value" : "pig.tar.gz/pig/bin/pig",
+    "description" : "The path to the Pig executable.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.hcat",
+    "value" : "/usr/bin/hcat",
+    "description" : "The path to the hcatalog executable.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.hive.archive",
+    "value" : "hdfs:///apps/webhcat/hive.tar.gz",
+    "description" : "The path to the Hive archive.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.hive.path",
+    "value" : "hive.tar.gz/hive/bin/hive",
+    "description" : "The path to the Hive executable.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.storage.class",
+    "value" : "org.apache.hcatalog.templeton.tool.ZooKeeperStorage",
+    "description" : "The class to use as storage",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.override.enabled",
+    "value" : "false",
+    "description" : "\n     Enable the override path in templeton.override.jars\n   ",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.streaming.jar",
+    "value" : "hdfs:///apps/webhcat/hadoop-streaming.jar",
+    "description" : "The hdfs path to the Hadoop streaming jar file.",
+    "filename" : "webhcat-site.xml"
+  }, {
+    "name" : "templeton.exec.timeout",
+    "value" : "60000",
+    "description" : "Time out for templeton api",
+    "filename" : "webhcat-site.xml"
+  } ],
+  "components" : [ {
+    "name" : "WEBHCAT_SERVER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  } ],
+  "clientOnlyService" : false,
+  "clientComponent" : {
+    "name" : "WEBHCAT_SERVER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }
+}

+ 25 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version122/ZOOKEEPER.json

@@ -0,0 +1,25 @@
+{
+  "name" : "ZOOKEEPER",
+  "version" : "3.4.5",
+  "user" : "root",
+  "comment" : "This is comment for ZOOKEEPER service",
+  "properties" : [ ],
+  "components" : [ {
+    "name" : "ZOOKEEPER_SERVER",
+    "category" : "MASTER",
+    "client" : false,
+    "master" : true
+  }, {
+    "name" : "ZOOKEEPER_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  } ],
+  "clientOnlyService" : false,
+  "clientComponent" : {
+    "name" : "ZOOKEEPER_CLIENT",
+    "category" : "CLIENT",
+    "client" : true,
+    "master" : false
+  }
+}

+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HBASE.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/HBASE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HCATALOG.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/HCATALOG.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HDFS.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/HDFS.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HIVE.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/HIVE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/HUE.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/HUE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/MAPREDUCE.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/MAPREDUCE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/OOZIE.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/OOZIE.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/WEBHCAT.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/WEBHCAT.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/ZOOKEEPER.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/ZOOKEEPER.json


+ 0 - 0
ambari-web/app/assets/data/wizard/stack/hdp/version1.3.0/global.json → ambari-web/app/assets/data/wizard/stack/hdp/version130/global.json


+ 0 - 65
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/GANGLIA.json

@@ -1,65 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations?fields=*&_=1368459065278",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_runtime_dir",
-      "StackConfigurations" : {
-        "property_description" : "Run directories for Ganglia",
-        "property_value" : "/var/run/ganglia/hdp",
-        "stack_version" : "1.3.0",
-        "property_name" : "ganglia_runtime_dir",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmetad_user",
-      "StackConfigurations" : {
-        "property_description" : "User ",
-        "property_value" : "nobody",
-        "stack_version" : "1.3.0",
-        "property_name" : "gmetad_user",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/rrdcached_base_dir",
-      "StackConfigurations" : {
-        "property_description" : "Default directory for saving the rrd files on ganglia server",
-        "property_value" : "/var/lib/ganglia/rrds",
-        "stack_version" : "1.3.0",
-        "property_name" : "rrdcached_base_dir",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/gmond_user",
-      "StackConfigurations" : {
-        "property_description" : "User ",
-        "property_value" : "nobody",
-        "stack_version" : "1.3.0",
-        "property_name" : "gmond_user",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/GANGLIA/configurations/ganglia_conf_dir",
-      "StackConfigurations" : {
-        "property_description" : "Config directory for Ganglia",
-        "property_value" : "/etc/ganglia/hdp",
-        "stack_version" : "1.3.0",
-        "property_name" : "ganglia_conf_dir",
-        "service_name" : "GANGLIA",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    }
-  ]
-}

+ 0 - 281
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HBASE.json

@@ -1,281 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/zookeeper_sessiontimeout",
-      "StackConfigurations" : {
-        "property_description" : "ZooKeeper Session Timeout",
-        "property_value" : "60000",
-        "stack_version" : "1.3.0",
-        "property_name" : "zookeeper_sessiontimeout",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_master_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "HBase Master Heap Size",
-        "property_value" : "1024",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_master_heapsize",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstore_compactionthreshold",
-      "StackConfigurations" : {
-        "property_description" : "HBase HStore compaction threshold.",
-        "property_value" : "3",
-        "stack_version" : "1.3.0",
-        "property_name" : "hstore_compactionthreshold",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_blockcache_size",
-      "StackConfigurations" : {
-        "property_description" : "HFile block cache size.",
-        "property_value" : "0.25",
-        "stack_version" : "1.3.0",
-        "property_name" : "hfile_blockcache_size",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.client.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for HRegionInterface protocol implementations (ie. \n    clients talking to HRegionServers)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.client.protocol.acl",
-        "service_name" : "HBASE",
-        "type" : "hbase-policy.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_blockmultiplier",
-      "StackConfigurations" : {
-        "property_description" : "HBase Region Block Multiplier",
-        "property_value" : "2",
-        "stack_version" : "1.3.0",
-        "property_name" : "hregion_blockmultiplier",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.useMulti",
-      "StackConfigurations" : {
-        "property_description" : "Instructs HBase to make use of ZooKeeper's multi-update functionality.\n    This allows certain ZooKeeper operations to complete more quickly and prevents some issues\n    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).В·\n    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+\n    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will\n    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).\n    ",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase.zookeeper.useMulti",
-        "service_name" : "HBASE",
-        "type" : "hbase-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_conf_dir",
-      "StackConfigurations" : {
-        "property_description" : "Config Directory for HBase.",
-        "property_value" : "/etc/hbase",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_conf_dir",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.zookeeper.property.clientPort",
-      "StackConfigurations" : {
-        "property_description" : "Property from ZooKeeper's config zoo.cfg.\n    The port at which the clients will connect.\n    ",
-        "property_value" : "2181",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase.zookeeper.property.clientPort",
-        "service_name" : "HBASE",
-        "type" : "hbase-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_log_dir",
-      "StackConfigurations" : {
-        "property_description" : "Log Directories for HBase.",
-        "property_value" : "/var/log/hbase",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_log_dir",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_tmp_dir",
-      "StackConfigurations" : {
-        "property_description" : "Hbase temp directory",
-        "property_value" : "/var/log/hbase",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_tmp_dir",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hfile_max_keyvalue_size",
-      "StackConfigurations" : {
-        "property_description" : "HBase Client Maximum key-value Size",
-        "property_value" : "10485760",
-        "stack_version" : "1.3.0",
-        "property_name" : "hfile_max_keyvalue_size",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_regionserver_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "Log Directories for HBase.",
-        "property_value" : "1024",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_regionserver_heapsize",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hregion_majorcompaction",
-      "StackConfigurations" : {
-        "property_description" : "HBase Major Compaction.",
-        "property_value" : "86400000",
-        "stack_version" : "1.3.0",
-        "property_name" : "hregion_majorcompaction",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/client_scannercaching",
-      "StackConfigurations" : {
-        "property_description" : "Base Client Scanner Caching",
-        "property_value" : "100",
-        "stack_version" : "1.3.0",
-        "property_name" : "client_scannercaching",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.masterregion.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for HMasterRegionInterface protocol implementations\n    (for HRegionServers communicating with HMaster)\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.masterregion.protocol.acl",
-        "service_name" : "HBASE",
-        "type" : "hbase-policy.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.cluster.distributed",
-      "StackConfigurations" : {
-        "property_description" : "The mode the cluster will be in. Possible values are\n      false for standalone mode and true for distributed mode.  If\n      false, startup will run all HBase and ZooKeeper daemons together\n      in the one JVM.\n    ",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase.cluster.distributed",
-        "service_name" : "HBASE",
-        "type" : "hbase-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.master.lease.thread.wakefrequency",
-      "StackConfigurations" : {
-        "property_description" : "The interval between checks for expired region server leases.\n    This value has been reduced due to the other reduced values above so that\n    the master will notice a dead region server sooner. The default is 15 seconds.\n    ",
-        "property_value" : "3000",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase.master.lease.thread.wakefrequency",
-        "service_name" : "HBASE",
-        "type" : "hbase-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/regionserver_handlers",
-      "StackConfigurations" : {
-        "property_description" : "HBase RegionServer Handler",
-        "property_value" : "30",
-        "stack_version" : "1.3.0",
-        "property_name" : "regionserver_handlers",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase.regionserver.optionalcacheflushinterval",
-      "StackConfigurations" : {
-        "property_description" : "\n      Amount of time to wait since the last time a region was flushed before\n      invoking an optional cache flush. Default 60,000.\n    ",
-        "property_value" : "10000",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase.regionserver.optionalcacheflushinterval",
-        "service_name" : "HBASE",
-        "type" : "hbase-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hbase_pid_dir",
-      "StackConfigurations" : {
-        "property_description" : "Log Directories for HBase.",
-        "property_value" : "/var/run/hbase",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_pid_dir",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/hstorefile_maxsize",
-      "StackConfigurations" : {
-        "property_description" : "Maximum HStoreFile Size",
-        "property_value" : "1073741824",
-        "stack_version" : "1.3.0",
-        "property_name" : "hstorefile_maxsize",
-        "service_name" : "HBASE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HBASE/configurations/security.admin.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for HMasterInterface protocol implementation (ie. \n    clients talking to HMaster for admin operations).\n    The ACL is a comma-separated list of user and group names. The user and \n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\". \n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.admin.protocol.acl",
-        "service_name" : "HBASE",
-        "type" : "hbase-policy.xml",
-        "stack_name" : "HDP"
-      }
-    }
-  ]
-}

+ 0 - 4
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HCATALOG.json

@@ -1,4 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HCATALOG/configurations?fields=*",
-  "items" : [ ]
-}

+ 0 - 737
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HDFS.json

@@ -1,737 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.blockreport.initialDelay",
-      "StackConfigurations" : {
-        "property_description" : "Delay for first block report in seconds.",
-        "property_value" : "120",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.blockreport.initialDelay",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.safemode.threshold.pct",
-      "StackConfigurations" : {
-        "property_description" : "\n        Specifies the percentage of blocks that should satisfy\n        the minimal replication requirement defined by dfs.replication.min.\n        Values less than or equal to 0 mean not to start in safe mode.\n        Values greater than 1 will make safe mode permanent.\n        ",
-        "property_value" : "1.0f",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.safemode.threshold.pct",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_dir",
-      "StackConfigurations" : {
-        "property_description" : "Secondary NameNode checkpoint dir.",
-        "property_value" : "/hadoop/hdfs/namesecondary",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs_checkpoint_dir",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.umaskmode",
-      "StackConfigurations" : {
-        "property_description" : "\nThe octal umask used when creating files and directories.\n",
-        "property_value" : "077",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.umaskmode",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.compression.codec.lzo.class",
-      "StackConfigurations" : {
-        "property_description" : "The implementation for lzo codec.",
-        "property_value" : "com.hadoop.compression.lzo.LzoCodec",
-        "stack_version" : "1.3.0",
-        "property_name" : "io.compression.codec.lzo.class",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.heartbeat.interval",
-      "StackConfigurations" : {
-        "property_description" : "Determines datanode heartbeat interval in seconds.",
-        "property_value" : "3",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.heartbeat.interval",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_datanode_failed_volume_tolerated",
-      "StackConfigurations" : {
-        "property_description" : "DataNode volumes failure toleration",
-        "property_value" : "0",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs_datanode_failed_volume_tolerated",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_name_dir",
-      "StackConfigurations" : {
-        "property_description" : "NameNode Directories.",
-        "property_value" : "/hadoop/hdfs/namenode",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs_name_dir",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_size",
-      "StackConfigurations" : {
-        "property_description" : "FS Checkpoint Size.",
-        "property_value" : "0.5",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs_checkpoint_size",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.balance.bandwidthPerSec",
-      "StackConfigurations" : {
-        "property_description" : "\n        Specifies the maximum amount of bandwidth that each datanode\n        can utilize for the balancing purpose in term of\n        the number of bytes per second.\n  ",
-        "property_value" : "6250000",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.balance.bandwidthPerSec",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_maxnewsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode maximum new generation size",
-        "property_value" : "640",
-        "stack_version" : "1.3.0",
-        "property_name" : "namenode_opt_maxnewsize",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.edits.dir",
-      "StackConfigurations" : {
-        "property_description" : "Determines where on the local filesystem the DFS secondary\n        name node should store the temporary edits to merge.\n        If this is a comma-delimited list of directoires then teh edits is\n        replicated in all of the directoires for redundancy.\n        Default value is same as fs.checkpoint.dir\n    ",
-        "property_value" : "${fs.checkpoint.dir}",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs.checkpoint.edits.dir",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/keytab_path",
-      "StackConfigurations" : {
-        "property_description" : "KeyTab Directory.",
-        "property_value" : "/etc/security/keytabs",
-        "stack_version" : "1.3.0",
-        "property_name" : "keytab_path",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.size",
-      "StackConfigurations" : {
-        "property_description" : "The default block size for new files.",
-        "property_value" : "134217728",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.block.size",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security_enabled",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop Security",
-        "property_value" : "false",
-        "stack_version" : "1.3.0",
-        "property_name" : "security_enabled",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.serializations",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "org.apache.hadoop.io.serializer.WritableSerialization",
-        "stack_version" : "1.3.0",
-        "property_name" : "io.serializations",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.task.umbilical.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for TaskUmbilicalProtocol, used by the map and reduce\n    tasks to communicate with the parent tasktracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.task.umbilical.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/kerberos_domain",
-      "StackConfigurations" : {
-        "property_description" : "Kerberos realm.",
-        "property_value" : "EXAMPLE.COM",
-        "stack_version" : "1.3.0",
-        "property_name" : "kerberos_domain",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_data_dir",
-      "StackConfigurations" : {
-        "property_description" : "Data directories for Data Nodes.",
-        "property_value" : "/hadoop/hdfs/data",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs_data_dir",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hdfs_log_dir_prefix",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop Log Dir Prefix",
-        "property_value" : "/var/log/hadoop",
-        "stack_version" : "1.3.0",
-        "property_name" : "hdfs_log_dir_prefix",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for DatanodeProtocol, which is used by datanodes to\n    communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.datanode.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connect.max.retries",
-      "StackConfigurations" : {
-        "property_description" : "Defines the maximum number of retries for IPC connections.",
-        "property_value" : "50",
-        "stack_version" : "1.3.0",
-        "property_name" : "ipc.client.connect.max.retries",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs_checkpoint_period",
-      "StackConfigurations" : {
-        "property_description" : "HDFS Maximum Checkpoint Delay",
-        "property_value" : "21600",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs_checkpoint_period",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "The number of server threads for the namenode.",
-        "property_value" : "40",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.namenode.handler.count",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.ipc.address",
-      "StackConfigurations" : {
-        "property_description" : "\nThe datanode ipc server address and port.\nIf the port is 0 then the server will start on a free port.\n",
-        "property_value" : "0.0.0.0:8010",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.datanode.ipc.address",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs_webhdfs_enabled",
-      "StackConfigurations" : {
-        "property_description" : "WebHDFS enabled",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs_webhdfs_enabled",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.trash.interval",
-      "StackConfigurations" : {
-        "property_description" : "Number of minutes between trash checkpoints.\n  If zero, the trash feature is disabled.\n  ",
-        "property_value" : "360",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs.trash.interval",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.secondary.https.port",
-      "StackConfigurations" : {
-        "property_description" : "The https port where secondary-namenode binds",
-        "property_value" : "50490",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.secondary.https.port",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/datanode_du_reserved",
-      "StackConfigurations" : {
-        "property_description" : "Reserved space for HDFS",
-        "property_value" : "1",
-        "stack_version" : "1.3.0",
-        "property_name" : "datanode_du_reserved",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/io.file.buffer.size",
-      "StackConfigurations" : {
-        "property_description" : "The size of buffer for use in sequence files.\n  The size of this buffer should probably be a multiple of hardware\n  page size (4096 on Intel x86), and it determines how much data is\n  buffered during read and write operations.",
-        "property_value" : "131072",
-        "stack_version" : "1.3.0",
-        "property_name" : "io.file.buffer.size",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.replication.max",
-      "StackConfigurations" : {
-        "property_description" : "Maximal block replication.\n  ",
-        "property_value" : "50",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.replication.max",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_pid_dir_prefix",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop PID Dir Prefix",
-        "property_value" : "/var/run/hadoop",
-        "stack_version" : "1.3.0",
-        "property_name" : "hadoop_pid_dir_prefix",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for InterDatanodeProtocol, the inter-datanode protocol\n    for updating generation timestamp.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.inter.datanode.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.socket.write.timeout",
-      "StackConfigurations" : {
-        "property_description" : "DFS Client write socket timeout",
-        "property_value" : "0",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.datanode.socket.write.timeout",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.max.xcievers",
-      "StackConfigurations" : {
-        "property_description" : "PRIVATE CONFIG VARIABLE",
-        "property_value" : "4096",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.datanode.max.xcievers",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.max.response.size",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "5242880",
-        "stack_version" : "1.3.0",
-        "property_name" : "ipc.server.max.response.size",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.size",
-      "StackConfigurations" : {
-        "property_description" : "The size of the current edit log (in bytes) that triggers\n       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.\n  ",
-        "property_value" : "536870912",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs.checkpoint.size",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.namenode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for NamenodeProtocol, the protocol used by the secondary\n    namenode to communicate with the namenode.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.namenode.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions",
-      "StackConfigurations" : {
-        "property_description" : "\nIf \"true\", enable permission checking in HDFS.\nIf \"false\", permission checking is turned off,\nbut all other behavior is unchanged.\nSwitching from one parameter value to the other does not change the mode,\nowner or group of files or directories.\n",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.permissions",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.https.port",
-      "StackConfigurations" : {
-        "property_description" : "The https port where namenode binds",
-        "property_value" : "50470",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.https.port",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode Java heap size",
-        "property_value" : "1024",
-        "stack_version" : "1.3.0",
-        "property_name" : "namenode_heapsize",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.namenode.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "Added to grow Queue size so that more client connections are allowed",
-        "property_value" : "100",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.namenode.handler.count",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.web.ugi",
-      "StackConfigurations" : {
-        "property_description" : "The user account used by the web interface.\nSyntax: USERNAME,GROUP1,GROUP2, ...\n",
-        "property_value" : "gopher,gopher",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.web.ugi",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.du.pct",
-      "StackConfigurations" : {
-        "property_description" : "When calculating remaining space, only use this percentage of the real available space\n",
-        "property_value" : "0.85f",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.datanode.du.pct",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/fs.checkpoint.period",
-      "StackConfigurations" : {
-        "property_description" : "The number of seconds between two periodic checkpoints.\n  ",
-        "property_value" : "21600",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs.checkpoint.period",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.block.access.token.enable",
-      "StackConfigurations" : {
-        "property_description" : "\nIf \"true\", access tokens are used as capabilities for accessing datanodes.\nIf \"false\", no access tokens are checked on accessing datanodes.\n",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.block.access.token.enable",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.cluster.administrators",
-      "StackConfigurations" : {
-        "property_description" : "ACL for who all can view the default servlets in the HDFS",
-        "property_value" : " hdfs",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.cluster.administrators",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dtnode_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "DataNode maximum Java heap size",
-        "property_value" : "1024",
-        "stack_version" : "1.3.0",
-        "property_name" : "dtnode_heapsize",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.job.submission.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for JobSubmissionProtocol, used by job clients to\n    communciate with the jobtracker for job submission, querying job status etc.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.job.submission.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/webinterface.private.actions",
-      "StackConfigurations" : {
-        "property_description" : " If set to true, the web interfaces of JT and NN may contain\n                actions, such as kill job, delete file, etc., that should\n                not be exposed to public. Enable this option if the interfaces\n                are only reachable by those who have the right authorization.\n  ",
-        "property_value" : "false",
-        "stack_version" : "1.3.0",
-        "property_name" : "webinterface.private.actions",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.connection.maxidletime",
-      "StackConfigurations" : {
-        "property_description" : "The maximum time after which a client will bring down the\n               connection to the server.\n  ",
-        "property_value" : "30000",
-        "stack_version" : "1.3.0",
-        "property_name" : "ipc.client.connection.maxidletime",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.permissions.supergroup",
-      "StackConfigurations" : {
-        "property_description" : "The name of the group of super-users.",
-        "property_value" : "hdfs",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.permissions.supergroup",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/hadoop_heapsize",
-      "StackConfigurations" : {
-        "property_description" : "Hadoop maximum Java heap size",
-        "property_value" : "1024",
-        "stack_version" : "1.3.0",
-        "property_name" : "hadoop_heapsize",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.client.idlethreshold",
-      "StackConfigurations" : {
-        "property_description" : "Defines the threshold number of connections after which\n               connections will be inspected for idleness.\n  ",
-        "property_value" : "8000",
-        "stack_version" : "1.3.0",
-        "property_name" : "ipc.client.idlethreshold",
-        "filename" : "core-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.inter.tracker.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for InterTrackerProtocol, used by the tasktrackers to\n    communicate with the jobtracker.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.inter.tracker.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.datanode.failed.volumes.tolerated",
-      "StackConfigurations" : {
-        "property_description" : "Number of failed disks datanode would tolerate",
-        "property_value" : "0",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.datanode.failed.volumes.tolerated",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/namenode_opt_newsize",
-      "StackConfigurations" : {
-        "property_description" : "NameNode new generation size",
-        "property_value" : "200",
-        "stack_version" : "1.3.0",
-        "property_name" : "namenode_opt_newsize",
-        "filename" : "global.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.datanode.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for ClientDatanodeProtocol, the client-to-datanode protocol\n    for block recovery.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.client.datanode.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/security.client.protocol.acl",
-      "StackConfigurations" : {
-        "property_description" : "ACL for ClientProtocol, which is used by user code\n    via the DistributedFileSystem.\n    The ACL is a comma-separated list of user and group names. The user and\n    group list is separated by a blank. For e.g. \"alice,bob users,wheel\".\n    A special value of \"*\" means all users are allowed.",
-        "property_value" : "*",
-        "stack_version" : "1.3.0",
-        "property_name" : "security.client.protocol.acl",
-        "filename" : "hadoop-policy.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/ipc.server.read.threadpool.size",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "5",
-        "stack_version" : "1.3.0",
-        "property_name" : "ipc.server.read.threadpool.size",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HDFS/configurations/dfs.access.time.precision",
-      "StackConfigurations" : {
-        "property_description" : "The access time for HDFS file is precise upto this value.\n               The default value is 1 hour. Setting a value of 0 disables\n               access times for HDFS.\n  ",
-        "property_value" : "0",
-        "stack_version" : "1.3.0",
-        "property_name" : "dfs.access.time.precision",
-        "filename" : "hdfs-site.xml",
-        "service_name" : "HDFS",
-        "stack_name" : "HDP"
-      }
-    }
-  ]
-}

+ 0 - 209
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HIVE.json

@@ -1,209 +0,0 @@
-{
-  "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/javax.jdo.option.ConnectionDriverName",
-      "StackConfigurations" : {
-        "property_description" : "Driver class name for a JDBC metastore",
-        "property_value" : "com.mysql.jdbc.Driver",
-        "stack_version" : "1.3.0",
-        "property_name" : "javax.jdo.option.ConnectionDriverName",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_pid_dir",
-      "StackConfigurations" : {
-        "property_description" : "Hive PID Dir.",
-        "property_value" : "/var/run/hive",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive_pid_dir",
-        "filename" : "global.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.enabled",
-      "StackConfigurations" : {
-        "property_description" : "enable or disable the hive client authorization",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.security.authorization.enabled",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_conf_dir",
-      "StackConfigurations" : {
-        "property_description" : "Hive Conf Dir.",
-        "property_value" : "/etc/hive/conf",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive_conf_dir",
-        "filename" : "global.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hadoop.clientside.fs.operations",
-      "StackConfigurations" : {
-        "property_description" : "FS operations are owned by client",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "hadoop.clientside.fs.operations",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.security.authorization.manager",
-      "StackConfigurations" : {
-        "property_description" : "the hive client authorization manager class name.\n    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  ",
-        "property_value" : "org.apache.hcatalog.security.HdfsAuthorizationProvider",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.security.authorization.manager",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/fs.hdfs.impl.disable.cache",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "fs.hdfs.impl.disable.cache",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.warehouse.dir",
-      "StackConfigurations" : {
-        "property_description" : "location of default database for the warehouse",
-        "property_value" : "/apps/hive/warehouse",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.metastore.warehouse.dir",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.semantic.analyzer.factory.impl",
-      "StackConfigurations" : {
-        "property_description" : "controls which SemanticAnalyzerFactory implemenation class is used by CLI",
-        "property_value" : "org.apache.hivealog.cli.HCatSemanticAnalyzerFactory",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.semantic.analyzer.factory.impl",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_aux_jars_path",
-      "StackConfigurations" : {
-        "property_description" : "Hive auxiliary jar path.",
-        "property_value" : "/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive_aux_jars_path",
-        "filename" : "global.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.client.socket.timeout",
-      "StackConfigurations" : {
-        "property_description" : "MetaStore Client socket timeout in seconds",
-        "property_value" : "60",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.metastore.client.socket.timeout",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.server2.enable.doAs",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.server2.enable.doAs",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.cache.pinobjtypes",
-      "StackConfigurations" : {
-        "property_description" : "List of comma separated metastore object types that should be pinned in the cache",
-        "property_value" : "Table,Database,Type,FieldSchema,Order",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.metastore.cache.pinobjtypes",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.execute.setugi",
-      "StackConfigurations" : {
-        "property_description" : "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and     server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.",
-        "property_value" : "true",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.metastore.execute.setugi",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/mysql_connector_url",
-      "StackConfigurations" : {
-        "property_description" : "Hive PID Dir.",
-        "property_value" : "${download_url}/mysql-connector-java-5.1.18.zip",
-        "stack_version" : "1.3.0",
-        "property_name" : "mysql_connector_url",
-        "filename" : "global.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive.metastore.local",
-      "StackConfigurations" : {
-        "property_description" : "controls whether to connect to remove metastore server or\n    open a new metastore server in Hive Client JVM",
-        "property_value" : "false",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive.metastore.local",
-        "filename" : "hive-site.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://dev.hortonworks.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HIVE/configurations/hive_log_dir",
-      "StackConfigurations" : {
-        "property_description" : "Directory for Hive Log files.",
-        "property_value" : "/var/log/hive",
-        "stack_version" : "1.3.0",
-        "property_name" : "hive_log_dir",
-        "filename" : "global.xml",
-        "service_name" : "HIVE",
-        "stack_name" : "HDP"
-      }
-    }
-  ]
-}

+ 0 - 353
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/HUE.json

@@ -1,353 +0,0 @@
-{
-  "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/pig_shell_command",
-      "StackConfigurations" : {
-        "property_description" : "Define and configure a new shell type pig.",
-        "property_value" : "/usr/bin/pig -l /dev/null",
-        "stack_version" : "1.3.0",
-        "property_name" : "pig_shell_command",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_name",
-      "StackConfigurations" : {
-        "property_description" : "Configuration options for specifying the Desktop Database.",
-        "property_value" : "sandbox",
-        "stack_version" : "1.3.0",
-        "property_name" : "db_name",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_user",
-      "StackConfigurations" : {
-        "property_description" : "Configuration options for specifying the Desktop Database.",
-        "property_value" : "sandbox",
-        "stack_version" : "1.3.0",
-        "property_name" : "db_user",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_host",
-      "StackConfigurations" : {
-        "property_description" : "Configuration options for specifying the Desktop Database.",
-        "property_value" : "localhost",
-        "stack_version" : "1.3.0",
-        "property_name" : "db_host",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_password",
-      "StackConfigurations" : {
-        "property_description" : "Configuration options for specifying the Desktop Database.",
-        "property_value" : "1111",
-        "stack_version" : "1.3.0",
-        "property_name" : "db_password",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/time_zone",
-      "StackConfigurations" : {
-        "property_description" : "Time zone name",
-        "property_value" : "America/Los_Angeles",
-        "stack_version" : "1.3.0",
-        "property_name" : "time_zone",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_host",
-      "StackConfigurations" : {
-        "property_description" : "Webserver listens on this address and port",
-        "property_value" : "0.0.0.0",
-        "stack_version" : "1.3.0",
-        "property_name" : "http_host",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hue_pid_dir",
-      "StackConfigurations" : {
-        "property_description" : "Hue Pid Dir.",
-        "property_value" : "/var/run/hue",
-        "stack_version" : "1.3.0",
-        "property_name" : "hue_pid_dir",
-        "service_name" : "HUE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/tls",
-      "StackConfigurations" : {
-        "property_description" : "Whether to use a TLS (secure) connection when talking to the SMTP server.",
-        "property_value" : "no",
-        "stack_version" : "1.3.0",
-        "property_name" : "tls",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hadoop_mapred_home",
-      "StackConfigurations" : {
-        "property_description" : "The SMTP server information for email notification delivery.",
-        "property_value" : "/usr/lib/hadoop/lib",
-        "stack_version" : "1.3.0",
-        "property_name" : "hadoop_mapred_home",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/default_from_email",
-      "StackConfigurations" : {
-        "property_description" : "The SMTP server information for email notification delivery.",
-        "property_value" : "sandbox@hortonworks.com",
-        "stack_version" : "1.3.0",
-        "property_name" : "default_from_email",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/backend_auth_policy",
-      "StackConfigurations" : {
-        "property_description" : "Authentication backend.",
-        "property_value" : "desktop.auth.backend.AllowAllBackend",
-        "stack_version" : "1.3.0",
-        "property_name" : "backend_auth_policy",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hue_log_dir",
-      "StackConfigurations" : {
-        "property_description" : "Hue Log Dir.",
-        "property_value" : "/var/log/hue",
-        "stack_version" : "1.3.0",
-        "property_name" : "hue_log_dir",
-        "service_name" : "HUE",
-        "type" : "global.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/whitelist",
-      "StackConfigurations" : {
-        "property_description" : "proxy settings",
-        "property_value" : "(localhost|127\\.0\\.0\\.1):(50030|50070|50060|50075|50111)",
-        "stack_version" : "1.3.0",
-        "property_name" : "whitelist",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/jobtracker_port",
-      "StackConfigurations" : {
-        "property_description" : "The port where the JobTracker IPC listens on.",
-        "property_value" : "50030",
-        "stack_version" : "1.3.0",
-        "property_name" : "jobtracker_port",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_port",
-      "StackConfigurations" : {
-        "property_description" : "Configuration options for specifying the Desktop Database.",
-        "property_value" : "3306",
-        "stack_version" : "1.3.0",
-        "property_name" : "db_port",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_port",
-      "StackConfigurations" : {
-        "property_description" : "The SMTP server information for email notification delivery.",
-        "property_value" : "25",
-        "stack_version" : "1.3.0",
-        "property_name" : "smtp_port",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/database_logging",
-      "StackConfigurations" : {
-        "property_description" : "To show database transactions, set database_logging to 1.\n      default, database_logging=0",
-        "property_value" : "0",
-        "stack_version" : "1.3.0",
-        "property_name" : "database_logging",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/send_debug_messages",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "1",
-        "stack_version" : "1.3.0",
-        "property_name" : "send_debug_messages",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_password",
-      "StackConfigurations" : {
-        "property_description" : "The SMTP server information for email notification delivery.",
-        "property_value" : "25",
-        "stack_version" : "1.3.0",
-        "property_name" : "smtp_password",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/django_debug_mode",
-      "StackConfigurations" : {
-        "property_description" : "Turn off debug",
-        "property_value" : "1",
-        "stack_version" : "1.3.0",
-        "property_name" : "django_debug_mode",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/use_cherrypy_server",
-      "StackConfigurations" : {
-        "property_description" : "Set to true to use CherryPy as the webserver, set to false\n      to use Spawning as the webserver. Defaults to Spawning if\n      key is not specified.",
-        "property_value" : "false",
-        "stack_version" : "1.3.0",
-        "property_name" : "use_cherrypy_server",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_shell_command",
-      "StackConfigurations" : {
-        "property_description" : "Define and configure a new shell type hbase.",
-        "property_value" : "/usr/bin/hbase shell",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_shell_command",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/bash_shell_command",
-      "StackConfigurations" : {
-        "property_description" : "Define and configure a new shell type bash for testing only\n      .",
-        "property_value" : "/bin/bash",
-        "stack_version" : "1.3.0",
-        "property_name" : "bash_shell_command",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/hbase_nice_name",
-      "StackConfigurations" : {
-        "property_description" : "Define and configure a new shell type hbase",
-        "property_value" : "HBase Shell",
-        "stack_version" : "1.3.0",
-        "property_name" : "hbase_nice_name",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_port",
-      "StackConfigurations" : {
-        "property_description" : "Webserver listens on this address and port",
-        "property_value" : "8000",
-        "stack_version" : "1.3.0",
-        "property_name" : "http_port",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/smtp_host",
-      "StackConfigurations" : {
-        "property_description" : "The SMTP server information for email notification delivery.",
-        "property_value" : "localhost",
-        "stack_version" : "1.3.0",
-        "property_name" : "smtp_host",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/db_engine",
-      "StackConfigurations" : {
-        "property_description" : "Configuration options for specifying the Desktop Database.",
-        "property_value" : "mysql",
-        "stack_version" : "1.3.0",
-        "property_name" : "db_engine",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    },
-    {
-      "href" : "http://ec2-72-44-40-84.compute-1.amazonaws.com:8080/api/v1/stacks2/HDP/versions/1.3.0/stackServices/HUE/configurations/http_500_debug_mode",
-      "StackConfigurations" : {
-        "property_description" : "Turn off backtrace for server error",
-        "property_value" : "1",
-        "stack_version" : "1.3.0",
-        "property_name" : "http_500_debug_mode",
-        "service_name" : "HUE",
-        "type" : "hue-site.xml",
-        "stack_name" : "HDP"
-      }
-    }
-  ]
-}

+ 0 - 545
ambari-web/app/assets/data/wizard/stack/hdp/version2.0.1/MAPREDUCE2.json

@@ -1,545 +0,0 @@
-{
-  "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations?fields=*",
-  "items" : [
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.system.dir",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "/mapred/system",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.system.dir",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.parallel.copies",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "30",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.reduce.parallel.copies",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.persist.jobstatus.active",
-      "StackConfigurations" : {
-        "property_description" : "Indicates if persistency of job status information is\n  active or not.\n  ",
-        "property_value" : "false",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.persist.jobstatus.active",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.blacklist.fault-timeout-window",
-      "StackConfigurations" : {
-        "property_description" : "\n    3-hour sliding window (value is in minutes)\n  ",
-        "property_value" : "180",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.blacklist.fault-timeout-window",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.cluster.administrators",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : " hadoop",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.cluster.administrators",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.fileoutputcommitter.marksuccessfuljobs",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "false",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.fileoutputcommitter.marksuccessfuljobs",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.shuffle.port",
-      "StackConfigurations" : {
-        "property_description" : "Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers.",
-        "property_value" : "8081",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.shuffle.port",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.tasktracker.tasks.sleeptime-before-sigkill",
-      "StackConfigurations" : {
-        "property_description" : "Normally, this is the amount of time before killing\n  processes, and the recommended-default is 5.000 seconds - a value of\n  5000 here.  In this case, we are using it solely to blast tasks before\n  killing them, and killing them very quickly (1/4 second) to guarantee\n  that we do not leave VMs around for later jobs.\n  ",
-        "property_value" : "250",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.tasktracker.tasks.sleeptime-before-sigkill",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.reduce.input.limit",
-      "StackConfigurations" : {
-        "property_description" : "The limit on the input size of the reduce. (This value\n  is 10 Gb.)  If the estimated input size of the reduce is greater than\n  this value, job is failed. A value of -1 means that there is no limit\n  set. ",
-        "property_value" : "10737418240",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.reduce.input.limit",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.tasks.speculative.execution",
-      "StackConfigurations" : {
-        "property_description" : "If true, then multiple instances of some reduce tasks\n               may be executed in parallel.",
-        "property_value" : "false",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.reduce.tasks.speculative.execution",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.blacklist.fault-bucket-width",
-      "StackConfigurations" : {
-        "property_description" : "\n    15-minute bucket size (value is in minutes)\n  ",
-        "property_value" : "15",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.blacklist.fault-bucket-width",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/io.sort.record.percent",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : ".2",
-        "stack_version" : "2.0.1",
-        "property_name" : "io.sort.record.percent",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.history.server.embedded",
-      "StackConfigurations" : {
-        "property_description" : "Should job history server be embedded within Job tracker\nprocess",
-        "property_value" : "false",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.history.server.embedded",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/hadoop.job.history.user.location",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "none",
-        "stack_version" : "2.0.1",
-        "property_name" : "hadoop.job.history.user.location",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.shuffle.input.buffer.percent",
-      "StackConfigurations" : {
-        "property_description" : "The percentage of memory to be allocated from the maximum heap\n  size to storing map outputs during the shuffle.\n  ",
-        "property_value" : "0.7",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.shuffle.input.buffer.percent",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.handler.count",
-      "StackConfigurations" : {
-        "property_description" : "\n    The number of server threads for the JobTracker. This should be roughly\n    4% of the number of tasktracker nodes.\n    ",
-        "property_value" : "50",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.handler.count",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobtracker.split.metainfo.maxsize",
-      "StackConfigurations" : {
-        "property_description" : "If the size of the split metainfo file is larger than this, the JobTracker will fail the job during\n    initialize.\n   ",
-        "property_value" : "50000000",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobtracker.split.metainfo.maxsize",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.healthChecker.interval",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "135000",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.healthChecker.interval",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.output.compression.type",
-      "StackConfigurations" : {
-        "property_description" : "If the job outputs are to compressed as SequenceFiles, how should\n               they be compressed? Should be one of NONE, RECORD or BLOCK.\n  ",
-        "property_value" : "BLOCK",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.output.compression.type",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobtracker.staging.root.dir",
-      "StackConfigurations" : {
-        "property_description" : "The Path prefix for where the staging directories should be placed. The next level is always the user's\n   name. It is a path in the default file system.",
-        "property_value" : "/user",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobtracker.staging.root.dir",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.default.acl-administer-jobs",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "*",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.queue.default.acl-administer-jobs",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-queue-acls.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.child.root.logger",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "INFO,TLA",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.child.root.logger",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobhistory.done-dir",
-      "StackConfigurations" : {
-        "property_description" : "Directory where history files are managed by the MR JobHistory Server.",
-        "property_value" : "/mr-history/done",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobhistory.done-dir",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.map.tasks.speculative.execution",
-      "StackConfigurations" : {
-        "property_description" : "If true, then multiple instances of some map tasks\n               may be executed in parallel.",
-        "property_value" : "false",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.map.tasks.speculative.execution",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.task.timeout",
-      "StackConfigurations" : {
-        "property_description" : "The number of milliseconds before a task will be\n  terminated if it neither reads an input, writes an output, nor\n  updates its status string.\n  ",
-        "property_value" : "600000",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.task.timeout",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.names",
-      "StackConfigurations" : {
-        "property_description" : " Comma separated list of queues configured for this jobtracker.",
-        "property_value" : "default",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.queue.names",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.tasktracker.group",
-      "StackConfigurations" : {
-        "property_description" : "The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.",
-        "property_value" : "hadoop",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.tasktracker.group",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.shuffle.merge.percent",
-      "StackConfigurations" : {
-        "property_description" : "The usage threshold at which an in-memory merge will be\n  initiated, expressed as a percentage of the total memory allocated to\n  storing in-memory map outputs, as defined by\n  mapred.job.shuffle.input.buffer.percent.\n  ",
-        "property_value" : "0.66",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.shuffle.merge.percent",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.retirejob.check",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "10000",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.retirejob.check",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.retirejob.interval",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "21600000",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.retirejob.interval",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.completeuserjobs.maximum",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "5",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.completeuserjobs.maximum",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.reduce.input.buffer.percent",
-      "StackConfigurations" : {
-        "property_description" : "The percentage of memory- relative to the maximum heap size- to\n  retain map outputs during the reduce. When the shuffle is concluded, any\n  remaining map outputs in memory must consume less than this threshold before\n  the reduce can begin.\n  ",
-        "property_value" : "0.0",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.reduce.input.buffer.percent",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapreduce.jobhistory.intermediate-done-dir",
-      "StackConfigurations" : {
-        "property_description" : "Directory where history files are written by MapReduce jobs.",
-        "property_value" : "/mr-history/tmp",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapreduce.jobhistory.intermediate-done-dir",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.inmem.merge.threshold",
-      "StackConfigurations" : {
-        "property_description" : "The threshold, in terms of the number of files\n  for the in-memory merge process. When we accumulate threshold number of files\n  we initiate the in-memory merge and spill to disk. A value of 0 or less than\n  0 indicates we want to DON'T have any threshold and instead depend only on\n  the ramfs's memory consumption to trigger the merge.\n  ",
-        "property_value" : "1000",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.inmem.merge.threshold",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.reuse.jvm.num.tasks",
-      "StackConfigurations" : {
-        "property_description" : "\n    How many tasks to run per jvm. If set to -1, there is no limit\n  ",
-        "property_value" : "1",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.reuse.jvm.num.tasks",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.history.completed.location",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "/mapred/history/done",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.history.completed.location",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/jetty.connector",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "org.mortbay.jetty.nio.SelectChannelConnector",
-        "stack_version" : "2.0.1",
-        "property_name" : "jetty.connector",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.reduce.slowstart.completed.maps",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "0.05",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.reduce.slowstart.completed.maps",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/tasktracker.http.threads",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "50",
-        "stack_version" : "2.0.1",
-        "property_name" : "tasktracker.http.threads",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/io.sort.factor",
-      "StackConfigurations" : {
-        "property_description" : "No description",
-        "property_value" : "100",
-        "stack_version" : "2.0.1",
-        "property_name" : "io.sort.factor",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.job.tracker.persist.jobstatus.hours",
-      "StackConfigurations" : {
-        "property_description" : "The number of hours job status information is persisted in DFS.\n    The job status information will be available after it drops of the memory\n    queue and between jobtracker restarts. With a zero value the job status\n    information is not persisted at all in DFS.\n  ",
-        "property_value" : "1",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.job.tracker.persist.jobstatus.hours",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.jobtracker.restart.recover",
-      "StackConfigurations" : {
-        "property_description" : "\"true\" to enable (job) recovery upon restart,\n               \"false\" to start afresh\n    ",
-        "property_value" : "false",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.jobtracker.restart.recover",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.max.tracker.blacklists",
-      "StackConfigurations" : {
-        "property_description" : "\n    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted\n  ",
-        "property_value" : "16",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.max.tracker.blacklists",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.healthChecker.script.timeout",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "60000",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.healthChecker.script.timeout",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-site.xml"
-      }
-    },
-    {
-      "href" : "http://192.168.56.101:8080/api/v1/stacks2/HDP/versions/2.0.1/stackServices/MAPREDUCE2/configurations/mapred.queue.default.acl-submit-job",
-      "StackConfigurations" : {
-        "property_description" : null,
-        "property_value" : "*",
-        "stack_version" : "2.0.1",
-        "property_name" : "mapred.queue.default.acl-submit-job",
-        "service_name" : "MAPREDUCE2",
-        "stack_name" : "HDP",
-        "type" : "mapred-queue-acls.xml"
-      }
-    }
-  ]
-}

Some files were not shown because too many files changed in this diff