Pārlūkot izejas kodu

AMBARI-1136 - Add gsInstaller resource provider. (Tom Beerbower via mahadev)

git-svn-id: https://svn.apache.org/repos/asf/incubator/ambari/trunk@1432294 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar 12 gadi atpakaļ
vecāks
revīzija
ff096b56b5
100 mainītis faili ar 2935 papildinājumiem un 550 dzēšanām
  1. 9 1
      .gitignore
  2. 2 0
      CHANGES.txt
  3. 15 0
      ambari-agent/conf/unix/ambari-agent
  4. 14 0
      ambari-agent/conf/unix/ambari-agent.ini
  5. 13 14
      ambari-agent/pom.xml
  6. 2 2
      ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb
  7. 14 20
      ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh
  8. 2 1
      ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb
  9. 16 8
      ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp
  10. 3 1
      ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp
  11. 13 5
      ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp
  12. 14 20
      ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh
  13. 5 5
      ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp
  14. 3 3
      ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp
  15. 6 3
      ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb
  16. 15 0
      ambari-agent/src/main/puppet/modules/hdp/manifests/.directory
  17. 15 3
      ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp
  18. 17 0
      ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp
  19. 5 3
      ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp
  20. 4 1
      ambari-agent/src/main/python/ambari_agent/puppetExecutor.py
  21. 7 7
      ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict
  22. 19 0
      ambari-agent/src/main/python/ambari_agent/site.pp
  23. 14 13
      ambari-agent/src/test/python/TestNetUtil.py
  24. 4 0
      ambari-agent/src/test/python/TestPuppetExecutor.py
  25. 4 9
      ambari-agent/src/test/python/examples/debug_testcase_example.py
  26. 5 5
      ambari-project/pom.xml
  27. 1 0
      ambari-server/conf/unix/ambari.properties
  28. 124 0
      ambari-server/docs/api/v1/clusters-cluster.md
  29. 39 0
      ambari-server/docs/api/v1/clusters.md
  30. 74 0
      ambari-server/docs/api/v1/components-component.md
  31. 65 0
      ambari-server/docs/api/v1/components.md
  32. 29 0
      ambari-server/docs/api/v1/host-component.md
  33. 30 0
      ambari-server/docs/api/v1/host-components.md
  34. 30 0
      ambari-server/docs/api/v1/hosts-host.md
  35. 29 0
      ambari-server/docs/api/v1/hosts.md
  36. 172 0
      ambari-server/docs/api/v1/index.md
  37. 70 0
      ambari-server/docs/api/v1/services-service.md
  38. 55 0
      ambari-server/docs/api/v1/services.md
  39. 11 12
      ambari-server/pom.xml
  40. 2 2
      ambari-server/src/main/java/org/apache/ambari/eventdb/db/PostgresConnector.java
  41. 5 2
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java
  42. 6 1
      ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java
  43. 12 0
      ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java
  44. 6 4
      ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java
  45. 6 2
      ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java
  46. 95 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/HostsMap.java
  47. 172 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java
  48. 73 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java
  49. 84 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java
  50. 89 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java
  51. 82 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java
  52. 52 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java
  53. 44 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java
  54. 148 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java
  55. 80 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java
  56. 346 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java
  57. 8 304
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java
  58. 21 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceImpl.java
  59. 7 17
      ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCProviderModule.java
  60. 7 0
      ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java
  61. 1 1
      ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigMappingDAO.java
  62. 22 12
      ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java
  63. 35 12
      ambari-server/src/main/python/ambari-server.py
  64. 8 8
      ambari-server/src/main/resources/stacks/HDP/1.2.0/repos/repoinfo.xml
  65. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/GANGLIA/metainfo.xml
  66. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/metainfo.xml
  67. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HCATALOG/metainfo.xml
  68. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/metainfo.xml
  69. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/metainfo.xml
  70. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/metainfo.xml
  71. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml
  72. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/metainfo.xml
  73. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/metainfo.xml
  74. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/SQOOP/metainfo.xml
  75. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/metainfo.xml
  76. 1 1
      ambari-server/src/main/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml
  77. 8 8
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml
  78. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml
  79. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml
  80. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml
  81. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml
  82. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml
  83. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml
  84. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml
  85. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml
  86. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml
  87. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml
  88. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml
  89. 1 1
      ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml
  90. 4 1
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java
  91. 3 2
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java
  92. 4 2
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java
  93. 1 0
      ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java
  94. 12 9
      ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java
  95. 3 3
      ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java
  96. 100 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java
  97. 100 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java
  98. 95 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java
  99. 101 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java
  100. 100 0
      ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java

+ 9 - 1
.gitignore

@@ -5,7 +5,15 @@
 .iml/
 .DS_Store
 target
+/ambari-server/derby.log
+/ambari-server/pass.txt
 /ambari-web/public/
 /ambari-web/node_modules/
 *.pyc
-*.py~
+*.py~
+*.iml
+.hg
+.hgignore
+.hgtags
+derby.log
+pass.txt

+ 2 - 0
CHANGES.txt

@@ -19,6 +19,8 @@ should be listed by their full name.
  AMBARI-1114. BootStrap fails but the api says thats its done and exit status
  is 0. (Nate Cole via mahadev)
 
+  AMBARI-1136 - Add gsInstaller resource provider. (Tom Beerbower via mahadev)
+
  BUG FIXES
 
  AMBARI-1126. Change SUSE lzo dependency to only lzo-devel. (nate cole via

+ 15 - 0
ambari-agent/conf/unix/ambari-agent

@@ -1,4 +1,19 @@
 #!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific
+
 # description: ambari-agent daemon
 # processname: ambari-agent
 

+ 14 - 0
ambari-agent/conf/unix/ambari-agent.ini

@@ -1,3 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific
+
 [server]
 hostname=localhost
 url_port=8440

+ 13 - 14
ambari-agent/pom.xml

@@ -280,20 +280,19 @@
           </execution>
         </executions>
       </plugin>
-        <plugin>
-            <groupId>org.apache.rat</groupId>
-            <artifactId>apache-rat-plugin</artifactId>
-            <version>0.8</version>
-            <configuration>
-                <excludes>
-                    <exclude>src/test/python/dummy*.txt</exclude>
-                </excludes>
-                <includes>
-                    <include>pom.xml</include>
-                </includes>
-            </configuration>
-        </plugin>
-
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/test/python/dummy*.txt</exclude>
+            <exclude>src/main/puppet/modules/stdlib/**</exclude>
+            <exclude>**/*.erb</exclude>
+            <exclude>src/main/python/ambari_agent/imports.txt</exclude>
+            <exclude>**/*.json</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
     </plugins>
     <extensions>
       <extension>

+ 2 - 2
ambari-agent/src/main/puppet/modules/hdp-hadoop/templates/hadoop-env.sh.erb

@@ -32,7 +32,7 @@ export HADOOP_HOME_WARN_SUPPRESS=1
 export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-<%=scope.function_hdp_template_var("conf_dir")%>}
 
 # The maximum amount of heap to use, in MB. Default is 1000.
-#export HADOOP_HEAPSIZE=
+export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("hadoop_heapsize")%>"
 
 export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms<%=scope.function_hdp_template_var("namenode_heapsize")%>"
 
@@ -45,7 +45,7 @@ HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC
 
 HADOOP_TASKTRACKER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("ttnode_heapsize")%> -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
 HADOOP_DATANODE_OPTS="-Xmx<%=scope.function_hdp_template_var("dtnode_heapsize")%> -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%> ${HADOOP_BALANCER_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx<%=scope.function_hdp_template_var("hadoop_heapsize")%>m ${HADOOP_BALANCER_OPTS}"
 
 export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile=<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/hs_err_pid%p.log -XX:NewSize=<%=scope.function_hdp_template_var("namenode_opt_newsize")%> -XX:MaxNewSize=<%=scope.function_hdp_template_var("namenode_opt_maxnewsize")%> -Xloggc:<%=scope.function_hdp_template_var("hdfs_log_dir_prefix")%>/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx<%=scope.function_hdp_template_var("namenode_heapsize")%> -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
 

+ 14 - 20
ambari-agent/src/main/puppet/modules/hdp-hcat/files/pigSmoke.sh

@@ -1,23 +1,17 @@
-/*
- *
- * licensed to the apache software foundation (asf) under one
- * or more contributor license agreements.  see the notice file
- * distributed with this work for additional information
- * regarding copyright ownership.  the asf licenses this file
- * to you under the apache license, version 2.0 (the
- * "license"); you may not use this file except in compliance
- * with the license.  you may obtain a copy of the license at
- *
- *   http://www.apache.org/licenses/license-2.0
- *
- * unless required by applicable law or agreed to in writing,
- * software distributed under the license is distributed on an
- * "as is" basis, without warranties or conditions of any
- * kind, either express or implied.  see the license for the
- * specific language governing permissions and limitations
- * under the license.
- *
- */
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
 
 A = load 'passwd' using PigStorage(':');
 B = foreach A generate \$0 as id;

+ 2 - 1
ambari-agent/src/main/puppet/modules/hdp-hive/templates/hive-env.sh.erb

@@ -36,7 +36,8 @@
 
 # The heap size of the jvm stared by hive shell script can be controlled via:
 
-export HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+export HADOOP_HEAPSIZE="<%=scope.function_hdp_template_var("::hdp::params::hadoop_heapsize")%>"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
 
 # Larger heap size may be required when running queries over large number of files or partitions.
 # By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be

+ 16 - 8
ambari-agent/src/main/puppet/modules/hdp-nagios/manifests/server/packages.pp

@@ -31,13 +31,7 @@ class hdp-nagios::server::packages(
   
 
   
-  if ($service_state == 'installed_and_configured') {
-    package{'nagios-plugins-process-old':
-      name   => 'nagios-plugins',
-      ensure => absent}
-  }
-	
-  hdp::package { 'nagios-server': 
+  hdp::package { 'nagios-server':
     ensure      => present,
     java_needed => false
   }
@@ -65,8 +59,22 @@ class hdp-nagios::server::packages(
   
 debug("## state: $service_state")
   if ($service_state == 'installed_and_configured') {
+
+    hdp::package::remove_pkg { 'hdp_mon_nagios_addons':
+      package_type => 'hdp_mon_nagios_addons'
+    }
+
+    hdp::package::remove_pkg { 'nagios-plugins':
+      package_type => 'nagios-plugins'
+    }
+
+    hdp::package::remove_pkg { 'nagios':
+      package_type => 'nagios'
+    }
+
     debug("##Adding removing dep")
-    Package['nagios-plugins-process-old'] -> Hdp::Package['nagios-plugins']
+    # Removing conflicting packages. Names of packages being removed are hardcoded and not resolved via hdp::params
+    Hdp::Package::Remove_pkg['hdp_mon_nagios_addons'] -> Hdp::Package::Remove_pkg['nagios-plugins'] -> Hdp::Package::Remove_pkg['nagios'] -> Hdp::Package['nagios-plugins']
   }
 
   Hdp::Package['nagios-plugins'] -> Hdp::Package['nagios-server'] -> Hdp::Package['nagios-fping'] -> Hdp::Package['nagios-addons'] -> Hdp::Package['nagios-php-pecl-json']

+ 3 - 1
ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/params.pp

@@ -42,7 +42,9 @@ class hdp-oozie::params() inherits hdp::params
   $oozie_tmp_dir = hdp_default("hadoop/oozie-env/oozie_tmp_dir","/var/tmp/oozie")
 
   $oozie_lib_dir = hdp_default("hadoop/oozie-env/oozie_lib_dir","/var/lib/oozie/")
-
+  
+  $oozie_webapps_dir = hdp_default("hadoop/oozie-env/oozie_webapps_dir","/var/lib/oozie/oozie-server/webapps/")
+  
   ### oozie-site
   $keytab_path = hdp_default("keytab_path","/etc/security/keytabs")
   if ($security_enabled == true) {

+ 13 - 5
ambari-agent/src/main/puppet/modules/hdp-oozie/manifests/service.pp

@@ -45,8 +45,8 @@ class hdp-oozie::service(
   $cmd3 =  "cd /usr/lib/oozie && chown ${user}:hadoop ${oozie_tmp}"    
   $cmd4 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 $jar_location -extjs $ext_js_path $lzo_jar_suffix"
   $cmd5 =  "cd ${oozie_tmp} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run ; echo 0"
-  $cmd6 =  "hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/${user}/share"
-  $cmd7 = "/usr/lib/oozie/bin/oozie-start.sh"
+  $cmd6 =  "su - ${user} -c 'hadoop dfs -put /usr/lib/oozie/share share ; hadoop dfs -chmod -R 755 /user/${user}/share'"
+  #$cmd7 = "/usr/lib/oozie/bin/oozie-start.sh"
 
   if ($ensure == 'installed_and_configured') {
     $sh_cmds = [$cmd1, $cmd2, $cmd3]
@@ -66,6 +66,7 @@ class hdp-oozie::service(
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_tmp_dir : }
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_data_dir : }
   hdp-oozie::service::directory { $hdp-oozie::params::oozie_lib_dir : }
+  hdp-oozie::service::directory { $hdp-oozie::params::oozie_webapps_dir : }
 
   anchor{'hdp-oozie::service::begin':} -> Hdp-oozie::Service::Directory<||> -> anchor{'hdp-oozie::service::end':}
   
@@ -74,9 +75,16 @@ class hdp-oozie::service(
     hdp-oozie::service::exec_user{$user_cmds:}
     Hdp-oozie::Service::Directory<||> -> Hdp-oozie::Service::Exec_sh[$cmd1] -> Hdp-oozie::Service::Exec_sh[$cmd2] ->Hdp-oozie::Service::Exec_sh[$cmd3] -> Hdp-oozie::Service::Exec_user[$cmd4] ->Hdp-oozie::Service::Exec_user[$cmd5] -> Anchor['hdp-oozie::service::end']
   } elsif ($ensure == 'running') {
-    $user_cmds = [$cmd6, $cmd7]
-    hdp-oozie::service::exec_user{$user_cmds:}
-    Hdp-oozie::Service::Exec_user[$cmd6] -> Hdp-oozie::Service::Exec_user[$cmd7] -> Anchor['hdp-oozie::service::end']
+    hdp::exec { "exec $cmd6" :
+      command => $cmd6,
+      unless => "hadoop dfs -ls /user/oozie/share | awk 'BEGIN {count=0;} /share/ {count++} END {if (count > 0) {exit 0} else {exit 1}}'"
+    }
+    hdp::exec { "exec $start_cmd":
+      command => $start_cmd,
+      unless  => $no_op_test,
+      initial_wait => $initial_wait,
+      require => Exec["exec $cmd6"]
+    }
   } elsif ($ensure == 'stopped') {
     hdp::exec { "exec $stop_cmd":
       command => $stop_cmd,

+ 14 - 20
ambari-agent/src/main/puppet/modules/hdp-pig/files/pigSmoke.sh

@@ -1,23 +1,17 @@
-/*
- *
- * licensed to the apache software foundation (asf) under one
- * or more contributor license agreements.  see the notice file
- * distributed with this work for additional information
- * regarding copyright ownership.  the asf licenses this file
- * to you under the apache license, version 2.0 (the
- * "license"); you may not use this file except in compliance
- * with the license.  you may obtain a copy of the license at
- *
- *   http://www.apache.org/licenses/license-2.0
- *
- * unless required by applicable law or agreed to in writing,
- * software distributed under the license is distributed on an
- * "as is" basis, without warranties or conditions of any
- * kind, either express or implied.  see the license for the
- * specific language governing permissions and limitations
- * under the license.
- *
- */
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
 
 A = load 'passwd' using PigStorage(':');
 B = foreach A generate \$0 as id;

+ 5 - 5
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/init.pp

@@ -42,7 +42,7 @@ class hdp-templeton(
     $size = 32
   }
 
-  $templeton_user = $hdp-templeton::params::templeton_user
+  $webhcat_user = $hdp-templeton::params::webhcat_user
   $templeton_config_dir = $hdp-templeton::params::conf_dir
 
   if ($service_state == 'uninstalled') {
@@ -64,7 +64,7 @@ class hdp-templeton(
     class { hdp-templeton::download-hive-tar: }
     class { hdp-templeton::download-pig-tar: }
 
-    hdp::user{ $templeton_user:}
+    hdp::user{ $webhcat_user:}
 
     hdp::directory { $templeton_config_dir: 
       service_state => $service_state,
@@ -73,10 +73,10 @@ class hdp-templeton(
 
     hdp-templeton::configfile { ['webhcat-env.sh']: }
 
-    anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::User[$templeton_user] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
+    anchor { 'hdp-templeton::begin': } -> Hdp::Package['webhcat'] -> Hdp::User[$webhcat_user] -> Hdp::Directory[$templeton_config_dir] -> Hdp-templeton::Configfile<||> ->  anchor { 'hdp-templeton::end': }
 
      if ($server == true ) { 
-      Hdp::Package['webhcat'] -> Hdp::User[$templeton_user] ->   Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
+      Hdp::Package['webhcat'] -> Hdp::User[$webhcat_user] ->   Class['hdp-templeton::download-hive-tar'] -> Class['hdp-templeton::download-pig-tar'] -> Anchor['hdp-templeton::end']
      }
   }
 }
@@ -88,7 +88,7 @@ define hdp-templeton::configfile(
 {
   hdp::configfile { "${hdp-templeton::params::conf_dir}/${name}":
     component       => 'templeton',
-    owner           => $hdp-templeton::params::templeton_user,
+    owner           => $hdp-templeton::params::webhcat_user,
     mode            => $mode
   }
 }

+ 3 - 3
ambari-agent/src/main/puppet/modules/hdp-templeton/manifests/service.pp

@@ -25,7 +25,7 @@ class hdp-templeton::service(
 {
   include $hdp-templeton::params
   
-  $user = "$hdp-templeton::params::templeton_user"
+  $user = "$hdp-templeton::params::webhcat_user"
   $hadoop_home = $hdp-templeton::params::hadoop_prefix
   $cmd = "env HADOOP_HOME=${hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh"
   $pid_file = "${hdp-templeton::params::templeton_pid_dir}/webhcat.pid" 
@@ -41,7 +41,7 @@ class hdp-templeton::service(
   }
 
   hdp-templeton::service::directory { $hdp-templeton::params::templeton_pid_dir : }
-  hdp-templeton::service::directory { $hdp-templeton::params::templeton_log_dir : }
+  hdp-templeton::service::directory { $hdp-templeton::params::hcat_log_dir : }
 
   anchor{'hdp-templeton::service::begin':} -> Hdp-templeton::Service::Directory<||> -> anchor{'hdp-templeton::service::end':}
   
@@ -58,7 +58,7 @@ class hdp-templeton::service(
 define hdp-templeton::service::directory()
 {
   hdp::directory_recursive_create { $name: 
-    owner => $hdp-templeton::params::templeton_user,
+    owner => $hdp-templeton::params::webhcat_user,
     mode => '0755',
     service_state => $ensure,
     force => true

+ 6 - 3
ambari-agent/src/main/puppet/modules/hdp-templeton/templates/webhcat-env.sh.erb

@@ -23,13 +23,16 @@
 # The file containing the running pid
 PID_FILE=<%=scope.function_hdp_template_var("templeton_pid_dir")%>/webhcat.pid
 
-TEMPLETON_LOG_DIR=<%=scope.function_hdp_template_var("templeton_log_dir")%>/
+TEMPLETON_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
+
+
+WEBHCAT_LOG_DIR=<%=scope.function_hdp_template_var("hcat_log_dir")%>/
 
 # The console error log
-ERROR_LOG=<%=scope.function_hdp_template_var("templeton_log_dir")%>/webhcat-console-error.log
+ERROR_LOG=<%=scope.function_hdp_template_var("hcat_log_dir")%>/webhcat-console-error.log
 
 # The console log
-CONSOLE_LOG=<%=scope.function_hdp_template_var("templeton_log_dir")%>/webhcat-console.log
+CONSOLE_LOG=<%=scope.function_hdp_template_var("hcat_log_dir")%>/webhcat-console.log
 
 #TEMPLETON_JAR=<%=scope.function_hdp_template_var("templeton_jar_name")%>
 

+ 15 - 0
ambari-agent/src/main/puppet/modules/hdp/manifests/.directory

@@ -1,3 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
 [Dolphin]
 Timestamp=2011,3,16,9,26,14
 ViewMode=1

+ 15 - 3
ambari-agent/src/main/puppet/modules/hdp/manifests/init.pp

@@ -102,11 +102,18 @@ class hdp::create_smoke_user()
   $smoke_user = $hdp::params::smokeuser
   $security_enabled = $hdp::params::security_enabled
 
-  group { $smoke_group :
+  
+  if ( $smoke_group != $proxyuser_group) {
+    group { $smoke_group :
+      ensure => present
+    }
+  }
+
+  group { $proxyuser_group :
     ensure => present
   }
 
-  hdp::user { $smoke_user:}
+  hdp::user { $smoke_user: gid => $proxyuser_group}
 
   $cmd = "usermod -g  $smoke_group  $smoke_user"
   $check_group_cmd = "id -gn $smoke_user | grep $smoke_group"
@@ -126,7 +133,11 @@ class hdp::create_smoke_user()
      }
   }
 
-  Group[$smoke_group] -> Hdp::User[$smoke_user] -> Hdp::Exec[$cmd] 
+  if ( $smoke_group != $proxyuser_group) {
+    Group[$smoke_group] -> Group[$proxyuser_group] -> Hdp::User[$smoke_user] -> Hdp::Exec[$cmd]
+  } else {
+    Group[$smoke_group] -> Hdp::User[$smoke_user] -> Hdp::Exec[$cmd]
+  }
 }
 
 
@@ -168,6 +179,7 @@ define hdp::user(
     }
   }
 }
+
      
 define hdp::directory(
   $owner = $hdp::params::hadoop_user,

+ 17 - 0
ambari-agent/src/main/puppet/modules/hdp/manifests/package.pp

@@ -116,3 +116,20 @@ define hdp::package::process_pkg(
   }
 }
 
+# Removes the specified package using shell command appropriate for current OS type.
+# Method DOES NOT resolve package name via hdp::params.
+# If package does not exist or is not installed, command does nothing.
+define hdp::package::remove_pkg(
+    $package_type,
+  )
+{
+
+  # TODO: For non-rpm based systems, provide appropriate command
+  exec { "remove_package ${package_type}":
+    path    => "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+    command => $hdp::params::hdp_os_type ? {
+      default => "rpm -e --allmatches ${package_type} ; true"
+    },
+  }
+
+}

+ 5 - 3
ambari-agent/src/main/puppet/modules/hdp/manifests/params.pp

@@ -247,7 +247,7 @@ class hdp::params()
       64 => 'nagios-3.2.3'
     },
     nagios-plugins => {
-      64 => 'nagios-plugins-1.4.9'
+      64 => 'nagios-plugins'
     },
     nagios-fping => {
       64 =>'fping'
@@ -439,7 +439,7 @@ class hdp::params()
     },
 
     lzo => {
-      'ALL' => {'ALL' => ['lzo', 'lzo.i686', 'lzo-devel', 'lzo-devel.i686'],
+      'ALL' => {'ALL' => ['lzo', 'lzo-devel'],
                 suse => ['lzo-devel']},
     },
 
@@ -515,7 +515,9 @@ class hdp::params()
       64 => {'ALL' => $NOTHING,
              suse => 'php5-json',
              centos6 => $NOTHING,
-             rhel6 => $NOTHING}
+             redhat6 => $NOTHING,
+             centos5 => 'php-pecl-json.x86_64',
+             redhat5 => 'php-pecl-json.x86_64'}
     },
 
     ganglia-server => {

+ 4 - 1
ambari-agent/src/main/python/ambari_agent/puppetExecutor.py

@@ -26,6 +26,7 @@ from RepoInstaller import RepoInstaller
 import pprint, threading
 from Grep import Grep
 from threading import Thread
+import shell
 import traceback
 
 logger = logging.getLogger()
@@ -212,10 +213,12 @@ class puppetExecutor:
     self.event.wait(self.PUPPET_TIMEOUT_SECONDS)
     if puppet.returncode is None:
       logger.error("Task timed out and will be killed")
-      puppet.terminate()
+      self.runShellKillPgrp(puppet)
       self.last_puppet_has_been_killed = True
     pass
 
+  def runShellKillPgrp(self, puppet):
+    shell.killprocessgrp(puppet.pid)
 
 def main():
   logging.basicConfig(level=logging.DEBUG)    

+ 7 - 7
ambari-agent/src/main/python/ambari_agent/servicesToPidNames.dict

@@ -13,19 +13,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-NAMENODE=hadoop-[a-z_]+-namenode.pid$
-SECONDARY_NAMENODE=hadoop-[a-z_]+-secondarynamenode.pid$
-DATANODE=hadoop-[a-z_]+-datanode.pid$
-JOBTRACKER=hadoop-[a-z_]+-jobtracker.pid$
-TASKTRACKER=hadoop-[a-z_]+-tasktracker.pid$
+NAMENODE=hadoop-[A-Za-z0-9_]+-namenode.pid$
+SECONDARY_NAMENODE=hadoop-[A-Za-z0-9_]+-secondarynamenode.pid$
+DATANODE=hadoop-[A-Za-z0-9_]+-datanode.pid$
+JOBTRACKER=hadoop-[A-Za-z0-9_]+-jobtracker.pid$
+TASKTRACKER=hadoop-[A-Za-z0-9_]+-tasktracker.pid$
 OOZIE_SERVER=oozie.pid
 ZOOKEEPER_SERVER=zookeeper_server.pid
 TEMPLETON_SERVER=templeton.pid
 NAGIOS_SERVER=nagios.pid
 GANGLIA_SERVER=gmetad.pid
 GANGLIA_MONITOR=gmond.pid
-HBASE_MASTER=hbase-hbase-master.pid
-HBASE_REGIONSERVER=hbase-hbase-regionserver.pid
+HBASE_MASTER=hbase-[A-Za-z0-9_]+-master.pid
+HBASE_REGIONSERVER=hbase-[A-Za-z0-9_]+-regionserver.pid
 NAGIOS_SERVER=nagios.pid
 HCATALOG_SERVER=hcat.pid
 KERBEROS_SERVER=kadmind.pid

+ 19 - 0
ambari-agent/src/main/python/ambari_agent/site.pp

@@ -1,3 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
 import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp/manifests/*.pp'
 import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hadoop/manifests/*.pp'
 import '/media/sf_/home/mahadev/workspace/ambari-workspace/ambari-git/ambari-agent/src/main/puppet/modules/hdp-hbase/manifests/*.pp'

+ 14 - 13
ambari-agent/src/test/python/TestNetUtil.py

@@ -49,19 +49,20 @@ class TestNetUtil(TestCase):
       self.defaulttimeout = socket.getdefaulttimeout()
 
 
-  def test_url_checks(self):
-    netutil = NetUtil()
-    if hasattr(socket, 'setdefaulttimeout'):
-      # Set the default timeout on sockets
-      socket.setdefaulttimeout(1)
-    self.assertEquals(netutil.checkURL('http://' + NON_EXISTING_DOMAIN), False, "Not existing domain")
-    self.assertEquals(netutil.checkURL(BAD_URL), False, "Bad url")
-    self.assertEquals(netutil.checkURL('http://192.168.253.177'), False, "Not reachable IP")
-    if hasattr(socket, 'setdefaulttimeout'):
-      # Set the default timeout on sockets
-      socket.setdefaulttimeout(20)
-    self.assertEquals(netutil.checkURL('http://www.iana.org/domains/example/'), True, "Good url - HTTP code 200")
-    self.assertEquals(netutil.checkURL('https://www.iana.org/domains/example/'), True, "Good HTTPS url - HTTP code 200")
+# Test was failing: BUG-3112
+#  def test_url_checks(self):
+#    netutil = NetUtil()
+#    if hasattr(socket, 'setdefaulttimeout'):
+#      # Set the default timeout on sockets
+#      socket.setdefaulttimeout(1)
+#    self.assertEquals(netutil.checkURL('http://' + NON_EXISTING_DOMAIN), False, "Not existing domain")
+#    self.assertEquals(netutil.checkURL(BAD_URL), False, "Bad url")
+#    self.assertEquals(netutil.checkURL('http://192.168.253.177'), False, "Not reachable IP")
+#    if hasattr(socket, 'setdefaulttimeout'):
+#      # Set the default timeout on sockets
+#      socket.setdefaulttimeout(20)
+#    self.assertEquals(netutil.checkURL('http://www.iana.org/domains/example/'), True, "Good url - HTTP code 200")
+#    self.assertEquals(netutil.checkURL('https://www.iana.org/domains/example/'), True, "Good HTTPS url - HTTP code 200")
 
 
   def test_registration_retries(self):

+ 4 - 0
ambari-agent/src/test/python/TestPuppetExecutor.py

@@ -143,6 +143,9 @@ class TestPuppetExecutor(TestCase):
       self.subprocess_mockup.tmperr = tmperr
       return self.subprocess_mockup
 
+    def runShellKillPgrp(self, puppet):
+      puppet.terminate()  # note: In real code, subprocess.terminate() is not called
+      pass
 
   class Subprocess_mockup():
 
@@ -154,6 +157,7 @@ class TestPuppetExecutor(TestCase):
     was_terminated = False
     tmpout = None
     tmperr = None
+    pid=-1
 
     def communicate(self):
       self.started_event.set()

+ 4 - 9
ambari-agent/src/test/python/examples/debug_testcase_example.py

@@ -26,18 +26,16 @@ from ambari_agent.ActionQueue import ActionQueue
 from ambari_agent import AmbariConfig
 from ambari_agent.NetUtil import NetUtil
 import socket, ConfigParser, logging
-import os, pprint, json, sys
+import os, pprint, json, sys, unittest
 from threading import Thread
 import time
 import Queue
 
-
-BAD_URL = 'http://localhost:54222/badurl/'
 logger = logging.getLogger()
 
-class TestController():
+class TestController(TestCase):
 
-# This file should be put to ambari-agent/src/main/python/debug_testcase_example.py.
+# This file should be put to ambari-agent/src/main/python/ambari-agent/debug_testcase_example.py.
 # After installing python plugin and adjusting test,
 # it may be run in IntelliJ IDEA debugger
 
@@ -68,10 +66,7 @@ def main(argv=None):
   stream_handler.setFormatter(formatter)
   logger.addHandler(stream_handler)
 
-  test = TestController()
-  test.setUp()
-  test.test_custom()
-  test.tearDown()
+  unittest.main()
 
 if __name__ == '__main__':
   main()

+ 5 - 5
ambari-project/pom.xml

@@ -352,11 +352,6 @@
   <build>
     <pluginManagement>
       <plugins>
-        <plugin>
-          <groupId>org.apache.rat</groupId>
-          <artifactId>apache-rat-plugin</artifactId>
-          <version>0.8</version>
-        </plugin>
         <plugin>
           <groupId>org.apache.maven.plugins</groupId>
           <artifactId>maven-surefire-plugin</artifactId>
@@ -365,6 +360,11 @@
       </plugins>
     </pluginManagement>
     <plugins>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration combine.self="override"/>
+      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-surefire-plugin</artifactId>

+ 1 - 0
ambari-server/conf/unix/ambari.properties

@@ -15,6 +15,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
 security.server.keys_dir = /var/lib/ambari-server/keys
 resources.dir = /var/lib/ambari-server/resources
 jdk.url=http://public-repo-1.hortonworks.com/ARTIFACTS/jdk-6u31-linux-x64.bin

+ 124 - 0
ambari-server/docs/api/v1/clusters-cluster.md

@@ -0,0 +1,124 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+View Cluster Information
+=====
+
+[Back to Resources](index.md#resources)
+
+Returns information for the specified cluster identified by ":name"
+
+    GET /clusters/:name
+
+**Response**
+
+    200 OK
+    {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster",
+      "Clusters" : {
+        "cluster_name" : "MyCluster",
+        "cluster_id" : 1,
+        "version" : "HDP-1.2.0"
+      },
+      "services" : [
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/NAGIOS",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "NAGIOS"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HCATALOG",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "HCATALOG"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/PIG",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "PIG"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/MAPREDUCE",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "MAPREDUCE"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/GANGLIA",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "GANGLIA"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HIVE",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "HIVE"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS",
+        "ServiceInfo" : {
+          "cluster_name" : "MyIE9",
+          "service_name" : "HDFS"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/ZOOKEEPER",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "ZOOKEEPER"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HBASE",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "HBASE"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/OOZIE",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "OOZIE"
+          }
+        } ],
+    "hosts" : [
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/some.cluster.host",
+      "Hosts" : {
+        "cluster_name" : "MyCluster",
+        "host_name" : "some.cluster.host"
+        }
+      },
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/another.cluster.host",
+      "Hosts" : {
+        "cluster_name" : "MyCluster",
+        "host_name" : "another.cluster.host"
+        }
+      } ]
+    }
+

+ 39 - 0
ambari-server/docs/api/v1/clusters.md

@@ -0,0 +1,39 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+List Clusters
+=====
+
+[Back to Resources](index.md#resources)
+
+Returns a collection of the currently configured clusters.
+
+    GET /clusters
+
+**Response**
+
+    200 OK
+    {
+      "href" : "http://your.ambari.server/api/v1/clusters",
+      "items" : [ {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster",
+        "Clusters" : {
+          "cluster_name" : "MyCluster",
+          "version" : "HDP-1.2.0"
+        }
+      } ]
+    }

+ 74 - 0
ambari-server/docs/api/v1/components-component.md

@@ -0,0 +1,74 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+View Component Information
+=====
+
+[Back to Resources](index.md#resources)
+
+Refers to a specific component identified by ":componentName" for a given service.
+
+    GET /clusters/:name/services/:serviceName/components/:componentName
+
+**Response**
+
+    200 OK
+    {
+    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
+    "metrics" : {
+      "rpc" : {
+        ...
+      },
+      "dfs" : {
+        "datanode" : {
+          ...
+        }
+      },
+      "disk" : {
+        ...
+      },
+      "cpu" : {
+        ...
+      },
+      "jvm" : {
+        ...
+      },
+      "load" : {
+        ...
+      },
+      "memory" : {
+        ...
+      },
+      "network" : {
+        ...
+      },
+    },
+    "ServiceComponentInfo" : {
+      "cluster_name" : "MyCluster",
+      "component_name" : "DATANODE",
+      "service_name" : "HDFS"
+    },
+    "host_components" : [
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/hosts/some.cluster.host/host_components/DATANODE",
+      "HostRoles" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "DATANODE",
+        "host_name" : "some.cluster.host"
+        }
+      } ]
+    }

+ 65 - 0
ambari-server/docs/api/v1/components.md

@@ -0,0 +1,65 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+View Service Components
+=====
+
+[Back to Resources](index.md#resources)
+
+Refers to a collection of all components for a given service.
+
+    GET /clusters/:name/services/:serviceName/components
+
+**Response**
+
+    200 OK
+    {
+    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components",
+    "items" : [
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "DATANODE",
+        "service_name" : "HDFS"
+        }
+      },
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/SECONDARY_NAMENODE",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "SECONDARY_NAMENODE",
+        "service_name" : "HDFS"
+        }
+      },
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "NAMENODE",
+        "service_name" : "HDFS"
+        }
+      },
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/HDFS_CLIENT",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "HDFS_CLIENT",
+        "service_name" : "HDFS"
+        }
+      } ]
+    }

+ 29 - 0
ambari-server/docs/api/v1/host-component.md

@@ -0,0 +1,29 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+View Host Component Information
+=====
+
+[Back to Resources](index.md#resources)
+
+Returns information for a specific role on the given host.
+
+    GET /clusters/:name/hosts/:hostName/host_components/:componentName
+
+**Response**
+
+    200 OK

+ 30 - 0
ambari-server/docs/api/v1/host-components.md

@@ -0,0 +1,30 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+List Host Components
+=====
+
+[Back to Resources](index.md#resources)
+
+Returns a collection of components running on a given host.
+
+    GET /clusters/:name/hosts/:hostName/host_components
+
+**Response**
+
+    200 OK
+

+ 30 - 0
ambari-server/docs/api/v1/hosts-host.md

@@ -0,0 +1,30 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+View Host Information
+=====
+
+[Back to Resources](index.md#resources)
+
+Returns information about a single host in a given cluster.
+
+    GET /clusters/:name/hosts/:hostName
+
+**Response**
+
+    200 OK
+

+ 29 - 0
ambari-server/docs/api/v1/hosts.md

@@ -0,0 +1,29 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+List Hosts
+=====
+
+[Back to Resources](index.md#resources)
+
+Returns a collection of all hosts in a given cluster.
+
+    GET /clusters/:name/hosts
+
+**Response**
+
+    200 OK

+ 172 - 0
ambari-server/docs/api/v1/index.md

@@ -0,0 +1,172 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+Ambari API Reference v1
+=========
+
+The Ambari API provides access to monitoring and metrics information of a Apache Hadoop cluster. This document describes the resources used in the Ambari API and is intended for developers who want to integrate with Ambari.
+
+- [Release Version](#release-version)
+- [Authentication](#authentication)
+- [Resources](#resources)
+- [Partial Response](#partial-response)
+- [Query Parameters](#query-parameters)
+- [Errors](#errors)
+
+
+Release Version
+----
+_Last Updated December 28, 2012_
+
+Authentication
+----
+
+The operations you perform against the Ambari API require authentication. Your access to the API requires the use of **Basic Authentication**. To use Basic Authentication, you need to send the **Authorization: Basic** header with your requests. For example, this can be handled when using curl and the --user option.
+
+    curl --user name:password http://{your.ambari.server}/api/v1/clusters
+
+_Note: The authentication method and source is configured at the Ambari Server. Changing and configuring the authentication method and source is not covered in this document._
+
+Resources
+----
+
+There are 2 types of resources in the Ambari API:
+
+- **Collection Resource:** This resource type refers to a collection of resources, rather than any specific resource. For example:
+
+        /clusters  
+
+  _Returns a collection of clusters_
+
+- **Instance Resource:** This resource type refers to a single specific resource. For example:
+
+        /clusters/MyCluster
+
+  _Refers to the cluster resource identified by the id "MyCluster"_
+
+### Clusters
+
+- [List clusters](clusters.md)
+- [View cluster information](clusters-cluster.md)
+
+### Services
+
+- [List services](services.md)
+- [View service information](services-service.md)
+- [View service components](components.md)
+- [View component information](components-component.md)
+
+### Hosts
+
+- [List hosts](hosts.md)
+- [View host information](hosts-host.md)
+- [List host components](host-components.md)
+- [View host component information](host-component.md)
+
+Partial Response
+----
+
+A mechanism used to control which fields are returned by a query.  Partial response can be used to restrict which fields are returned and additionally, it allows a query to reach down and return data from sub-resources.  The keyword “fields” is used to specify a partial response.  Only the fields listed will be returned to the client.  To specify sub-elements, use the notation “a/b/c”.  The wildcard ‘*’ can be used to show all fields for a resource.  This can be combined to provide ‘expand’ functionality for sub-components.  Some fields are always returned for a resource regardless of the specified partial response fields.  These fields are the fields which uniquely identify the resource.  This would be the primary id field of the resource and the foreign keys to the primary id fields of all ancestors of the resource.
+
+**Example: Partial Response (Name and All metrics)*
+
+    GET    /api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE?fields=name,metrics
+
+
+    200 OK
+    {
+      “href” :”.../api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE?fields=name,metrics”,
+      “name”: “NAMENODE”,
+      “metrics”: [
+        {
+        ...
+        }
+      ]
+    }
+
+Query Parameters
+----
+
+This mechanism limits which data is returned by a query based on a predicate(s). Providing query parameters does not result in any link expansion in the data that is returned to the client although it may result in expansion on the server to apply predicates on sub-objects.
+
+_Note: Only applies to collection resources. And all URLs must be properly URL encoded_
+
+**Query Operators**
+
+<table>
+  <tr>
+    <th>Operator</th>
+    <th>Example</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>=</td>
+    <td>name=host1</th>
+    <td>String or numerical equals</td>
+  </tr>
+  <tr>
+    <td>!=</td>
+    <td>host!=host1</th>
+    <td>String or numerical not equals</td>
+  </tr>
+  <tr>
+    <td>&lt;</td>
+    <td>disk_total&lt;50</th>
+    <td>Numerical less than</td>
+  </tr>
+  <tr>
+    <td>&gt;</td>
+    <td>disk_total&gt;50</th>
+    <td>Numerical greater than</td>
+  </tr>
+  <tr>
+    <td>&lt;=</td>
+    <td>disk_total&lt;=50</th>
+    <td>Numerical less than or equals</td>
+  </tr>
+  <tr>
+    <td>&gt;=</td>
+    <td>disk_total&gt;=50</th>
+    <td>Numerical greater than or equals</td>
+  </tr>
+  <tr>
+    <td>or</td>
+    <td>disk_total&gt;50 or disk_free&lt;100</th>
+    <td>Logial 'or'</td>
+  </tr>
+</table>
+
+**Example: Get all hosts with less than 100 "disk_total"**
+
+    GET  /api/v1/clusters/c1/hosts?metrics/disk/disk_total<100
+
+Errors
+----
+
+This section describes how errors are represented in a response.
+
+**Response**
+
+    404 Not Found
+    {
+      “status”: 404,
+      “message”: “standard message”,
+      “developerMessage”: “verbose developers message”,
+      “code”: 1234,
+      “moreInfo”, “...”
+    }
+

+ 70 - 0
ambari-server/docs/api/v1/services-service.md

@@ -0,0 +1,70 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+View Service Information
+=====
+
+[Back to Resources](index.md#resources)
+
+Refers to a specific service identified by ":serviceName" for a given cluster.
+
+    GET /clusters/:name/services/:serviceName
+
+**Response**
+
+    200 OK
+    {
+    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS",
+    "ServiceInfo" : {
+      "cluster_name" : "MyCluster",
+      "service_name" : "HDFS"
+      },
+    "components" : [
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/NAMENODE",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "NAMENODE",
+        "service_name" : "HDFS"
+        }
+      },
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/DATANODE",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "DATANODE",
+        "service_name" : "HDFS"
+        }
+      },
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/HDFS_CLIENT",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "HDFS_CLIENT",
+        "service_name" : "HDFS"
+        }
+      },
+      {
+      "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HDFS/components/SECONDARY_NAMENODE",
+      "ServiceComponentInfo" : {
+        "cluster_name" : "MyCluster",
+        "component_name" : "SECONDARY_NAMENODE",
+        "service_name" : "HDFS"
+        }
+      } ]
+    }
+

+ 55 - 0
ambari-server/docs/api/v1/services.md

@@ -0,0 +1,55 @@
+<!---
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+List Services
+=====
+
+[Back to Resources](index.md#resources)
+
+Returns a collection of the services in a given cluster.
+
+    GET /clusters/:name/services
+
+**Response**
+
+    200 OK
+    {
+    "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services",
+    "items" : [
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/NAGIOS",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "NAGIOS"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/HCATALOG",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "HCATALOG"
+          }
+        },
+        {
+        "href" : "http://your.ambari.server/api/v1/clusters/MyCluster/services/PIG",
+        "ServiceInfo" : {
+          "cluster_name" : "MyCluster",
+          "service_name" : "PIG"
+          }
+        }
+      ]
+    }

+ 11 - 12
ambari-server/pom.xml

@@ -51,21 +51,20 @@
       <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>
-          <version>0.8</version>
         <configuration>
-          <numUnapprovedLicenses>8</numUnapprovedLicenses>
           <excludes>
-              <exclude>pass.txt</exclude>
-              <exclude>derby.log</exclude>
-              <exclude>src/test/resources/users.ldif</exclude>
-              <exclude>src/main/resources/ca.config</exclude>
-              <exclude>src/main/resources/db/serial</exclude>
-              <exclude>src/main/resources/db/index.txt</exclude>
-              <exclude>conf/unix/ca.config</exclude>
+            <exclude>pass.txt</exclude>
+            <exclude>derby.log</exclude>
+            <exclude>src/test/resources/users.ldif</exclude>
+            <exclude>src/main/resources/ca.config</exclude>
+            <exclude>src/main/resources/db/serial</exclude>
+            <exclude>src/main/resources/db/index.txt</exclude>
+            <exclude>conf/unix/ca.config</exclude>
+            <exclude>**/*.json</exclude>
+
+            <!--gitignore content-->
+            <exclude>src/main/resources/db/newcerts/**</exclude>
           </excludes>
-          <includes>
-            <include>pom.xml</include>
-          </includes>
         </configuration>
       </plugin>
       <plugin>

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/eventdb/db/PostgresConnector.java

@@ -395,8 +395,8 @@ public class PostgresConnector implements DBConnector {
       throws IOException {
     if (db == null)
       throw new IOException("postgres db not initialized");
-    String limitClause = " ORDER BY " + field.toString() + " " + (sortAscending ? SORT_ASC : SORT_DESC) + " NULLS " + (sortAscending ? "FIRST " : "LAST ")
-        + "OFFSET " + offset + (limit >= 0 ? " LIMIT " + limit : "");
+    String limitClause = " ORDER BY " + field.toString() + " " + (sortAscending ? SORT_ASC : SORT_DESC) + " OFFSET " + offset
+        + (limit >= 0 ? " LIMIT " + limit : "");
     return getQualifiedPS(statement, searchClause + limitClause);
   }
   

+ 5 - 2
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionManager.java

@@ -24,6 +24,7 @@ import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.utils.StageUtils;
 import org.slf4j.Logger;
@@ -42,17 +43,19 @@ public class ActionManager {
   private final ActionScheduler scheduler;
   private final ActionDBAccessor db;
   private final ActionQueue actionQueue;
+  private final HostsMap hostsMap;
   private static Logger LOG = LoggerFactory.getLogger(ActionManager.class);
   private final AtomicLong requestCounter;
 
   @Inject
   public ActionManager(@Named("schedulerSleeptime") long schedulerSleepTime,
       @Named("actionTimeout") long actionTimeout,
-      ActionQueue aq, Clusters fsm, ActionDBAccessor db) {
+      ActionQueue aq, Clusters fsm, ActionDBAccessor db, HostsMap hostsMap) {
     this.actionQueue = aq;
     this.db = db;
+    this.hostsMap = hostsMap;
     scheduler = new ActionScheduler(schedulerSleepTime, actionTimeout, db,
-        actionQueue, fsm, 2);
+        actionQueue, fsm, 2, hostsMap);
     requestCounter = new AtomicLong(
         db.getLastPersistedRequestIdWhenInitialized());
   }

+ 6 - 1
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ActionScheduler.java

@@ -28,6 +28,7 @@ import org.apache.ambari.server.Role;
 import org.apache.ambari.server.ServiceComponentNotFoundException;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Service;
@@ -54,11 +55,13 @@ class ActionScheduler implements Runnable {
   private final ActionQueue actionQueue;
   private final Clusters fsmObject;
   private boolean taskTimeoutAdjustment = true;
+  private final HostsMap hostsMap;
 
   public ActionScheduler(long sleepTimeMilliSec, long actionTimeoutMilliSec,
       ActionDBAccessor db, ActionQueue actionQueue, Clusters fsmObject,
-      int maxAttempts) {
+      int maxAttempts, HostsMap hostsMap) {
     this.sleepTime = sleepTimeMilliSec;
+    this.hostsMap = hostsMap;
     this.actionTimeout = actionTimeoutMilliSec;
     this.db = db;
     this.actionQueue = actionQueue;
@@ -283,6 +286,8 @@ class ActionScheduler implements Runnable {
     s.setLastAttemptTime(hostname, roleStr, now);
     s.incrementAttemptCount(hostname, roleStr);
     LOG.info("Scheduling command: "+cmd.toString()+" for host: "+hostname);
+    /** change the hostname in the command for the host itself **/
+    cmd.setHostname(hostsMap.getHostMap(hostname));
     actionQueue.enqueue(hostname, cmd);
     db.hostRoleScheduled(s, hostname, roleStr);
   }

+ 12 - 0
ambari-server/src/main/java/org/apache/ambari/server/configuration/Configuration.java

@@ -105,6 +105,9 @@ public class Configuration {
   public static final String OS_VERSION_KEY =
       "server.os_type";
 
+  public static final String SRVR_HOSTS_MAPPING = 
+      "server.hosts.mapping";
+  
   private static final String SRVR_KSTR_DIR_DEFAULT = ".";
   public static final String SRVR_CRT_NAME_DEFAULT = "ca.crt";
   public static final String SRVR_KEY_NAME_DEFAULT = "ca.key";
@@ -289,6 +292,15 @@ public class Configuration {
     return properties.getProperty(WEBAPP_DIR, "web");
   }
 
+  /**
+   * Get the file that will be used for host mapping.
+   * @return null if such a file is not present, value if present.
+   */
+  public String getHostsMapFile() {
+    LOG.info("Hosts Mapping File " +  properties.getProperty(SRVR_HOSTS_MAPPING));
+    return properties.getProperty(SRVR_HOSTS_MAPPING);
+  }
+  
   /**
    * Gets ambari stack-path
    * @return String

+ 6 - 4
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -105,7 +105,9 @@ public class AmbariManagementControllerImpl implements
   private AmbariMetaInfo ambariMetaInfo;
   @Inject
   private Users users;
-
+  @Inject
+  private HostsMap hostsMap;
+  
   final private String masterHostname;
 
   final private static String JDK_RESOURCE_LOCATION =
@@ -898,7 +900,7 @@ public class AmbariManagementControllerImpl implements
 
     // Generate cluster host info
     execCmd.setClusterHostInfo(
-        StageUtils.getClusterHostInfo(cluster));
+        StageUtils.getClusterHostInfo(cluster, hostsMap));
 
     Host host = clusters.getHost(scHost.getHostName());
 
@@ -1687,7 +1689,7 @@ public class AmbariManagementControllerImpl implements
         // Generate cluster host info
         stage.getExecutionCommandWrapper(clientHost, smokeTestRole)
             .getExecutionCommand()
-            .setClusterHostInfo(StageUtils.getClusterHostInfo(cluster));
+            .setClusterHostInfo(StageUtils.getClusterHostInfo(cluster, hostsMap));
       }
 
       RoleGraph rg = new RoleGraph(rco);
@@ -3156,7 +3158,7 @@ public class AmbariManagementControllerImpl implements
         .getExecutionCommandWrapper(hostName, actionRequest.getActionName())
         .getExecutionCommand()
         .setClusterHostInfo(
-            StageUtils.getClusterHostInfo(clusters.getCluster(clusterName)));
+            StageUtils.getClusterHostInfo(clusters.getCluster(clusterName), hostsMap));
   }
 
   private void addDecommissionDatanodeAction(

+ 6 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/ControllerModule.java

@@ -51,15 +51,18 @@ public class ControllerModule extends AbstractModule {
 
   private final Configuration configuration;
   private final AmbariMetaInfo ambariMetaInfo;
-
+  private final HostsMap hostsMap;
+  
   public ControllerModule() throws Exception {
     configuration = new Configuration();
     ambariMetaInfo = new AmbariMetaInfo(configuration);
+    hostsMap = new HostsMap(configuration);
   }
 
   public ControllerModule(Properties properties) throws Exception {
     configuration = new Configuration(properties);
     ambariMetaInfo = new AmbariMetaInfo(configuration);
+    hostsMap = new HostsMap(configuration);
   }
 
   @Override
@@ -69,7 +72,8 @@ public class ControllerModule extends AbstractModule {
 
     bind(Configuration.class).toInstance(configuration);
     bind(AmbariMetaInfo.class).toInstance(ambariMetaInfo);
-
+    bind(HostsMap.class).toInstance(hostsMap);
+    
     bind(PasswordEncoder.class).toInstance(new StandardPasswordEncoder());
 
     JpaPersistModule jpaPersistModule = new JpaPersistModule(configuration.getPersistenceType().getUnitName());

+ 95 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/HostsMap.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Properties;
+
+import org.apache.ambari.server.configuration.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+/**
+ * Stores the mapping of hostnames to be used in any configuration on 
+ * the server.
+ *  
+ */
+@Singleton
+public class HostsMap {
+  private final static Logger LOG = LoggerFactory
+      .getLogger(HostsMap.class);
+
+  private String hostsMapFile;
+  private Properties hostsMap;
+
+  @Inject
+  public HostsMap(Configuration conf) {
+    hostsMapFile = conf.getHostsMapFile();
+    setupMap();
+  }
+  
+  public HostsMap(String file) {
+    hostsMapFile = file;
+  }
+
+  public void setupMap() {
+    InputStream inputStream = null;
+    LOG.info("Using hostsmap file " + this.hostsMapFile);
+    try {
+      if (hostsMapFile != null) {
+        hostsMap = new Properties();
+        inputStream = new FileInputStream(new File(hostsMapFile));
+        // load the properties
+        hostsMap.load(inputStream);
+      }
+    } catch (FileNotFoundException fnf) {
+      LOG.info("No configuration file " + hostsMapFile + " found in classpath.", fnf);
+    } catch (IOException ie) {
+      throw new IllegalArgumentException("Can't read configuration file " +
+          hostsMapFile, ie);
+    } finally {
+      if (inputStream != null) {
+        try {
+          inputStream.close();
+        } catch(IOException io) {
+          //ignore 
+        }
+      }
+    }
+  }
+
+/**
+ * Return map of the hostname if available
+ * @param hostName hostname map
+ * @return 
+ */
+public String getHostMap(String hostName) {
+  if (hostsMapFile == null) 
+    return hostName;
+  return hostsMap.getProperty(hostName, hostName);
+}
+
+}

+ 172 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/ClusterDefinition.java

@@ -0,0 +1,172 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * Defines the cluster created by gsInstaller.
+ */
+public class ClusterDefinition {
+
+  private static final String CLUSTER_DEFINITION_FILE = "gsInstaller-hosts.txt";
+  private static final String DEFAULT_CLUSTER_NAME    = "ambari";
+  private static final String CLUSTER_NAME_TAG        = "CLUSTER=";
+
+  private final String clusterName;
+  private final Set<String> services = new HashSet<String>();
+  private final Set<String> hosts = new HashSet<String>();
+  private final Map<String, Set<String>> components = new HashMap<String, Set<String>>();
+  private final Map<String, Map<String, Set<String>>> hostComponents = new HashMap<String, Map<String, Set<String>>>();
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Create a cluster definition.
+   */
+  public ClusterDefinition() {
+    this.clusterName = readClusterDefinition();
+  }
+
+
+  // ----- ClusterDefinition -------------------------------------------------
+
+  /**
+   * Get the name of the cluster.
+   *
+   * @return the cluster name
+   */
+  public String getClusterName() {
+    return clusterName;
+  }
+
+  /**
+   * Get the services for the cluster.
+   *
+   * @return the set of service names
+   */
+  public Set<String> getServices() {
+    return services;
+  }
+
+  /**
+   * Get the hosts for the cluster.
+   *
+   * @return the set of hosts names
+   */
+  public Set<String> getHosts() {
+    return hosts;
+  }
+
+  /**
+   * Get the components for the given service.
+   *
+   * @param service  the service name
+   *
+   * @return the set of component names for the given service name
+   */
+  public Set<String> getComponents(String service) {
+    return components.get(service);
+  }
+
+  /**
+   * Get the host components for the given service and host.
+   *
+   * @param service  the service name
+   * @param host     the host name
+   *
+   * @return the set of host component names for the given service and host names
+   */
+  public Set<String> getHostComponents(String service, String host) {
+    Set<String> resultSet = null;
+    Map<String, Set<String>> serviceHostComponents = hostComponents.get(service);
+    if (serviceHostComponents != null) {
+      resultSet = serviceHostComponents.get(host);
+    }
+    return resultSet == null ? Collections.<String>emptySet() : resultSet;
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Read the gsInstaller cluster definition file.
+   *
+   * @return the cluster name
+   */
+  private String readClusterDefinition() {
+    String clusterName = DEFAULT_CLUSTER_NAME;
+
+    try {
+      InputStream    is = this.getClass().getClassLoader().getResourceAsStream(CLUSTER_DEFINITION_FILE);
+      BufferedReader br = new BufferedReader(new InputStreamReader(is));
+
+      String line;
+      while ((line = br.readLine()) != null) {
+        line = line.trim();
+        if (line.startsWith(CLUSTER_NAME_TAG)) {
+          clusterName = line.substring(CLUSTER_NAME_TAG.length());
+        }
+        else {
+          String[] parts = line.split("\\s+");
+          assert(parts.length == 3);
+
+          String serviceName   = parts[0];
+          String componentName = parts[1];
+          String hostName      = parts[2];
+
+          services.add(serviceName);
+          Set<String> serviceComponents = components.get(serviceName);
+          if (serviceComponents == null) {
+            serviceComponents = new HashSet<String>();
+            components.put(serviceName, serviceComponents);
+          }
+          serviceComponents.add(componentName);
+
+          Map<String, Set<String>> serviceHostComponents = hostComponents.get(serviceName);
+          if (serviceHostComponents == null) {
+            serviceHostComponents = new HashMap<String, Set<String>>();
+            hostComponents.put(serviceName, serviceHostComponents);
+          }
+
+          Set<String> hostHostComponents = serviceHostComponents.get(hostName);
+          if (hostHostComponents == null) {
+            hostHostComponents = new HashSet<String>();
+            serviceHostComponents.put(hostName, hostHostComponents);
+          }
+          hostHostComponents.add(componentName);
+          hosts.add(hostName);
+        }
+      }
+    } catch (IOException e) {
+      String msg = "Caught exception reading " + CLUSTER_DEFINITION_FILE + ".";
+      throw new IllegalStateException(msg, e);
+    }
+    return clusterName;
+  }
+}

+ 73 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProvider.java

@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A cluster resource provider for a gsInstaller defined cluster.
+ */
+public class GSInstallerClusterProvider extends GSInstallerResourceProvider{
+
+  // Clusters
+  protected static final String CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("Clusters", "cluster_name");
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Construct a resource provider based on the given cluster definition.
+   *
+   * @param clusterDefinition  the cluster definition
+   */
+  public GSInstallerClusterProvider(ClusterDefinition clusterDefinition) {
+    super(clusterDefinition);
+    initClusterResources();
+  }
+
+
+  // ----- ResourceProvider --------------------------------------------------
+
+  @Override
+  public Set<String> getPropertyIdsForSchema() {
+    return PropertyHelper.getPropertyIds(Resource.Type.Cluster);
+  }
+
+  @Override
+  public Map<Resource.Type, String> getKeyPropertyIds() {
+    return PropertyHelper.getKeyPropertyIds(Resource.Type.Cluster);
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Create the resources based on the cluster definition.
+   */
+  private void initClusterResources() {
+    Resource cluster = new ResourceImpl(Resource.Type.Cluster);
+    cluster.setProperty(CLUSTER_NAME_PROPERTY_ID, getClusterDefinition().getClusterName());
+    addResource(cluster);
+  }
+}

+ 84 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProvider.java

@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A component resource provider for a gsInstaller defined cluster.
+ */
+public class GSInstallerComponentProvider extends GSInstallerResourceProvider{
+
+  // Components
+  protected static final String COMPONENT_CLUSTER_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name");
+  protected static final String COMPONENT_SERVICE_NAME_PROPERTY_ID    = PropertyHelper.getPropertyId("ServiceComponentInfo", "service_name");
+  protected static final String COMPONENT_COMPONENT_NAME_PROPERTY_ID  = PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name");
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Construct a resource provider based on the given cluster definition.
+   *
+   * @param clusterDefinition  the cluster definition
+   */
+  public GSInstallerComponentProvider(ClusterDefinition clusterDefinition) {
+    super(clusterDefinition);
+    initComponentResources();
+  }
+
+
+  // ----- ResourceProvider --------------------------------------------------
+
+  @Override
+  public Set<String> getPropertyIdsForSchema() {
+    return PropertyHelper.getPropertyIds(Resource.Type.Component);
+  }
+
+  @Override
+  public Map<Resource.Type, String> getKeyPropertyIds() {
+    return PropertyHelper.getKeyPropertyIds(Resource.Type.Component);
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Create the resources based on the cluster definition.
+   */
+  private void initComponentResources() {
+    String      clusterName = getClusterDefinition().getClusterName();
+    Set<String> services    = getClusterDefinition().getServices();
+    for (String serviceName : services) {
+      Set<String> components = getClusterDefinition().getComponents(serviceName);
+      for (String componentName : components) {
+        Resource component = new ResourceImpl(Resource.Type.Component);
+        component.setProperty(COMPONENT_CLUSTER_NAME_PROPERTY_ID, clusterName);
+        component.setProperty(COMPONENT_SERVICE_NAME_PROPERTY_ID, serviceName);
+        component.setProperty(COMPONENT_COMPONENT_NAME_PROPERTY_ID, componentName);
+        addResource(component);
+      }
+    }
+  }
+}

+ 89 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProvider.java

@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A host component resource provider for a gsInstaller defined cluster.
+ */
+public class GSInstallerHostComponentProvider extends GSInstallerResourceProvider{
+
+  // Host Components
+  protected static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
+  protected static final String HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "service_name");
+  protected static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
+  protected static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID      = PropertyHelper.getPropertyId("HostRoles", "host_name");
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Construct a resource provider based on the given cluster definition.
+   *
+   * @param clusterDefinition  the cluster definition
+   */
+  public GSInstallerHostComponentProvider(ClusterDefinition clusterDefinition) {
+    super(clusterDefinition);
+    initHostComponentResources();
+  }
+
+
+  // ----- ResourceProvider --------------------------------------------------
+
+  @Override
+  public Set<String> getPropertyIdsForSchema() {
+    return PropertyHelper.getPropertyIds(Resource.Type.HostComponent);
+  }
+
+  @Override
+  public Map<Resource.Type, String> getKeyPropertyIds() {
+    return PropertyHelper.getKeyPropertyIds(Resource.Type.HostComponent);
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Create the resources based on the cluster definition.
+   */
+  private void initHostComponentResources() {
+    String      clusterName = getClusterDefinition().getClusterName();
+    Set<String> services    = getClusterDefinition().getServices();
+    for (String serviceName : services) {
+      Set<String> hosts = getClusterDefinition().getHosts();
+      for (String hostName : hosts) {
+        Set<String> hostComponents = getClusterDefinition().getHostComponents(serviceName, hostName);
+        for (String componentName : hostComponents) {
+          Resource hostComponent = new ResourceImpl(Resource.Type.HostComponent);
+          hostComponent.setProperty(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID, clusterName);
+          hostComponent.setProperty(HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID, serviceName);
+          hostComponent.setProperty(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID, componentName);
+          hostComponent.setProperty(HOST_COMPONENT_HOST_NAME_PROPERTY_ID, hostName);
+          addResource(hostComponent);
+        }
+      }
+    }
+  }
+}

+ 82 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProvider.java

@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A host resource provider for a gsInstaller defined cluster.
+ */
+public class GSInstallerHostProvider extends GSInstallerResourceProvider{
+
+  // Hosts
+  protected static final String HOST_CLUSTER_NAME_PROPERTY_ID =
+      PropertyHelper.getPropertyId("Hosts", "cluster_name");
+  protected static final String HOST_NAME_PROPERTY_ID =
+      PropertyHelper.getPropertyId("Hosts", "host_name");
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Construct a resource provider based on the given cluster definition.
+   *
+   * @param clusterDefinition  the cluster definition
+   */
+  public GSInstallerHostProvider(ClusterDefinition clusterDefinition) {
+    super(clusterDefinition);
+    initHostResources();
+  }
+
+
+  // ----- ResourceProvider --------------------------------------------------
+
+  @Override
+  public Set<String> getPropertyIdsForSchema() {
+    return PropertyHelper.getPropertyIds(Resource.Type.Host);
+  }
+
+  @Override
+  public Map<Resource.Type, String> getKeyPropertyIds() {
+    return PropertyHelper.getKeyPropertyIds(Resource.Type.Host);
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Create the resources based on the cluster definition.
+   */
+  private void initHostResources() {
+    String      clusterName = getClusterDefinition().getClusterName();
+    Set<String> hosts       = getClusterDefinition().getHosts();
+
+    for (String hostName : hosts) {
+      Resource host = new ResourceImpl(Resource.Type.Host);
+      host.setProperty(HOST_CLUSTER_NAME_PROPERTY_ID, clusterName);
+      host.setProperty(HOST_NAME_PROPERTY_ID, hostName);
+      addResource(host);
+    }
+  }
+}

+ 52 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerNoOpProvider.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A NO-OP resource provider for a gsInstaller defined cluster.
+ */
+public class GSInstallerNoOpProvider extends GSInstallerResourceProvider{
+
+  private final Resource.Type type;
+
+  // ----- Constructors ------------------------------------------------------
+
+  public GSInstallerNoOpProvider(Resource.Type type, ClusterDefinition clusterDefinition) {
+    super(clusterDefinition);
+    this.type = type;
+  }
+
+
+  // ----- ResourceProvider --------------------------------------------------
+
+  @Override
+  public Set<String> getPropertyIdsForSchema() {
+    return PropertyHelper.getPropertyIds(type);
+  }
+
+  @Override
+  public Map<Resource.Type, String> getKeyPropertyIds() {
+    return PropertyHelper.getKeyPropertyIds(type);
+  }
+}

+ 44 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerProviderModule.java

@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.internal.AbstractProviderModule;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
+
+/**
+ * A provider module implementation that uses the GSInstaller resource provider.
+ */
+public class GSInstallerProviderModule extends AbstractProviderModule {
+
+  private final ClusterDefinition clusterDefinition;
+
+  // ----- Constructors ------------------------------------------------------
+
+  public GSInstallerProviderModule() {
+    clusterDefinition = new ClusterDefinition();
+  }
+
+  // ----- utility methods ---------------------------------------------------
+
+  @Override
+  protected ResourceProvider createResourceProvider(Resource.Type type) {
+    return GSInstallerResourceProvider.getResourceProvider(type, clusterDefinition);
+  }
+}

+ 148 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerResourceProvider.java

@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.spi.NoSuchParentResourceException;
+import org.apache.ambari.server.controller.spi.NoSuchResourceException;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Request;
+import org.apache.ambari.server.controller.spi.RequestStatus;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceAlreadyExistsException;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
+import org.apache.ambari.server.controller.spi.SystemException;
+import org.apache.ambari.server.controller.spi.UnsupportedPropertyException;
+
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * An abstract resource provider for a gsInstaller defined cluster.
+ */
+public abstract class GSInstallerResourceProvider implements ResourceProvider {
+
+  private final ClusterDefinition clusterDefinition;
+
+  private final Set<Resource> resources = new HashSet<Resource>();
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Construct a resource provider based on the given cluster definition.
+   *
+   * @param clusterDefinition  the cluster definition
+   */
+  public GSInstallerResourceProvider(ClusterDefinition clusterDefinition) {
+    this.clusterDefinition = clusterDefinition;
+  }
+
+
+  // ----- ResourceProvider --------------------------------------------------
+
+  @Override
+  public RequestStatus createResources(Request request)
+      throws SystemException, UnsupportedPropertyException, ResourceAlreadyExistsException, NoSuchParentResourceException {
+    throw new UnsupportedOperationException("Management operations are not supported");
+  }
+
+  @Override
+  public Set<Resource> getResources(Request request, Predicate predicate)
+      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+
+    Set<Resource> resultSet = new HashSet<Resource>();
+
+    for (Resource resource : resources) {
+      if (predicate == null || predicate.evaluate(resource)) {
+        resultSet.add(new ResourceImpl(resource));
+      }
+    }
+    return resultSet;
+  }
+
+  @Override
+  public RequestStatus updateResources(Request request, Predicate predicate)
+      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+    throw new UnsupportedOperationException("Management operations are not supported");
+  }
+
+  @Override
+  public RequestStatus deleteResources(Predicate predicate)
+      throws SystemException, UnsupportedPropertyException, NoSuchResourceException, NoSuchParentResourceException {
+    throw new UnsupportedOperationException("Management operations are not supported");
+  }
+
+  @Override
+  public Set<String> checkPropertyIds(Set<String> propertyIds) {
+    propertyIds = new HashSet<String>(propertyIds);
+    propertyIds.removeAll(getPropertyIdsForSchema());
+    return propertyIds;
+  }
+
+
+  // ----- accessors ---------------------------------------------------------
+
+  /**
+   * Get the configuration provider.
+   *
+   * @return the configuration provider
+   */
+  protected ClusterDefinition getClusterDefinition() {
+    return clusterDefinition;
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Add a resource to the set of resources provided by this provider.
+   *
+   * @param resource  the resource to add
+   */
+  protected void addResource(Resource resource) {
+    resources.add(resource);
+  }
+
+  /**
+   * Factory method for obtaining a resource provider based on a given type.
+   *
+   * @param type               the resource type
+   * @param clusterDefinition  the cluster definition
+   *
+   * @return a new resource provider
+   */
+  public static ResourceProvider getResourceProvider(Resource.Type type,
+                                                     ClusterDefinition clusterDefinition) {
+    switch (type) {
+      case Cluster:
+        return new GSInstallerClusterProvider(clusterDefinition);
+      case Service:
+        return new GSInstallerServiceProvider(clusterDefinition);
+      case Component:
+        return new GSInstallerComponentProvider(clusterDefinition);
+      case Host:
+        return new GSInstallerHostProvider(clusterDefinition);
+      case HostComponent:
+        return new GSInstallerHostComponentProvider(clusterDefinition);
+      default:
+        return new GSInstallerNoOpProvider(type, clusterDefinition);
+    }
+  }
+}

+ 80 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProvider.java

@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import org.apache.ambari.server.controller.internal.ResourceImpl;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A service resource provider for a gsInstaller defined cluster.
+ */
+public class GSInstallerServiceProvider extends GSInstallerResourceProvider{
+
+  // Services
+  protected static final String SERVICE_CLUSTER_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceInfo", "cluster_name");
+  protected static final String SERVICE_SERVICE_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("ServiceInfo", "service_name");
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Construct a resource provider based on the given cluster definition.
+   *
+   * @param clusterDefinition  the cluster definition
+   */
+  public GSInstallerServiceProvider(ClusterDefinition clusterDefinition) {
+    super(clusterDefinition);
+    initServiceResources();
+  }
+
+
+  // ----- ResourceProvider --------------------------------------------------
+
+  @Override
+  public Set<String> getPropertyIdsForSchema() {
+    return PropertyHelper.getPropertyIds(Resource.Type.Service);
+  }
+
+  @Override
+  public Map<Resource.Type, String> getKeyPropertyIds() {
+    return PropertyHelper.getKeyPropertyIds(Resource.Type.Service);
+  }
+
+
+  // ----- helper methods ----------------------------------------------------
+
+  /**
+   * Create the resources based on the cluster definition.
+   */
+  private void initServiceResources() {
+    String      clusterName = getClusterDefinition().getClusterName();
+    Set<String> services    = getClusterDefinition().getServices();
+
+    for (String serviceName : services) {
+      Resource service = new ResourceImpl(Resource.Type.Service);
+      service.setProperty(SERVICE_CLUSTER_NAME_PROPERTY_ID, clusterName);
+      service.setProperty(SERVICE_SERVICE_NAME_PROPERTY_ID, serviceName);
+      addResource(service);
+    }
+  }
+}

+ 346 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AbstractProviderModule.java

@@ -0,0 +1,346 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.internal;
+
+import org.apache.ambari.server.controller.AmbariServer;
+import org.apache.ambari.server.controller.ganglia.GangliaComponentPropertyProvider;
+import org.apache.ambari.server.controller.ganglia.GangliaHostComponentPropertyProvider;
+import org.apache.ambari.server.controller.ganglia.GangliaHostPropertyProvider;
+import org.apache.ambari.server.controller.ganglia.GangliaReportPropertyProvider;
+import org.apache.ambari.server.controller.ganglia.GangliaHostProvider;
+import org.apache.ambari.server.controller.jmx.JMXHostProvider;
+import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
+import org.apache.ambari.server.controller.spi.*;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.apache.ambari.server.controller.AmbariManagementController;
+
+import com.google.inject.Inject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * An abstract provider module implementation.
+ */
+public abstract class AbstractProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider {
+
+  private static final String HOST_CLUSTER_NAME_PROPERTY_ID             = PropertyHelper.getPropertyId("Hosts", "cluster_name");
+  private static final String HOST_NAME_PROPERTY_ID                     = PropertyHelper.getPropertyId("Hosts", "host_name");
+  private static final String HOST_IP_PROPERTY_ID                       = PropertyHelper.getPropertyId("Hosts", "ip");
+  private static final String CLUSTER_NAME_PROPERTY_ID                  = PropertyHelper.getPropertyId("Clusters", "cluster_name");
+  private static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
+  private static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID      = PropertyHelper.getPropertyId("HostRoles", "host_name");
+  private static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
+  private static final String GANGLIA_SERVER                            = "GANGLIA_SERVER";
+  private static final String GANGLIA_MONITOR                           = "GANGLIA_MONITOR";
+  private static final String GANGLIA_SERVER_OLD                        = "GANGLIA_MONITOR_SERVER";
+
+  /**
+   * The map of resource providers.
+   */
+  private final Map<Resource.Type, ResourceProvider> resourceProviders = new HashMap<Resource.Type, ResourceProvider>();
+
+  /**
+   * The map of lists of property providers.
+   */
+  private final Map<Resource.Type,List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
+
+  @Inject
+  private AmbariManagementController managementController;
+
+  /**
+   * The map of hosts.
+   */
+  private Map<String, Map<String, String>> clusterHostMap;
+
+  private Map<String, Map<String, String>> clusterHostComponentMap;
+
+  /**
+   * The host name of the Ganglia collector.
+   */
+  private Map<String, String> clusterGangliaCollectorMap;
+
+  private volatile boolean initialized = false;
+
+  protected final static Logger LOG =
+      LoggerFactory.getLogger(AbstractProviderModule.class);
+
+
+  // ----- Constructors ------------------------------------------------------
+
+  /**
+   * Create a default provider module.
+   */
+  public AbstractProviderModule() {
+    if (managementController == null) {
+      managementController = AmbariServer.getController();
+    }
+  }
+
+
+  // ----- ProviderModule ----------------------------------------------------
+
+  @Override
+  public ResourceProvider getResourceProvider(Resource.Type type) {
+    if (!propertyProviders.containsKey(type)) {
+      registerResourceProvider(type);
+    }
+    return resourceProviders.get(type);
+  }
+
+  @Override
+  public List<PropertyProvider> getPropertyProviders(Resource.Type type) {
+
+    if (!propertyProviders.containsKey(type)) {
+      createPropertyProviders(type);
+    }
+    return propertyProviders.get(type);
+  }
+
+
+  // ----- ResourceProviderObserver ------------------------------------------
+
+  @Override
+  public void update(ResourceProviderEvent event) {
+    Resource.Type type = event.getResourceType();
+
+    if (type == Resource.Type.Cluster ||
+        type == Resource.Type.Host ||
+        type == Resource.Type.HostComponent) {
+      resetInit();
+    }
+  }
+
+
+  // ----- JMXHostProvider ---------------------------------------------------
+
+  @Override
+  public String getHostName(String clusterName, String componentName) throws SystemException {
+    checkInit();
+    return clusterHostComponentMap.get(clusterName).get(componentName);
+  }
+
+  @Override
+  public Map<String, String> getHostMapping(String clusterName) throws SystemException {
+    checkInit();
+    return clusterHostMap.get(clusterName);
+  }
+
+
+  // ----- GangliaHostProvider -----------------------------------------------
+
+  @Override
+  public String getGangliaCollectorHostName(String clusterName) throws SystemException {
+    checkInit();
+    return clusterGangliaCollectorMap.get(clusterName);
+  }
+
+
+  // ----- utility methods ---------------------------------------------------
+
+  protected abstract ResourceProvider createResourceProvider(Resource.Type type);
+
+  protected void registerResourceProvider(Resource.Type type) {
+    ResourceProvider resourceProvider = createResourceProvider(type);
+
+    if (resourceProvider instanceof ObservableResourceProvider) {
+      ((ObservableResourceProvider)resourceProvider).addObserver(this);
+    }
+
+    putResourceProvider(type, resourceProvider);
+  }
+
+  protected void putResourceProvider(Resource.Type type, ResourceProvider resourceProvider) {
+    resourceProviders.put( type , resourceProvider);
+  }
+
+  protected void putPropertyProviders(Resource.Type type, List<PropertyProvider> providers) {
+    propertyProviders.put(type, providers);
+  }
+
+  protected void createPropertyProviders(Resource.Type type) {
+
+    List<PropertyProvider> providers = new LinkedList<PropertyProvider>();
+
+    URLStreamProvider streamProvider = new URLStreamProvider();
+
+    switch (type){
+      case Cluster :
+        providers.add(new GangliaReportPropertyProvider(
+            PropertyHelper.getGangliaPropertyIds(type).get("*"),
+            streamProvider,
+            this,
+            PropertyHelper.getPropertyId("Clusters", "cluster_name")));
+        break;
+      case Host :
+        providers.add(new GangliaHostPropertyProvider(
+            PropertyHelper.getGangliaPropertyIds(type),
+            streamProvider,
+            this,
+            PropertyHelper.getPropertyId("Hosts", "cluster_name"),
+            PropertyHelper.getPropertyId("Hosts", "host_name")
+        ));
+        break;
+      case Component :
+        providers.add(new JMXPropertyProvider(
+            PropertyHelper.getJMXPropertyIds(type),
+            streamProvider,
+            this,
+            PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
+            null,
+            PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name")));
+
+        providers.add(new GangliaComponentPropertyProvider(
+            PropertyHelper.getGangliaPropertyIds(type),
+            streamProvider,
+            this,
+            PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
+            PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name")));
+        break;
+      case HostComponent:
+        providers.add(new JMXPropertyProvider(
+            PropertyHelper.getJMXPropertyIds(type),
+            streamProvider,
+            this,
+            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+            PropertyHelper.getPropertyId("HostRoles", "host_name"),
+            PropertyHelper.getPropertyId("HostRoles", "component_name")));
+
+        providers.add(new GangliaHostComponentPropertyProvider(
+            PropertyHelper.getGangliaPropertyIds(type),
+            streamProvider,
+            this,
+            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
+            PropertyHelper.getPropertyId("HostRoles", "host_name"),
+            PropertyHelper.getPropertyId("HostRoles", "component_name")));
+        break;
+      default :
+        break;
+    }
+    putPropertyProviders(type, providers);
+  }
+
+  private void checkInit() throws SystemException{
+    if (!initialized) {
+      synchronized (this) {
+        if (!initialized) {
+          initProviderMaps();
+          initialized = true;
+        }
+      }
+    }
+  }
+
+  private void resetInit() {
+    if (initialized) {
+      synchronized (this) {
+        initialized = false;
+      }
+    }
+  }
+
+  private void initProviderMaps() throws SystemException{
+    ResourceProvider provider = getResourceProvider(Resource.Type.Cluster);
+    Request          request  = PropertyHelper.getReadRequest(CLUSTER_NAME_PROPERTY_ID);
+
+    try {
+      Set<Resource> clusters = provider.getResources(request, null);
+
+      clusterHostMap             = new HashMap<String, Map<String, String>>();
+      clusterHostComponentMap    = new HashMap<String, Map<String, String>>();
+      clusterGangliaCollectorMap = new HashMap<String, String>();
+
+      for (Resource cluster : clusters) {
+
+        String clusterName = (String) cluster.getPropertyValue(CLUSTER_NAME_PROPERTY_ID);
+
+        // initialize the host map from the known hosts...
+        provider = getResourceProvider(Resource.Type.Host);
+        request  = PropertyHelper.getReadRequest(HOST_NAME_PROPERTY_ID, HOST_IP_PROPERTY_ID);
+
+        Predicate predicate   = new PredicateBuilder().property(HOST_CLUSTER_NAME_PROPERTY_ID).
+            equals(clusterName).toPredicate();
+
+        Set<Resource>       hosts   = provider.getResources(request, predicate);
+        Map<String, String> hostMap = clusterHostMap.get(clusterName);
+
+        if (hostMap == null) {
+          hostMap = new HashMap<String, String>();
+          clusterHostMap.put(clusterName, hostMap);
+        }
+
+        for (Resource host : hosts) {
+          String hostName = (String) host.getPropertyValue(HOST_NAME_PROPERTY_ID);
+          String hostIp   = (String) host.getPropertyValue(HOST_IP_PROPERTY_ID);
+          hostMap.put(hostName, hostIp == null ? hostName : hostIp);
+        }
+
+        // initialize the host component map and Ganglia server from the known hosts components...
+        provider = getResourceProvider(Resource.Type.HostComponent);
+
+        request = PropertyHelper.getReadRequest(HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
+            HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+
+        predicate = new PredicateBuilder().property(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).
+            equals(clusterName).toPredicate();
+
+        Set<Resource>       hostComponents   = provider.getResources(request, predicate);
+        Map<String, String> hostComponentMap = clusterHostComponentMap.get(clusterName);
+
+        if (hostComponentMap == null) {
+          hostComponentMap = new HashMap<String, String>();
+          clusterHostComponentMap.put(clusterName, hostComponentMap);
+        }
+
+        for (Resource hostComponent : hostComponents) {
+          String componentName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
+          String hostName      = (String) hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
+
+          hostComponentMap.put(componentName, hostMap.get(hostName));
+
+          // record the Ganglia server for the current cluster
+          if (componentName.equals(GANGLIA_SERVER) || componentName.equals(GANGLIA_MONITOR) ||componentName.equals(GANGLIA_SERVER_OLD)) {
+            clusterGangliaCollectorMap.put(clusterName, clusterHostMap.get(clusterName).get(hostName));
+          }
+        }
+      }
+    } catch (UnsupportedPropertyException e) {
+      if (LOG.isErrorEnabled()) {
+        LOG.error("Caught UnsupportedPropertyException while trying to get the host mappings.", e);
+      }
+      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
+    } catch (NoSuchResourceException e) {
+      if (LOG.isErrorEnabled()) {
+        LOG.error("Caught NoSuchResourceException exception while trying to get the host mappings.", e);
+      }
+      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
+    } catch (NoSuchParentResourceException e) {
+      if (LOG.isErrorEnabled()) {
+        LOG.error("Caught NoSuchParentResourceException exception while trying to get the host mappings.", e);
+      }
+      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
+    }
+  }
+}

+ 8 - 304
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/DefaultProviderModule.java

@@ -18,80 +18,20 @@
 
 package org.apache.ambari.server.controller.internal;
 
+import com.google.inject.Inject;
+import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.controller.AmbariServer;
-import org.apache.ambari.server.controller.ganglia.GangliaComponentPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaHostComponentPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaHostPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaReportPropertyProvider;
-import org.apache.ambari.server.controller.ganglia.GangliaHostProvider;
-import org.apache.ambari.server.controller.jmx.JMXHostProvider;
-import org.apache.ambari.server.controller.jmx.JMXPropertyProvider;
-import org.apache.ambari.server.controller.spi.*;
-import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
-import org.apache.ambari.server.controller.AmbariManagementController;
-
-import com.google.inject.Inject;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
 
 /**
  * The default provider module implementation.
  */
-public class DefaultProviderModule implements ProviderModule, ResourceProviderObserver, JMXHostProvider, GangliaHostProvider {
-
-  private static final String HOST_CLUSTER_NAME_PROPERTY_ID             = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  private static final String HOST_NAME_PROPERTY_ID                     = PropertyHelper.getPropertyId("Hosts", "host_name");
-  private static final String HOST_IP_PROPERTY_ID                       = PropertyHelper.getPropertyId("Hosts", "ip");
-  private static final String HOST_ATTRIBUTES_PROPERTY_ID               = PropertyHelper.getPropertyId("Hosts", "attributes");
-  private static final String CLUSTER_NAME_PROPERTY_ID                  = PropertyHelper.getPropertyId("Clusters", "cluster_name");
-  private static final String HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID   = PropertyHelper.getPropertyId("HostRoles", "cluster_name");
-  private static final String HOST_COMPONENT_HOST_NAME_PROPERTY_ID      = PropertyHelper.getPropertyId("HostRoles", "host_name");
-  private static final String HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID = PropertyHelper.getPropertyId("HostRoles", "component_name");
-  private static final String GANGLIA_SERVER                            = "GANGLIA_SERVER";
-  private static final String GANGLIA_SERVER_OLD                        = "GANGLIA_MONITOR_SERVER";
-
-  /**
-   * The map of resource providers.
-   */
-  private final Map<Resource.Type, ResourceProvider> resourceProviders = new HashMap<Resource.Type, ResourceProvider>();
-
-  /**
-   * The map of lists of property providers.
-   */
-  private final Map<Resource.Type,List<PropertyProvider>> propertyProviders = new HashMap<Resource.Type, List<PropertyProvider>>();
-
+public class DefaultProviderModule extends AbstractProviderModule {
   @Inject
   private AmbariManagementController managementController;
 
-  /**
-   * The map of hosts.
-   */
-  private Map<String, Map<String, String>> clusterHostMap;
-
-  private Map<String, Map<String, String>> clusterHostComponentMap;
-
-  /**
-   * The host name of the Ganglia collector.
-   */
-  private Map<String, String> clusterGangliaCollectorMap;
-
-
-  private volatile boolean initialized = false;
-
-
-
-
-  protected final static Logger LOG =
-      LoggerFactory.getLogger(DefaultProviderModule.class);
-
-
   // ----- Constructors ------------------------------------------------------
 
   /**
@@ -104,247 +44,11 @@ public class DefaultProviderModule implements ProviderModule, ResourceProviderOb
   }
 
 
-  // ----- ProviderModule ----------------------------------------------------
-
-  @Override
-  public ResourceProvider getResourceProvider(Resource.Type type) {
-    if (!propertyProviders.containsKey(type)) {
-      createResourceProvider(type);
-    }
-    return resourceProviders.get(type);
-  }
-
-  @Override
-  public List<PropertyProvider> getPropertyProviders(Resource.Type type) {
-
-    if (!propertyProviders.containsKey(type)) {
-      createPropertyProviders(type);
-    }
-    return propertyProviders.get(type);
-  }
-
-
-  // ----- ResourceProviderObserver ------------------------------------------
-
-  @Override
-  public void update(ResourceProviderEvent event) {
-    Resource.Type type = event.getResourceType();
-
-    if (type == Resource.Type.Cluster ||
-        type == Resource.Type.Host ||
-        type == Resource.Type.HostComponent) {
-      resetInit();
-    }
-  }
-
-
-  // ----- JMXHostProvider ---------------------------------------------------
-
-  @Override
-  public String getHostName(String clusterName, String componentName) throws SystemException {
-    checkInit();
-    return clusterHostComponentMap.get(clusterName).get(componentName);
-  }
-
-  @Override
-  public Map<String, String> getHostMapping(String clusterName) throws SystemException {
-    checkInit();
-    return clusterHostMap.get(clusterName);
-  }
-
-
-  // ----- GangliaHostProvider -----------------------------------------------
-
-  @Override
-  public String getGangliaCollectorHostName(String clusterName) throws SystemException {
-    checkInit();
-    return clusterGangliaCollectorMap.get(clusterName);
-  }
-
-
   // ----- utility methods ---------------------------------------------------
 
-  protected void putResourceProvider(Resource.Type type, ResourceProvider resourceProvider) {
-    resourceProviders.put( type , resourceProvider);
-  }
-
-  protected void createResourceProvider(Resource.Type type) {
-    ResourceProvider resourceProvider =
-        ResourceProviderImpl.getResourceProvider(type, PropertyHelper.getPropertyIds(type),
+  @Override
+  protected ResourceProvider createResourceProvider(Resource.Type type) {
+    return ResourceProviderImpl.getResourceProvider(type, PropertyHelper.getPropertyIds(type),
             PropertyHelper.getKeyPropertyIds(type), managementController);
-
-    if (resourceProvider instanceof ObservableResourceProvider) {
-      ((ObservableResourceProvider)resourceProvider).addObserver(this);
-    }
-
-    putResourceProvider(type, resourceProvider);
-  }
-
-  protected void putPropertyProviders(Resource.Type type, List<PropertyProvider> providers) {
-    propertyProviders.put(type, providers);
-  }
-
-  protected void createPropertyProviders(Resource.Type type) {
-
-    List<PropertyProvider> providers = new LinkedList<PropertyProvider>();
-
-    URLStreamProvider streamProvider = new URLStreamProvider();
-
-    switch (type){
-      case Cluster :
-        providers.add(new GangliaReportPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type).get("*"),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("Clusters", "cluster_name")));
-        break;
-      case Host :
-        providers.add(new GangliaHostPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("Hosts", "cluster_name"),
-            PropertyHelper.getPropertyId("Hosts", "host_name")
-        ));
-        break;
-      case Component :
-        providers.add(new JMXPropertyProvider(
-            PropertyHelper.getJMXPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
-            null,
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name")));
-
-        providers.add(new GangliaComponentPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "cluster_name"),
-            PropertyHelper.getPropertyId("ServiceComponentInfo", "component_name")));
-        break;
-      case HostComponent:
-        providers.add(new JMXPropertyProvider(
-            PropertyHelper.getJMXPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
-            PropertyHelper.getPropertyId("HostRoles", "host_name"),
-            PropertyHelper.getPropertyId("HostRoles", "component_name")));
-
-        providers.add(new GangliaHostComponentPropertyProvider(
-            PropertyHelper.getGangliaPropertyIds(type),
-            streamProvider,
-            this,
-            PropertyHelper.getPropertyId("HostRoles", "cluster_name"),
-            PropertyHelper.getPropertyId("HostRoles", "host_name"),
-            PropertyHelper.getPropertyId("HostRoles", "component_name")));
-        break;
-      default :
-        break;
-    }
-    putPropertyProviders(type, providers);
-  }
-
-  private void checkInit() throws SystemException{
-    if (!initialized) {
-      synchronized (this) {
-        if (!initialized) {
-          initProviderMaps();
-          initialized = true;
-        }
-      }
-    }
-  }
-
-  private void resetInit() {
-    if (initialized) {
-      synchronized (this) {
-        initialized = false;
-      }
-    }
-  }
-
-  private void initProviderMaps() throws SystemException{
-    ResourceProvider provider = getResourceProvider(Resource.Type.Cluster);
-    Request          request  = PropertyHelper.getReadRequest(CLUSTER_NAME_PROPERTY_ID);
-
-    try {
-      Set<Resource> clusters = provider.getResources(request, null);
-
-      clusterHostMap             = new HashMap<String, Map<String, String>>();
-      clusterHostComponentMap    = new HashMap<String, Map<String, String>>();
-      clusterGangliaCollectorMap = new HashMap<String, String>();
-
-      for (Resource cluster : clusters) {
-
-        String clusterName = (String) cluster.getPropertyValue(CLUSTER_NAME_PROPERTY_ID);
-
-        // initialize the host map from the known hosts...
-        provider = getResourceProvider(Resource.Type.Host);
-        request  = PropertyHelper.getReadRequest(HOST_NAME_PROPERTY_ID, HOST_IP_PROPERTY_ID,
-            HOST_ATTRIBUTES_PROPERTY_ID);
-
-        Predicate predicate   = new PredicateBuilder().property(HOST_CLUSTER_NAME_PROPERTY_ID).
-            equals(clusterName).toPredicate();
-
-        Set<Resource>       hosts   = provider.getResources(request, predicate);
-        Map<String, String> hostMap = clusterHostMap.get(clusterName);
-
-        if (hostMap == null) {
-          hostMap = new HashMap<String, String>();
-          clusterHostMap.put(clusterName, hostMap);
-        }
-
-        for (Resource host : hosts) {
-          hostMap.put((String) host.getPropertyValue(HOST_NAME_PROPERTY_ID),
-              (String) host.getPropertyValue(HOST_IP_PROPERTY_ID));
-        }
-
-        // initialize the host component map and Ganglia server from the known hosts components...
-        provider = getResourceProvider(Resource.Type.HostComponent);
-
-        request = PropertyHelper.getReadRequest(HOST_COMPONENT_HOST_NAME_PROPERTY_ID,
-            HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-
-        predicate = new PredicateBuilder().property(HOST_COMPONENT_CLUSTER_NAME_PROPERTY_ID).
-            equals(clusterName).toPredicate();
-
-        Set<Resource>       hostComponents   = provider.getResources(request, predicate);
-        Map<String, String> hostComponentMap = clusterHostComponentMap.get(clusterName);
-
-        if (hostComponentMap == null) {
-          hostComponentMap = new HashMap<String, String>();
-          clusterHostComponentMap.put(clusterName, hostComponentMap);
-        }
-
-        for (Resource hostComponent : hostComponents) {
-          String componentName = (String) hostComponent.getPropertyValue(HOST_COMPONENT_COMPONENT_NAME_PROPERTY_ID);
-          String hostName      = (String) hostComponent.getPropertyValue(HOST_COMPONENT_HOST_NAME_PROPERTY_ID);
-
-          hostComponentMap.put(componentName, hostMap.get(hostName));
-
-          // record the Ganglia server for the current cluster
-          if (componentName.equals(GANGLIA_SERVER) || componentName.equals(GANGLIA_SERVER_OLD)) {
-            clusterGangliaCollectorMap.put(clusterName, clusterHostMap.get(clusterName).get(hostName));
-          }
-        }
-      }
-    } catch (UnsupportedPropertyException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught UnsupportedPropertyException while trying to get the host mappings.", e);
-      }
-      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
-    } catch (NoSuchResourceException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught NoSuchResourceException exception while trying to get the host mappings.", e);
-      }
-      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
-    } catch (NoSuchParentResourceException e) {
-      if (LOG.isErrorEnabled()) {
-        LOG.error("Caught NoSuchParentResourceException exception while trying to get the host mappings.", e);
-      }
-      throw new SystemException("An exception occurred while initializing the host mappings: " + e, e);
-    }
   }
 }

+ 21 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ResourceImpl.java

@@ -55,6 +55,27 @@ public class ResourceImpl implements Resource {
     this.type = type;
   }
 
+  /**
+   * Copy constructor
+   *
+   * @param resource  the resource to copy
+   */
+  public ResourceImpl(Resource resource) {
+    this.type = resource.getType();
+
+    for (Map.Entry<String, Map<String, Object>> categoryEntry : resource.getPropertiesMap().entrySet()) {
+      String category = categoryEntry.getKey();
+      Map<String, Object> propertyMap = categoryEntry.getValue();
+      if (propertyMap != null) {
+        for (Map.Entry<String, Object> propertyEntry : propertyMap.entrySet()) {
+          String propertyId    = (category == null ? "" : category + "/") + propertyEntry.getKey();
+          Object propertyValue = propertyEntry.getValue();
+          setProperty(propertyId, propertyValue);
+        }
+      }
+    }
+  }
+
 
   // ----- Resource ----------------------------------------------------------
 

+ 7 - 17
ambari-server/src/main/java/org/apache/ambari/server/controller/jdbc/JDBCProviderModule.java

@@ -18,32 +18,22 @@
 
 package org.apache.ambari.server.controller.jdbc;
 
-import org.apache.ambari.server.controller.internal.DefaultProviderModule;
+import org.apache.ambari.server.controller.internal.AbstractProviderModule;
 import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.spi.ResourceProvider;
 import org.apache.ambari.server.controller.utilities.DBHelper;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 
 /**
- * The default provider module implementation.
+ * A provider module implementation that uses the JDBC resource provider.
  */
-public class JDBCProviderModule extends DefaultProviderModule {
-
-
-  // ----- Constructors ------------------------------------------------------
-
-  /**
-   * Create a default provider module.
-   */
-  public JDBCProviderModule() {
-    super();
-  }
-
+public class JDBCProviderModule extends AbstractProviderModule {
   // ----- utility methods ---------------------------------------------------
 
   @Override
-  protected void createResourceProvider(Resource.Type type) {
-    putResourceProvider( type, new JDBCResourceProvider(DBHelper.CONNECTION_FACTORY, type,
+  protected ResourceProvider createResourceProvider(Resource.Type type) {
+    return new JDBCResourceProvider(DBHelper.CONNECTION_FACTORY, type,
         PropertyHelper.getPropertyIds(type),
-        PropertyHelper.getKeyPropertyIds(type)));
+        PropertyHelper.getKeyPropertyIds(type));
   }
 }

+ 7 - 0
ambari-server/src/main/java/org/apache/ambari/server/controller/spi/ProviderModule.java

@@ -33,5 +33,12 @@ public interface ProviderModule {
    */
   public ResourceProvider getResourceProvider(Resource.Type type);
 
+  /**
+   * Get the list of property providers for the given resource type.
+   *
+   * @param type  the resource type
+   *
+   * @return the list of property providers
+   */
   public List<PropertyProvider> getPropertyProviders(Resource.Type type);
 }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/ServiceConfigMappingDAO.java

@@ -58,7 +58,7 @@ public class ServiceConfigMappingDAO {
                 + " WHERE "
                 + " config.clusterId = ?1"
                 + " AND config.serviceName = ?2"
-                + " AND config.configType IN ?5",
+                + " AND config.configType IN ?3",
             ServiceConfigMappingEntity.class);
     return daoUtils.selectList(query, clusterId, serviceName, configTypes);
   }

+ 22 - 12
ambari-server/src/main/java/org/apache/ambari/server/utils/StageUtils.java

@@ -17,10 +17,26 @@
  */
 package org.apache.ambari.server.utils;
 
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+import javax.xml.bind.JAXBException;
+
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.ServiceComponent;
 import org.apache.ambari.server.state.svccomphost.ServiceComponentHostInstallEvent;
@@ -32,15 +48,6 @@ import org.codehaus.jackson.map.JsonMappingException;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.SerializationConfig;
 
-import javax.xml.bind.JAXBException;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.util.*;
-
 public class StageUtils {
   private static Log LOG = LogFactory.getLog(StageUtils.class);
   
@@ -99,6 +106,7 @@ public class StageUtils {
     Stage s = new Stage(requestId, "/tmp", "cluster1");
     s.setStageId(stageId);
     long now = System.currentTimeMillis();
+    String filename = null;
     s.addHostRoleExecutionCommand(hostname, Role.NAMENODE, RoleCommand.INSTALL,
         new ServiceComponentHostInstallEvent("NAMENODE", hostname, now, "HDP-1.2.0"),
         "cluster1", "HDFS");
@@ -150,7 +158,8 @@ public class StageUtils {
     return mapper.readValue(is, clazz);
   }
   
-  public static Map<String, List<String>> getClusterHostInfo(Cluster cluster) {
+  
+  public static Map<String, List<String>> getClusterHostInfo(Cluster cluster, HostsMap hostsMap) {
     Map<String, List<String>> info = new HashMap<String, List<String>>();
     if (cluster.getServices() != null) {
       for (String serviceName : cluster.getServices().keySet()) {
@@ -168,12 +177,13 @@ public class StageUtils {
                 && !scomp.getServiceComponentHosts().isEmpty()) {
               List<String> hostList = new ArrayList<String>();
               for (String host: scomp.getServiceComponentHosts().keySet()) {
-                hostList.add(host);
+                String mappedHost = hostsMap.getHostMap(host);
+                hostList.add(mappedHost);
               }
               info.put(clusterInfoKey, hostList);
             }
             //Add ambari db server
-            info.put("ambari_db_server_host", Arrays.asList(getHostName()));
+            info.put("ambari_db_server_host", Arrays.asList(hostsMap.getHostMap(getHostName())));
           }
         }
       }

+ 35 - 12
ambari-server/src/main/python/ambari-server.py

@@ -33,6 +33,7 @@ import stat
 import fileinput
 import urllib2
 import time
+import getpass
 # debug settings
 VERBOSE = False
 SILENT = False
@@ -64,8 +65,14 @@ IP_TBLS_DISABLED="Firewall is stopped.\n"
 IP_TBLS_SRVC_NT_FND="iptables: unrecognized service"
 
 # server commands
-SERVER_START_CMD="{0}" + os.sep + "bin" + os.sep + "java -server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC -Xms512m -Xmx2048m -cp {1}"+ os.pathsep + "{2}" + "/* org.apache.ambari.server.controller.AmbariServer >/var/log/ambari-server/ambari-server.out 2>&1"
-SERVER_START_CMD_DEBUG="{0}" + os.sep + "bin" + os.sep + "java -server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC -Xms512m -Xmx2048m -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n -cp {1}"+ os.pathsep + ".." + os.sep + "lib" + os.sep + "ambari-server" + os.sep + "* org.apache.ambari.server.controller.AmbariServer"
+ambari_provider_module_option = ""
+ambari_provider_module = os.environ.get('AMBARI_PROVIDER_MODULE')
+
+if ambari_provider_module is not None:
+  ambari_provider_module_option = "-Dprovider.module.class=" + ambari_provider_module + " "
+
+SERVER_START_CMD="{0}" + os.sep + "bin" + os.sep + "java -server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC " + ambari_provider_module_option + os.getenv('AMBARI_JVM_ARGS','-Xms512m -Xmx2048m') + " -cp {1}"+ os.pathsep + "{2}" + "/* org.apache.ambari.server.controller.AmbariServer >/var/log/ambari-server/ambari-server.out 2>&1"
+SERVER_START_CMD_DEBUG="{0}" + os.sep + "bin" + os.sep + "java -server -XX:NewRatio=2 -XX:+UseConcMarkSweepGC " + ambari_provider_module_option + os.getenv('AMBARI_JVM_ARGS','-Xms512m -Xmx2048m') + " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n -cp {1}"+ os.pathsep + ".." + os.sep + "lib" + os.sep + "ambari-server" + os.sep + "* org.apache.ambari.server.controller.AmbariServer"
 AMBARI_CONF_VAR="AMBARI_CONF_DIR"
 AMBARI_SERVER_LIB="AMBARI_SERVER_LIB"
 JAVA_HOME="JAVA_HOME"
@@ -84,6 +91,7 @@ PG_HBA_CONF_FILE = PG_HBA_DIR + "pg_hba.conf"
 PG_HBA_CONF_FILE_BACKUP = PG_HBA_DIR + "pg_hba_bak.conf.old"
 POSTGRESQL_CONF_FILE = PG_HBA_DIR + "postgresql.conf"
 PG_HBA_RELOAD_CMD = "sudo -u postgres pg_ctl -D {0} reload"
+PG_DEFAULT_PASSWORD = "bigdata"
 JDBC_USER_NAME_PROPERTY = "server.jdbc.user.name"
 JDBC_PASSWORD_FILE_PROPERTY = "server.jdbc.user.passwd"
 JDBC_PASSWORD_FILENAME = "password.dat"
@@ -516,7 +524,7 @@ accepting will cancel the Ambari Server setup.\nDo you accept the Oracle Binary
   os.chdir(savedPath)
   jdk_version = re.search('Creating (jdk.*)/jre', out).group(1)
   print "Successfully installed JDK to {0}/{1}".format(JDK_INSTALL_DIR, jdk_version)
-  writeProperty("java.home", "{0}/{1}".format(JDK_INSTALL_DIR, jdk_version))
+  writeProperty(JAVA_HOME_PROPERTY, "{0}/{1}".format(JDK_INSTALL_DIR, jdk_version))
   return 0
 
 def get_postgre_status():
@@ -584,7 +592,7 @@ def get_JAVA_HOME():
   
   try:
     properties.load(open(conf_file))
-    java_home = properties['java.home']
+    java_home = properties[JAVA_HOME_PROPERTY]
     if (not 0 == len(java_home)) and (os.path.exists(java_home)):
       return java_home
   except (Exception), e:
@@ -819,12 +827,14 @@ def getChoiceStringInput(prompt,default,firstChoice,secondChoice):
     return False
   return default
 
-def getValidatedStringInput(prompt, default, pattern, description):
+def getValidatedStringInput(prompt, default, pattern, description, is_pass):
   input =""
   while (not input):
     if (SILENT):
       print (prompt)
       input = default
+    elif is_pass:
+      input = getpass.getpass(prompt)
     else:
       input = raw_input(prompt)
     if(not input.strip()):
@@ -850,6 +860,22 @@ def saveUsernamePassword(pathtofile, username, password):
   shutil.move(pathtofile, pathtofile+'.bak.ambari')
   tree.write(pathtofile)
 
+def configurePostgresPassword():
+  # setup password
+  passwordDefault = PG_DEFAULT_PASSWORD
+  passwordPrompt = 'Password [' + passwordDefault + ']: '
+  passwordPattern = "^[a-zA-Z0-9_-]*$"
+  passwordDescr = "Invalid characters in password. Use only alphanumeric or _ or - characters"
+
+  password = getValidatedStringInput(passwordPrompt, passwordDefault, passwordPattern, passwordDescr, True)
+  if password != passwordDefault:
+    password1 = getValidatedStringInput("Re-enter password: ", passwordDefault, passwordPattern, passwordDescr, True)
+    if password != password1:
+      print "Passwords do not match"
+      password = configurePostgresPassword()
+
+  return password
+
 def configurePostgresUsernamePassword(args):
   conf_file = search_file(AMBARI_PROPERTIES_FILE, get_conf_dir())
   properties = Properties()
@@ -878,17 +904,14 @@ def configurePostgresUsernamePassword(args):
   username = usernameDefault
 
   # setup password
-  passwordDefault = 'bigdata'
-  passwordPrompt = 'Password [' + passwordDefault + ']: '
-  passwordPattern = "^[a-zA-Z0-9_-]*$"
-  passwordDescr = "Invalid characters in password. Use only alphanumeric or _ or - characters"
-  password = passwordDefault
+  password = PG_DEFAULT_PASSWORD
 
   ok = getYNInput("Enter advanced database configuration [y/n] (n)? ", False)
   if ok == True:
-    username = getValidatedStringInput(usernamePrompt, usernameDefault, usernamePattern, usernameDescr)
+    username = getValidatedStringInput(usernamePrompt, usernameDefault, usernamePattern, usernameDescr, False)
     print "Database username set to: " + username
-    password = getValidatedStringInput(passwordPrompt, passwordDefault, passwordPattern, passwordDescr)
+    password = configurePostgresPassword()
+        
 
   passFilePath = os.path.join(os.path.dirname(conf_file), JDBC_PASSWORD_FILENAME)
   

+ 8 - 8
ambari-server/src/main/resources/stacks/HDP/1.2.0/repos/repoinfo.xml

@@ -18,7 +18,7 @@
 <reposinfo>
   <os type="centos6">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -31,7 +31,7 @@
   </os>
   <os type="centos5">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -44,7 +44,7 @@
   </os>
   <os type="redhat6">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -57,7 +57,7 @@
   </os>
   <os type="redhat5">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -70,12 +70,12 @@
   </os>
   <os type="suse11">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
       <repoid>HDP-UTILS-1.1.0.15</repoid>
       <reponame>HDP-UTILS</reponame>
       <mirrorslist></mirrorslist>
@@ -83,12 +83,12 @@
   </os>
     <os type="sles11">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
       <repoid>HDP-UTILS-1.1.0.15</repoid>
       <reponame>HDP-UTILS</reponame>
       <mirrorslist></mirrorslist>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/GANGLIA/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Ganglia Metrics Collection system</comment>
-    <version>1.0</version>
+    <version>3.2.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HBASE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>mapred</user>
     <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.2.1-1</version>
+    <version>0.94.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HCATALOG/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>This is comment for HCATALOG service</comment>
-    <version>0.4.0.1-1</version>
+    <version>0.5.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HDFS/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.1.1-1</version>
+    <version>1.1.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/HIVE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.9.0.1-1</version>
+    <version>0.10.0</version>
 
     <components>        
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/MAPREDUCE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>mapred</user>
     <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.1.1-1</version>
+    <version>1.1.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/NAGIOS/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Nagios Monitoring and Alerting system</comment>
-    <version>1.0</version>
+    <version>3.2.3</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/OOZIE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0.1-1</version>
+    <version>3.2.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/PIG/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.0.1-1</version>
+    <version>0.10.1</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/SQOOP/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2.1-1</version>
+    <version>1.4.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/WEBHCAT/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>This is comment for WEBHCAT service</comment>
-    <version>0.1.4.1-1</version>
+    <version>0.5.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.2.0/services/ZOOKEEPER/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>This is comment for ZOOKEEPER service</comment>
-    <version>3.4.5.1-1</version>
+    <version>3.4.5</version>
 
     <components>
         <component>

+ 8 - 8
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/repos/repoinfo.xml

@@ -18,7 +18,7 @@
 <reposinfo>
   <os type="centos6">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -31,7 +31,7 @@
   </os>
   <os type="centos5">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -44,7 +44,7 @@
   </os>
   <os type="redhat6">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos6</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -57,7 +57,7 @@
   </os>
   <os type="redhat5">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/centos5</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
@@ -70,12 +70,12 @@
   </os>
   <os type="suse11">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
       <repoid>HDP-UTILS-1.1.0.15</repoid>
       <reponame>HDP-UTILS</reponame>
       <mirrorslist></mirrorslist>
@@ -83,12 +83,12 @@
   </os>
     <os type="sles11">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-1.2.0/repos/suse11</baseurl>
       <repoid>HDP-1.2.0</repoid>
       <reponame>HDP</reponame>
     </repo>
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.15/repos/suse11</baseurl>
       <repoid>HDP-UTILS-1.1.0.15</repoid>
       <reponame>HDP-UTILS</reponame>
       <mirrorslist></mirrorslist>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/GANGLIA/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Ganglia Metrics Collection system</comment>
-    <version>1.0</version>
+    <version>3.2.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HBASE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>mapred</user>
     <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.2.1-1</version>
+    <version>0.94.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HCATALOG/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>This is comment for HCATALOG service</comment>
-    <version>0.4.0.1-1</version>
+    <version>0.5.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HDFS/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.1.1.1-1</version>
+    <version>1.1.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/HIVE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.9.0.1-1</version>
+    <version>0.10.0</version>
 
     <components>        
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/MAPREDUCE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>mapred</user>
     <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.1.1.1-1</version>
+    <version>1.1.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/NAGIOS/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Nagios Monitoring and Alerting system</comment>
-    <version>1.0</version>
+    <version>3.2.3</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/OOZIE/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>System for workflow coordination and execution of Apache Hadoop jobs</comment>
-    <version>3.2.0.1-1</version>
+    <version>3.2.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/PIG/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.10.0.1-1</version>
+    <version>0.10.1</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/SQOOP/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.2.1-1</version>
+    <version>1.4.2</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/WEBHCAT/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>This is comment for WEBHCAT service</comment>
-    <version>0.1.4.1-1</version>
+    <version>0.5.0</version>
 
     <components>
         <component>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDPLocal/1.2.0/services/ZOOKEEPER/metainfo.xml

@@ -18,7 +18,7 @@
 <metainfo>
     <user>root</user>
     <comment>This is comment for ZOOKEEPER service</comment>
-    <version>3.4.5.1-1</version>
+    <version>3.4.5</version>
 
     <components>
         <component>

+ 4 - 1
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionDBAccessorImpl.java

@@ -28,6 +28,7 @@ import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.orm.dao.ExecutionCommandDAO;
@@ -76,7 +77,9 @@ public class TestActionDBAccessorImpl {
     clusters.getHost(hostName).persist();
     clusters.addCluster(clusterName);
     db = injector.getInstance(ActionDBAccessorImpl.class);
-    am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db);
+    
+    am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
+        new HostsMap((String) null));
   }
 
   @After

+ 3 - 2
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionManager.java

@@ -31,6 +31,7 @@ import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.CommandReport;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.state.Clusters;
@@ -74,7 +75,7 @@ public class TestActionManager {
   public void testActionResponse() {
     ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
     ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(),
-        clusters, db);
+        clusters, db, new HostsMap((String) null));
     populateActionDB(db, hostname);
     Stage stage = db.getAllStages(requestId).get(0);
     Assert.assertEquals(stageId, stage.getStageId());
@@ -110,7 +111,7 @@ public class TestActionManager {
   public void testLargeLogs() {
     ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
     ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(),
-        clusters, db);
+        clusters, db, new HostsMap((String) null));
     populateActionDB(db, hostname);
     Stage stage = db.getAllStages(requestId).get(0);
     Assert.assertEquals(stageId, stage.getStageId());

+ 4 - 2
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestActionScheduler.java

@@ -34,6 +34,7 @@ import org.apache.ambari.server.actionmanager.ActionScheduler.RoleStats;
 import org.apache.ambari.server.agent.ActionQueue;
 import org.apache.ambari.server.agent.AgentCommand;
 import org.apache.ambari.server.agent.ExecutionCommand;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.state.Cluster;
 import org.apache.ambari.server.state.Clusters;
 import org.apache.ambari.server.state.Service;
@@ -73,7 +74,7 @@ public class TestActionScheduler {
     //Keep large number of attempts so that the task is not expired finally
     //Small action timeout to test rescheduling
     ActionScheduler scheduler = new ActionScheduler(100, 100, db, aq, fsm,
-        10000);
+        10000, new HostsMap((String) null));
     scheduler.setTaskTimeoutAdjustment(false);
     // Start the thread
     scheduler.start();
@@ -137,7 +138,8 @@ public class TestActionScheduler {
     db.persistActions(stages);
 
     //Small action timeout to test rescheduling
-    ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3);
+    ActionScheduler scheduler = new ActionScheduler(100, 50, db, aq, fsm, 3, 
+        new HostsMap((String) null));
     scheduler.setTaskTimeoutAdjustment(false);
     // Start the thread
     scheduler.start();

+ 1 - 0
ambari-server/src/test/java/org/apache/ambari/server/actionmanager/TestStage.java

@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
 
 import org.apache.ambari.server.Role;
 import org.apache.ambari.server.RoleCommand;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.utils.StageUtils;
 import org.junit.Test;
 

+ 12 - 9
ambari-server/src/test/java/org/apache/ambari/server/agent/TestHeartbeatHandler.java

@@ -54,6 +54,7 @@ import org.apache.ambari.server.actionmanager.Stage;
 import org.apache.ambari.server.agent.HostStatus.Status;
 import org.apache.ambari.server.api.services.AmbariMetaInfo;
 import org.apache.ambari.server.configuration.Configuration;
+import org.apache.ambari.server.controller.HostsMap;
 import org.apache.ambari.server.orm.GuiceJpaInitializer;
 import org.apache.ambari.server.orm.InMemoryDefaultTestModule;
 import org.apache.ambari.server.state.Cluster;
@@ -108,7 +109,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testHeartbeat() throws Exception {
     ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl());
+        new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
     String hostname = "host1";
     fsm.addHost(hostname);
@@ -148,7 +149,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testStatusHeartbeat() throws Exception {
     ActionManager am = new ActionManager(0, 0, null, null,
-            new ActionDBInMemoryImpl());
+            new ActionDBInMemoryImpl(), new HostsMap((String) null));
     final String hostname = "host1";
     String clusterName = "cluster1";
     String serviceName = "HDFS";
@@ -236,7 +237,8 @@ public class TestHeartbeatHandler {
     clusters.getHost(hostname).persist();
     clusters.addCluster(clusterName);
     ActionDBAccessor db = injector.getInstance(ActionDBAccessorImpl.class);
-    ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db);
+    ActionManager am = new ActionManager(5000, 1200000, new ActionQueue(), clusters, db,
+        new HostsMap((String) null));
     populateActionDB(db, hostname);
     Stage stage = db.getAllStages(requestId).get(0);
     Assert.assertEquals(stageId, stage.getStageId());
@@ -267,6 +269,7 @@ public class TestHeartbeatHandler {
   private void populateActionDB(ActionDBAccessor db, String hostname) {
     Stage s = new Stage(requestId, "/a/b", "cluster1");
     s.setStageId(stageId);
+    String filename = null;
     s.addHostRoleExecutionCommand(hostname, Role.HBASE_MASTER,
         RoleCommand.START,
         new ServiceComponentHostStartEvent(Role.HBASE_MASTER.toString(),
@@ -281,7 +284,7 @@ public class TestHeartbeatHandler {
   public void testRegistration() throws AmbariException,
       InvalidStateTransitionException {
     ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl());
+        new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
     String hostname = "host1";
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -308,7 +311,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testRegistrationPublicHostname() throws AmbariException, InvalidStateTransitionException {
     ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl());
+        new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
     String hostname = "host1";
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -341,7 +344,7 @@ public class TestHeartbeatHandler {
   public void testInvalidOSRegistration() throws AmbariException,
       InvalidStateTransitionException {
     ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl());
+        new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
     String hostname = "host1";
     HeartBeatHandler handler = new HeartBeatHandler(fsm, new ActionQueue(), am,
@@ -370,7 +373,7 @@ public class TestHeartbeatHandler {
   public void testRegisterNewNode()
       throws AmbariException, InvalidStateTransitionException {
     ActionManager am = new ActionManager(0, 0, null, null,
-        new ActionDBInMemoryImpl());
+        new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
     String hostname = "host1";
     fsm.addHost(hostname);
@@ -460,7 +463,7 @@ public class TestHeartbeatHandler {
     when(hm.generateStatusCommands(anyString())).thenReturn(dummyCmds);
 
     ActionManager am = new ActionManager(0, 0, null, null,
-            new ActionDBInMemoryImpl());
+            new ActionDBInMemoryImpl(), new HostsMap((String) null));
     Clusters fsm = clusters;
     String hostname = "host1";
     ActionQueue actionQueue = new ActionQueue();
@@ -487,7 +490,7 @@ public class TestHeartbeatHandler {
   @Test
   public void testTaskInProgressHandling() throws AmbariException, InvalidStateTransitionException {
     ActionManager am = new ActionManager(0, 0, null, null,
-            new ActionDBInMemoryImpl());
+            new ActionDBInMemoryImpl(), new HostsMap((String) null));
     final String hostname = "host1";
     String clusterName = "cluster1";
     String serviceName = "HDFS";

+ 3 - 3
ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerImplTest.java

@@ -56,7 +56,7 @@ public class AmbariManagementControllerImplTest {
     injector.injectMembers(capture(controllerCapture));
     expect(injector.getInstance(Gson.class)).andReturn(null);
 
-    // getClusters
+    // getCluster
     expect(clusters.getCluster("cluster1")).andReturn(cluster);
     expect(cluster.convertToResponse()).andReturn(response);
 
@@ -95,7 +95,7 @@ public class AmbariManagementControllerImplTest {
     injector.injectMembers(capture(controllerCapture));
     expect(injector.getInstance(Gson.class)).andReturn(null);
 
-    // getClusters
+    // getCluster
     expect(clusters.getCluster("cluster1")).andThrow(new ClusterNotFoundException("cluster1"));
 
     // replay mocks
@@ -148,7 +148,7 @@ public class AmbariManagementControllerImplTest {
     injector.injectMembers(capture(controllerCapture));
     expect(injector.getInstance(Gson.class)).andReturn(null);
 
-    // getClusters
+    // getCluster
     expect(clusters.getCluster("cluster1")).andThrow(new ClusterNotFoundException("cluster1"));
     expect(clusters.getCluster("cluster2")).andReturn(cluster);
     expect(clusters.getCluster("cluster3")).andReturn(cluster2);

+ 100 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerClusterProviderTest.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Set;
+
+/**
+ * Tests for GSInstallerClusterProvider
+ */
+public class GSInstallerClusterProviderTest {
+
+  @Test
+  public void testGetResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+
+    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals("ambari", resources.iterator().next().getPropertyValue(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID));
+  }
+
+  @Test
+  public void testGetResourcesWithPredicate() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+
+    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
+
+    Predicate predicate = new PredicateBuilder().property(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID).equals("ambari").toPredicate();
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(1, resources.size());
+    Assert.assertEquals("ambari", resources.iterator().next().getPropertyValue(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID));
+
+    predicate = new PredicateBuilder().property(GSInstallerClusterProvider.CLUSTER_NAME_PROPERTY_ID).equals("non-existent Cluster").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertTrue(resources.isEmpty());
+  }
+
+  @Test
+  public void testCreateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
+
+    try {
+      provider.createResources(PropertyHelper.getReadRequest());
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testUpdateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
+
+    try {
+      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testDeleteResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerClusterProvider(clusterDefinition);
+
+    try {
+      provider.deleteResources(null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+}

+ 100 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerComponentProviderTest.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Set;
+
+/**
+ * Tests for GSInstallerComponentProvider.
+ */
+public class GSInstallerComponentProviderTest {
+
+  @Test
+  public void testGetResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
+    Assert.assertEquals(25, resources.size());
+  }
+
+  @Test
+  public void testGetResourcesWithPredicate() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
+    Predicate predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("TASKTRACKER").toPredicate();
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(1, resources.size());
+
+    predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("TASKTRACKER").or().
+        property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("GANGLIA_MONITOR").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(2, resources.size());
+
+    predicate = new PredicateBuilder().property(GSInstallerComponentProvider.COMPONENT_COMPONENT_NAME_PROPERTY_ID).equals("BadComponent").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertTrue(resources.isEmpty());
+  }
+
+  @Test
+  public void testCreateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
+
+    try {
+      provider.createResources(PropertyHelper.getReadRequest());
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testUpdateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
+
+    try {
+      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testDeleteResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerComponentProvider(clusterDefinition);
+
+    try {
+      provider.deleteResources(null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+}

+ 95 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostComponentProviderTest.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Set;
+
+/**
+ *
+ */
+public class GSInstallerHostComponentProviderTest {
+
+  @Test
+  public void testGetResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
+    Assert.assertEquals(33, resources.size());
+  }
+
+  @Test
+  public void testGetResourcesWithPredicate() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
+    Predicate predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(5, resources.size());
+
+    predicate = new PredicateBuilder().property(GSInstallerHostComponentProvider.HOST_COMPONENT_HOST_NAME_PROPERTY_ID).equals("UnknownHost").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertTrue(resources.isEmpty());
+  }
+
+  @Test
+  public void testCreateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
+
+    try {
+      provider.createResources(PropertyHelper.getReadRequest());
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testUpdateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
+
+    try {
+      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testDeleteResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostComponentProvider(clusterDefinition);
+
+    try {
+      provider.deleteResources(null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+}

+ 101 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerHostProviderTest.java

@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Set;
+
+/**
+ *
+ */
+public class GSInstallerHostProviderTest {
+
+  @Test
+  public void testGetResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
+    Assert.assertEquals(5, resources.size());
+  }
+
+  @Test
+  public void testGetResourcesWithPredicate() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
+    Predicate predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").toPredicate();
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(1, resources.size());
+
+    predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-190-97-104.ec2.internal").or().
+        property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("ip-10-8-113-183.ec2.internal").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(2, resources.size());
+
+    predicate = new PredicateBuilder().property(GSInstallerHostProvider.HOST_NAME_PROPERTY_ID).equals("unknownHost").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertTrue(resources.isEmpty());
+  }
+
+  @Test
+  public void testCreateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
+
+    try {
+      provider.createResources(PropertyHelper.getReadRequest());
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testUpdateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
+
+    try {
+      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testDeleteResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerHostProvider(clusterDefinition);
+
+    try {
+      provider.deleteResources(null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+}
+

+ 100 - 0
ambari-server/src/test/java/org/apache/ambari/server/controller/gsinstaller/GSInstallerServiceProviderTest.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ambari.server.controller.gsinstaller;
+
+import junit.framework.Assert;
+import org.apache.ambari.server.controller.spi.Predicate;
+import org.apache.ambari.server.controller.spi.Resource;
+import org.apache.ambari.server.controller.utilities.PredicateBuilder;
+import org.apache.ambari.server.controller.utilities.PropertyHelper;
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Set;
+
+/**
+ *
+ */
+public class GSInstallerServiceProviderTest {
+
+  @Test
+  public void testGetResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), null);
+    Assert.assertEquals(12, resources.size());
+  }
+
+  @Test
+  public void testGetResourcesWithPredicate() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
+    Predicate predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("MAPREDUCE").toPredicate();
+    Set<Resource> resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(1, resources.size());
+
+    predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("GANGLIA").or().
+        property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("NAGIOS").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertEquals(2, resources.size());
+
+    predicate = new PredicateBuilder().property(GSInstallerServiceProvider.SERVICE_SERVICE_NAME_PROPERTY_ID).equals("NO SERVICE").toPredicate();
+    resources = provider.getResources(PropertyHelper.getReadRequest(), predicate);
+    Assert.assertTrue(resources.isEmpty());
+  }
+
+  @Test
+  public void testCreateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
+
+    try {
+      provider.createResources(PropertyHelper.getReadRequest());
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testUpdateResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
+
+    try {
+      provider.updateResources(PropertyHelper.getUpdateRequest(new HashMap<String, Object>()), null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+
+  @Test
+  public void testDeleteResources() throws Exception {
+    ClusterDefinition clusterDefinition = new ClusterDefinition();
+    GSInstallerResourceProvider provider = new GSInstallerServiceProvider(clusterDefinition);
+
+    try {
+      provider.deleteResources(null);
+      Assert.fail("Expected UnsupportedOperationException.");
+    } catch (UnsupportedOperationException e) {
+      //expected
+    }
+  }
+}

Daži faili netika attēloti, jo izmaiņu fails ir pārāk liels