
Merge branch 'trunk' into branch-alerts-dev

Conflicts:
	ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java
	ambari-server/src/test/java/org/apache/ambari/server/controller/AmbariManagementControllerTest.java
Jonathan Hurley, 10 years ago
Commit 17b8e79904
100 files changed, 853 additions and 1324 deletions
  1. ambari-admin/src/main/resources/ui/admin-web/app/views/leftNavbar.html (+1 -1)
  2. ambari-agent/src/main/package/rpm/preremove.sh (+1 -1)
  3. ambari-agent/src/test/python/ambari_agent/TestActualConfigHandler.py (+3 -3)
  4. ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py (+2 -2)
  5. ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py (+31 -6)
  6. ambari-server/docs/api/v1/services.md (+0 -7)
  7. ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java (+9 -1)
  8. ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariHandlerList.java (+32 -1)
  9. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AmbariPrivilegeResourceProvider.java (+1 -1)
  10. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java (+1 -1)
  11. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java (+29 -1)
  12. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RecommendationResourceProvider.java (+2 -4)
  13. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ValidationResourceProvider.java (+2 -4)
  14. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewInstanceResourceProvider.java (+5 -5)
  15. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewPermissionResourceProvider.java (+2 -2)
  16. ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewPrivilegeResourceProvider.java (+4 -4)
  17. ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java (+0 -1)
  18. ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java (+4 -4)
  19. ambari-server/src/main/java/org/apache/ambari/server/state/Service.java (+3 -5)
  20. ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java (+135 -7)
  21. ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java (+6 -4)
  22. ambari-server/src/main/package/rpm/preremove.sh (+1 -1)
  23. ambari-server/src/main/resources/custom_actions/validate_configs.py (+0 -2)
  24. ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py (+2 -1)
  25. ambari-server/src/main/resources/stacks/HDP/1.3.2/role_command_order.json (+1 -3)
  26. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/webhcat-env.xml (+1 -1)
  27. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/webhcat-site.xml (+0 -0)
  28. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml (+82 -72)
  29. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/templetonSmoke.sh (+0 -0)
  30. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py (+28 -3)
  31. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/service_check.py (+2 -0)
  32. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py (+3 -0)
  33. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat.py (+0 -0)
  34. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat_server.py (+1 -1)
  35. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat_service.py (+2 -2)
  36. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat_service_check.py (+19 -3)
  37. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml (+1 -1)
  38. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 (+0 -6)
  39. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2 (+1 -1)
  40. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml (+0 -103)
  41. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/__init__.py (+0 -20)
  42. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py (+0 -78)
  43. ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/service_check.py (+0 -44)
  44. ambari-server/src/main/resources/stacks/HDP/1.3/role_command_order.json (+1 -3)
  45. ambari-server/src/main/resources/stacks/HDP/1.3/services/HIVE/metainfo.xml (+0 -5)
  46. ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/core-site.xml (+9 -0)
  47. ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py (+2 -1)
  48. ambari-server/src/main/resources/stacks/HDP/2.0.6/role_command_order.json (+1 -3)
  49. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml (+10 -0)
  50. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-env.xml (+1 -1)
  51. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-site.xml (+0 -0)
  52. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml (+91 -74)
  53. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/templetonSmoke.sh (+0 -0)
  54. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py (+42 -2)
  55. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py (+2 -0)
  56. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py (+1 -0)
  57. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py (+0 -0)
  58. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_server.py (+1 -1)
  59. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service.py (+2 -2)
  60. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service_check.py (+18 -3)
  61. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml (+1 -1)
  62. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 (+1 -7)
  63. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2 (+1 -1)
  64. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py (+2 -1)
  65. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py (+8 -4)
  66. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/metainfo.xml (+0 -110)
  67. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/__init__.py (+0 -20)
  68. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py (+0 -102)
  69. ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/service_check.py (+0 -45)
  70. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/role_command_order.json (+1 -3)
  71. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/metainfo.xml (+58 -42)
  72. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/WEBHCAT/configuration/webhcat-site.xml (+0 -143)
  73. ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/WEBHCAT/metainfo.xml (+0 -46)
  74. ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json (+1 -2)
  75. ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py (+6 -3)
  76. ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/metainfo.xml (+47 -15)
  77. ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/configuration/storm-env.xml (+1 -1)
  78. ambari-server/src/main/resources/stacks/HDP/2.1/services/WEBHCAT/configuration/webhcat-site.xml (+0 -143)
  79. ambari-server/src/main/resources/stacks/HDP/2.1/services/WEBHCAT/metainfo.xml (+0 -47)
  80. ambari-server/src/main/resources/stacks/HDP/2.2.1/services/HIVE/metainfo.xml (+0 -4)
  81. ambari-server/src/main/resources/stacks/HDP/2.2.1/services/WEBHCAT/metainfo.xml (+0 -26)
  82. ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml (+6 -21)
  83. ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml (+11 -1)
  84. ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml (+1 -1)
  85. ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml (+1 -1)
  86. ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml (+1 -1)
  87. ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml (+1 -1)
  88. ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml (+2 -2)
  89. ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/webhcat-site.xml (+5 -5)
  90. ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml (+40 -8)
  91. ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml (+1 -1)
  92. ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml (+26 -1)
  93. ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml (+1 -4)
  94. ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml (+14 -1)
  95. ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml (+11 -1)
  96. ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml (+1 -1)
  97. ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml (+1 -1)
  98. ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml (+1 -1)
  99. ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml (+1 -1)
  100. ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml (+3 -3)

+ 1 - 1
ambari-admin/src/main/resources/ui/admin-web/app/views/leftNavbar.html

@@ -57,7 +57,7 @@
           <li ng-class="{active: isActive('clusters.manageAccess')}">
             <a href="#/clusters/{{cluster.Clusters.cluster_name}}/manageAccess" class="permissions">Permissions</a>
           </li>
-          <li><a href="/#/main/dashboard" class="gotodashboard" target="{{cluster.Clusters.cluster_name}}">Go to Dashboard</a></li>
+          <li><a href="/#/main/dashboard" class="gotodashboard">Go to Dashboard</a></li>
         </ul>
       </div>
         

+ 1 - 1
ambari-agent/src/main/package/rpm/preremove.sh

@@ -29,7 +29,7 @@ if [ "$1" -eq 0 ]; then  # Action is uninstall
       /var/lib/ambari-agent/install-helper.sh remove
     fi
 
-    chkconfig --del ambari-agent
+    chkconfig --list | grep ambari-agent && chkconfig --del ambari-agent
 fi
 
 exit 0

+ 3 - 3
ambari-agent/src/test/python/ambari_agent/TestActualConfigHandler.py

@@ -34,8 +34,8 @@ class TestActualConfigHandler(TestCase):
   def setUp(self):
     LiveStatus.SERVICES = [
       "HDFS", "MAPREDUCE", "GANGLIA", "HBASE",
-      "NAGIOS", "ZOOKEEPER", "OOZIE", "HCATALOG",
-      "KERBEROS", "TEMPLETON", "HIVE", "WEBHCAT",
+      "NAGIOS", "ZOOKEEPER", "OOZIE",
+      "KERBEROS", "TEMPLETON", "HIVE",
       "YARN", "MAPREDUCE2", "FLUME", "TEZ",
       "FALCON", "STORM"
     ]
@@ -108,7 +108,7 @@ class TestActualConfigHandler(TestCase):
        "componentName" : "HIVE_METASTORE"},
       {"serviceName" : "HIVE",
        "componentName" : "MYSQL_SERVER"},
-      {"serviceName" : "WEBHCAT",
+      {"serviceName" : "HIVE",
        "componentName" : "WEBHCAT_SERVER"},
       {"serviceName" : "YARN",
        "componentName" : "RESOURCEMANAGER"},

+ 2 - 2
ambari-agent/src/test/python/ambari_agent/TestLiveStatus.py

@@ -36,8 +36,8 @@ class TestLiveStatus(TestCase):
     sys.stdout = out
     LiveStatus.SERVICES = [
       "HDFS", "MAPREDUCE", "GANGLIA", "HBASE",
-      "NAGIOS", "ZOOKEEPER", "OOZIE", "HCATALOG",
-      "KERBEROS", "TEMPLETON", "HIVE", "WEBHCAT",
+      "NAGIOS", "ZOOKEEPER", "OOZIE",
+      "KERBEROS", "TEMPLETON", "HIVE",
       "YARN", "MAPREDUCE2", "FLUME", "TEZ",
       "FALCON", "STORM"
     ]

+ 31 - 6
ambari-agent/src/test/python/resource_management/TestExecuteHadoopResource.py

@@ -15,6 +15,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 '''
+import os
 
 from unittest import TestCase
 from mock.mock import patch
@@ -38,7 +39,11 @@ class TestExecuteHadoopResource(TestCase):
       self.assertEqual(execute_mock.call_count, 1)
       self.assertEqual(execute_mock.call_args[0][0].command,'hadoop --config conf_dir command')
       self.assertEqual(execute_mock.call_args[0][0].arguments,
-                       {'logoutput': True, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+                       {'logoutput': True,
+                        'tries': 1,
+                        'user': 'user',
+                        'try_sleep': 0,
+                        'environment': {'PATH': os.environ['PATH']}})
 
 
   @patch("resource_management.core.providers.system.ExecuteProvider")
@@ -58,7 +63,11 @@ class TestExecuteHadoopResource(TestCase):
       self.assertEqual(execute_mock.call_count, 1)
       self.assertEqual(execute_mock.call_args[0][0].command,'hadoop --config conf_dir command')
       self.assertEqual(execute_mock.call_args[0][0].arguments,
-                       {'logoutput': False, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+                       {'logoutput': False,
+                        'tries': 1,
+                        'user': 'user',
+                        'try_sleep': 0,
+                        'environment': {'PATH': os.environ['PATH']}})
 
 
   @patch("resource_management.core.providers.system.ExecuteProvider")
@@ -83,7 +92,11 @@ class TestExecuteHadoopResource(TestCase):
       self.assertEqual(execute_mock.call_count, 1)
       self.assertEqual(execute_mock.call_args[0][0].command,'hadoop --config conf_dir command')
       self.assertEqual(execute_mock.call_args[0][0].arguments,
-                       {'logoutput': True, 'tries': 2, 'user': 'user', 'try_sleep': 2})
+                       {'logoutput': True,
+                        'tries': 2,
+                        'user': 'user',
+                        'try_sleep': 2,
+                        'environment': {'PATH': os.environ['PATH']}})
 
 
   @patch("resource_management.core.providers.system.ExecuteProvider")
@@ -105,9 +118,17 @@ class TestExecuteHadoopResource(TestCase):
       self.assertEqual(execute_mock.call_args_list[1][0][0].command,
                        'hadoop --config conf_dir command2')
       self.assertEqual(execute_mock.call_args_list[0][0][0].arguments,
-                       {'logoutput': False, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+                       {'logoutput': False,
+                        'tries': 1,
+                        'user': 'user',
+                        'try_sleep': 0,
+                        'environment': {'PATH': os.environ['PATH']}})
       self.assertEqual(execute_mock.call_args_list[1][0][0].arguments,
-                       {'logoutput': False, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+                       {'logoutput': False,
+                        'tries': 1,
+                        'user': 'user',
+                        'try_sleep': 0,
+                        'environment': {'PATH': os.environ['PATH']}})
 
 
   @patch("resource_management.core.providers.system.ExecuteProvider")
@@ -156,7 +177,11 @@ class TestExecuteHadoopResource(TestCase):
       self.assertEqual(execute_mock.call_args_list[1][0][0].command,
                        'hadoop --config conf_dir command')
       self.assertEqual(execute_mock.call_args_list[1][0][0].arguments,
-                       {'logoutput': True, 'tries': 1, 'user': 'user', 'try_sleep': 0})
+                       {'logoutput': True,
+                        'tries': 1,
+                        'user': 'user',
+                        'try_sleep': 0,
+                        'environment': {'PATH': os.environ['PATH']}})
 
 
   @patch("resource_management.core.providers.system.ExecuteProvider")

+ 0 - 7
ambari-server/docs/api/v1/services.md

@@ -77,13 +77,6 @@ Get the collection of the services for the cluster named "c1".
           			"service_name" : "NAGIOS"
           		}
         	},
-        	{
-        		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/HCATALOG",
-        		"ServiceInfo" : {
-        	  		"cluster_name" : "c1",
-        	  		"service_name" : "HCATALOG"
-        	  	}
-        	},
         	{
         		"href" : "http://your.ambari.server/api/v1/clusters/c1/services/PIG",
         		"ServiceInfo" : {

+ 9 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/services/stackadvisor/StackAdvisorRunner.java

@@ -75,7 +75,15 @@ public class StackAdvisorRunner {
         if (exitCode > 0) {
           String errorMessage;
           if (errMessage != null) {
-            errorMessage = errMessage.substring(errMessage.lastIndexOf("\n"));
+            // We want to get the last line.
+            int index = errMessage.lastIndexOf("\n");
+            if (index > 0 && index == (errMessage.length() - 1)) {
+              index = errMessage.lastIndexOf("\n", index - 1); // sentence ended with newline
+            }
+            if (index > -1) {
+              errMessage = errMessage.substring(index + 1).trim();
+            }
+            errorMessage = errMessage;
           } else {
             errorMessage = "Error occurred during stack advisor execution";
           }
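
A minimal standalone sketch of the last-line extraction added above (the class name and sample message are hypothetical, not part of the commit). It shows why the extra lastIndexOf step matters: a stderr buffer that ends with a newline still yields its final line instead of an empty string.

public class LastLineDemo {
  static String lastLine(String errMessage) {
    int index = errMessage.lastIndexOf("\n");
    if (index > 0 && index == (errMessage.length() - 1)) {
      index = errMessage.lastIndexOf("\n", index - 1); // message ended with a newline
    }
    if (index > -1) {
      errMessage = errMessage.substring(index + 1).trim();
    }
    return errMessage;
  }

  public static void main(String[] args) {
    // Prints: KeyError: 'stack_version' (a made-up stack advisor error)
    System.out.println(lastLine("Traceback (most recent call last):\nKeyError: 'stack_version'\n"));
  }
}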

+ 32 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariHandlerList.java

@@ -32,6 +32,7 @@ import org.apache.ambari.view.SystemException;
 import org.apache.ambari.view.ViewContext;
 import org.eclipse.jetty.server.Handler;
 import org.eclipse.jetty.server.SessionManager;
+import org.eclipse.jetty.server.session.SessionHandler;
 import org.eclipse.jetty.servlet.FilterHolder;
 import org.eclipse.jetty.webapp.WebAppContext;
 import org.springframework.web.context.WebApplicationContext;
@@ -91,7 +92,7 @@ public class AmbariHandlerList extends FailsafeHandlerList implements ViewInstan
         context.setClassLoader(viewInstanceDefinition.getViewEntity().getClassLoader());
         context.setAttribute(ViewContext.CONTEXT_ATTRIBUTE, new ViewContextImpl(viewInstanceDefinition, viewRegistry));
 
-        context.getSessionHandler().setSessionManager(sessionManager);
+        context.setSessionHandler(new SharedSessionHandler(sessionManager));
         context.getServletContext().setAttribute(WebApplicationContext.ROOT_WEB_APPLICATION_CONTEXT_ATTRIBUTE, springWebAppContext);
         context.addFilter(new FilterHolder(springSecurityFilter), "/*", 1);
 
@@ -142,6 +143,7 @@ public class AmbariHandlerList extends FailsafeHandlerList implements ViewInstan
   public void removeViewInstance(ViewInstanceEntity viewInstanceDefinition) {
     Handler handler = handlerMap.get(viewInstanceDefinition);
     if (handler != null) {
+      handlerMap.remove(viewInstanceDefinition);
       removeHandler(handler);
     }
   }
@@ -181,4 +183,33 @@ public class AmbariHandlerList extends FailsafeHandlerList implements ViewInstan
      */
     public Handler create(ViewInstanceEntity viewInstanceDefinition, String webApp, String contextPath);
   }
+
+
+  // ----- inner class : SharedSessionHandler --------------------------------
+
+  /**
+   * A session handler that shares its session manager with another app.
+   * This handler DOES NOT attempt to stop the shared session manager.
+   */
+  private static class SharedSessionHandler extends SessionHandler {
+
+    // ----- Constructors ----------------------------------------------------
+
+    /**
+     * Construct a SharedSessionHandler.
+     *
+     * @param manager  the shared session manager.
+     */
+    public SharedSessionHandler(SessionManager manager) {
+      super(manager);
+    }
+
+
+    // ----- SessionHandler --------------------------------------------------
+
+    @Override
+    protected void doStop() throws Exception {
+      // do nothing...
+    }
+  }
 }
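
For context on SharedSessionHandler: Jetty's stock SessionHandler stops its SessionManager as part of its own doStop(), so a per-view WebAppContext sharing the server's session manager would tear down everyone's sessions when that one view was removed; the no-op doStop() above avoids that. A self-contained sketch of the general pattern (all names hypothetical, no Jetty dependency):

public class SharedLifecycleDemo {
  /** Stands in for the session manager shared across contexts. */
  static class SharedResource {
    boolean running = true;
    void stop() { running = false; }
  }

  /** Default behavior: stopping the wrapper also stops the shared resource. */
  static class DefaultWrapper {
    final SharedResource shared;
    DefaultWrapper(SharedResource shared) { this.shared = shared; }
    void doStop() { shared.stop(); }
  }

  /** The SharedSessionHandler pattern: leave the shared resource to its owner. */
  static class SharingWrapper {
    final SharedResource shared;
    SharingWrapper(SharedResource shared) { this.shared = shared; }
    void doStop() { /* do nothing... */ }
  }

  public static void main(String[] args) {
    SharedResource sessions = new SharedResource();
    new SharingWrapper(sessions).doStop();
    System.out.println("after sharing stop: " + sessions.running);  // true
    new DefaultWrapper(sessions).doStop();
    System.out.println("after default stop: " + sessions.running);  // false
  }
}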

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/AmbariPrivilegeResourceProvider.java

@@ -125,7 +125,7 @@ public class AmbariPrivilegeResourceProvider extends PrivilegeResourceProvider<O
     //add view entities
     ViewRegistry viewRegistry = ViewRegistry.getInstance();
     for (ViewEntity viewEntity : viewRegistry.getDefinitions()) {
-      if (viewEntity.isLoaded()) {
+      if (viewEntity.isDeployed()) {
         for (ViewInstanceEntity viewInstanceEntity : viewEntity.getInstances()) {
           resourceEntities.put(viewInstanceEntity.getResource().getId(), viewInstanceEntity);
         }

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BaseBlueprintProcessor.java

@@ -851,7 +851,7 @@ public abstract class BaseBlueprintProcessor extends AbstractControllerResourceP
       Collection<DependencyInfo> nagiosDependencies = getDependenciesForComponent("NAGIOS_SERVER");
       for (DependencyInfo dependency : nagiosDependencies) {
         if (dependency.getComponentName().equals("HCAT")) {
-          dependencyConditionalServiceMap.put(dependency, "HCATALOG");
+          dependencyConditionalServiceMap.put(dependency, "HIVE");
         } else if (dependency.getComponentName().equals("OOZIE_CLIENT")) {
           dependencyConditionalServiceMap.put(dependency, "OOZIE");
         } else if (dependency.getComponentName().equals("YARN_CLIENT")) {

+ 29 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/BlueprintConfigurationProcessor.java

@@ -274,6 +274,17 @@ public class BlueprintConfigurationProcessor {
     return hosts;
   }
 
+
+  /**
+   * Provides package-level access to the map of single host topology updaters.
+   * This is useful for facilitating unit-testing of this class.
+   *
+   * @return the map of single host topology updaters
+   */
+  static Map<String, Map<String, PropertyUpdater>> getSingleHostTopologyUpdaters() {
+    return singleHostTopologyUpdaters;
+  }
+
   /**
    * Provides functionality to update a property value.
    */
@@ -296,7 +307,7 @@ public class BlueprintConfigurationProcessor {
    * Topology based updater which replaces the original host name of a property with the host name
    * which runs the associated (master) component in the new cluster.
    */
-  private static class SingleHostTopologyUpdater implements PropertyUpdater {
+  static class SingleHostTopologyUpdater implements PropertyUpdater {
     /**
      * Component name
      */
@@ -341,6 +352,16 @@ public class BlueprintConfigurationProcessor {
         }
       }
     }
+
+    /**
+     * Provides access to the name of the component associated
+     *   with this updater instance.
+     *
+     * @return component name for this updater
+     */
+    public String getComponentName() {
+      return this.component;
+    }
   }
 
   /**
@@ -603,6 +624,8 @@ public class BlueprintConfigurationProcessor {
     Map<String, PropertyUpdater> hiveSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> oozieSiteMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> stormSiteMap = new HashMap<String, PropertyUpdater>();
+    Map<String, PropertyUpdater> falconStartupPropertiesMap = new HashMap<String, PropertyUpdater>();
+
 
     Map<String, PropertyUpdater> mapredEnvMap = new HashMap<String, PropertyUpdater>();
     Map<String, PropertyUpdater> hadoopEnvMap = new HashMap<String, PropertyUpdater>();
@@ -623,6 +646,7 @@ public class BlueprintConfigurationProcessor {
     singleHostTopologyUpdaters.put("hive-site", hiveSiteMap);
     singleHostTopologyUpdaters.put("oozie-site", oozieSiteMap);
     singleHostTopologyUpdaters.put("storm-site", stormSiteMap);
+    singleHostTopologyUpdaters.put("falcon-startup.properties", falconStartupPropertiesMap);
 
     mPropertyUpdaters.put("hadoop-env", hadoopEnvMap);
     mPropertyUpdaters.put("hbase-env", hbaseEnvMap);
@@ -686,6 +710,10 @@ public class BlueprintConfigurationProcessor {
     multiStormSiteMap.put("storm.zookeeper.servers",
         new YamlMultiValuePropertyDecorator(new MultipleHostTopologyUpdater("ZOOKEEPER_SERVER")));
 
+    // FALCON
+    falconStartupPropertiesMap.put("*.broker.url", new SingleHostTopologyUpdater("FALCON_SERVER"));
+
+
     // Required due to AMBARI-4933.  These no longer seem to be required as the default values in the stack
     // are now correct but are left here in case an existing blueprint still contains an old value.
     hadoopEnvMap.put("namenode_heapsize", new MPropertyUpdater());
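
Per the SingleHostTopologyUpdater javadoc earlier in this diff, these updaters replace the host name baked into a property value with the host that runs the associated master component in the new cluster; the new falcon-startup.properties entry, for example, points "*.broker.url" at whichever host runs FALCON_SERVER. A simplified, hypothetical sketch of that substitution (the real class resolves the host from the blueprint's host groups; a plain map stands in here):

import java.util.HashMap;
import java.util.Map;

public class TopologyUpdaterSketch {
  static String updatePropertyValue(String component, String propertyValue,
                                    Map<String, String> componentToHost) {
    // Swap the host portion of a "host:port" value for the component's actual host.
    String originalHost = propertyValue.split(":")[0];
    return propertyValue.replace(originalHost, componentToHost.get(component));
  }

  public static void main(String[] args) {
    Map<String, String> componentToHost = new HashMap<String, String>();
    componentToHost.put("FALCON_SERVER", "falcon01.example.com");
    // Prints: falcon01.example.com:61616
    System.out.println(updatePropertyValue("FALCON_SERVER", "localhost:61616", componentToHost));
  }
}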

+ 2 - 4
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RecommendationResourceProvider.java

@@ -93,12 +93,10 @@ public class RecommendationResourceProvider extends StackAdvisorResourceProvider
       response = saHelper.recommend(recommendationRequest);
     } catch (StackAdvisorRequestException e) {
      LOG.warn("Error occurred during recommendation", e);
-      throw new WebApplicationException(Response.status(Status.BAD_REQUEST).entity(e.getMessage())
-          .build());
+      throw new IllegalArgumentException(e.getMessage(), e);
     } catch (StackAdvisorException e) {
      LOG.warn("Error occurred during recommendation", e);
-      throw new WebApplicationException(Response.status(Status.INTERNAL_SERVER_ERROR).entity(e.getMessage())
-          .build());
+      throw new SystemException(e.getMessage(), e);
     }
 
     Resource recommendation = createResources(new Command<Resource>() {

+ 2 - 4
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ValidationResourceProvider.java

@@ -85,12 +85,10 @@ public class ValidationResourceProvider extends StackAdvisorResourceProvider {
       response = saHelper.validate(validationRequest);
     } catch (StackAdvisorRequestException e) {
       LOG.warn("Error occurred during validation", e);
-      throw new WebApplicationException(Response.status(Status.BAD_REQUEST).entity(e.getMessage())
-          .build());
+      throw new IllegalArgumentException(e.getMessage(), e);
     } catch (StackAdvisorException e) {
       LOG.warn("Error occurred during validation", e);
-      throw new WebApplicationException(Response.status(Status.INTERNAL_SERVER_ERROR).entity(e.getMessage())
-          .build());
+      throw new SystemException(e.getMessage(), e);
     }
 
     Resource validation = createResources(new Command<Resource>() {

+ 5 - 5
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewInstanceResourceProvider.java

@@ -146,7 +146,7 @@ public class ViewInstanceResourceProvider extends AbstractResourceProvider {
 
       for (ViewEntity viewDefinition : viewRegistry.getDefinitions()){
         // do not report instances for views that are not loaded.
-        if (viewDefinition.isLoaded()){
+        if (viewDefinition.isDeployed()){
           if (viewName == null || viewName.equals(viewDefinition.getCommonName())) {
             for (ViewInstanceEntity viewInstanceDefinition : viewRegistry.getInstanceDefinitions(viewDefinition)) {
               if (instanceName == null || instanceName.equals(viewInstanceDefinition.getName())) {
@@ -348,8 +348,8 @@ public class ViewInstanceResourceProvider extends AbstractResourceProvider {
             throw new IllegalStateException("The view " + viewName + " is not registered.");
           }
 
-          // the view must be in the LOADED state to create an instance
-          if (!view.isLoaded()) {
+          // the view must be in the DEPLOYED state to create an instance
+          if (!view.isDeployed()) {
            throw new IllegalStateException("The view " + viewName + " is not deployed.");
           }
 
@@ -393,8 +393,8 @@ public class ViewInstanceResourceProvider extends AbstractResourceProvider {
         Set<ViewInstanceEntity> viewInstanceEntities = new HashSet<ViewInstanceEntity>();
 
         for (ViewEntity viewEntity : viewRegistry.getDefinitions()){
-          // the view must be in the LOADED state to delete an instance
-          if (viewEntity.isLoaded()) {
+          // the view must be in the DEPLOYED state to delete an instance
+          if (viewEntity.isDeployed()) {
             for (ViewInstanceEntity viewInstanceEntity : viewRegistry.getInstanceDefinitions(viewEntity)){
               Resource resource = toResource(viewInstanceEntity, requestedIds);
               if (predicate == null || predicate.evaluate(resource)) {

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewPermissionResourceProvider.java

@@ -127,7 +127,7 @@ public class ViewPermissionResourceProvider extends AbstractResourceProvider {
         ViewEntity viewEntity = viewRegistry.getDefinition(viewName.toString(), viewVersion.toString());
 
         // do not report permissions for views that are not loaded.
-        if (viewEntity.isLoaded()) {
+        if (viewEntity.isDeployed()) {
           resources.add(toResource(viewUsePermission, viewEntity.getResourceType(), viewEntity, requestedIds));
         }
       }
@@ -138,7 +138,7 @@ public class ViewPermissionResourceProvider extends AbstractResourceProvider {
 
       ViewEntity viewEntity = viewRegistry.getDefinition(resourceType);
 
-      if (viewEntity != null && viewEntity.isLoaded()) {
+      if (viewEntity != null && viewEntity.isDeployed()) {
         resources.add(toResource(permissionEntity, resourceType, viewEntity, requestedIds));
       }
     }

+ 4 - 4
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ViewPrivilegeResourceProvider.java

@@ -118,7 +118,7 @@ public class ViewPrivilegeResourceProvider extends PrivilegeResourceProvider<Vie
 
       ViewEntity view = viewInstanceEntity.getViewEntity();
 
-      return view.isLoaded() ?
+      return view.isDeployed() ?
           Collections.singletonMap(viewInstanceEntity.getResource().getId(), viewInstanceEntity) :
           Collections.<Long, ViewInstanceEntity>emptyMap();
     }
@@ -141,7 +141,7 @@ public class ViewPrivilegeResourceProvider extends PrivilegeResourceProvider<Vie
     Map<Long, ViewInstanceEntity> resourceEntities = new HashMap<Long, ViewInstanceEntity>();
 
     for (ViewEntity viewEntity : viewEntities) {
-      if (viewEntity.isLoaded()) {
+      if (viewEntity.isDeployed()) {
         for (ViewInstanceEntity viewInstanceEntity : viewEntity.getInstances()) {
           resourceEntities.put(viewInstanceEntity.getResource().getId(), viewInstanceEntity);
         }
@@ -164,7 +164,7 @@ public class ViewPrivilegeResourceProvider extends PrivilegeResourceProvider<Vie
 
       ViewEntity view = viewInstanceEntity.getViewEntity();
 
-      return view.isLoaded() ? viewInstanceEntity.getResource().getId() : null;
+      return view.isDeployed() ? viewInstanceEntity.getResource().getId() : null;
     }
     return null;
   }
@@ -189,7 +189,7 @@ public class ViewPrivilegeResourceProvider extends PrivilegeResourceProvider<Vie
       ViewInstanceEntity viewInstanceEntity = resourceEntities.get(privilegeEntity.getResource().getId());
       ViewEntity         viewEntity         = viewInstanceEntity.getViewEntity();
 
-      if (!viewEntity.isLoaded()) {
+      if (!viewEntity.isDeployed()) {
         return null;
       }
 

+ 0 - 1
ambari-server/src/main/java/org/apache/ambari/server/metadata/ActionMetadata.java

@@ -42,7 +42,6 @@ public class ActionMetadata {
       Map<String, String> serviceChecks = new HashMap<String, String>();
       
       serviceChecks.put(Service.Type.ZOOKEEPER.toString(), "ZOOKEEPER_QUORUM_SERVICE_CHECK");
-      serviceChecks.put(Service.Type.HCATALOG.toString(), "HCAT_SERVICE_CHECK");
       
       SERVICE_CHECKS = Collections.unmodifiableMap(serviceChecks);
   }

+ 4 - 4
ambari-server/src/main/java/org/apache/ambari/server/orm/entities/ViewEntity.java

@@ -777,12 +777,12 @@ public class ViewEntity implements ViewDefinition {
   }
 
   /**
-   * Determine whether or not the entity is loaded.
+   * Determine whether or not the entity is deployed.
    *
-   * @return true if the entity is loaded
+   * @return true if the entity is deployed
    */
-  public boolean isLoaded() {
-    return status.equals(ViewStatus.LOADED);
+  public boolean isDeployed() {
+    return status.equals(ViewStatus.DEPLOYED);
   }
 
   /**

+ 3 - 5
ambari-server/src/main/java/org/apache/ambari/server/state/Service.java

@@ -18,14 +18,13 @@
 
 package org.apache.ambari.server.state;
 
-import java.util.Map;
-import java.util.concurrent.locks.ReadWriteLock;
-
 import com.google.inject.persist.Transactional;
-
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.ServiceResponse;
 
+import java.util.Map;
+import java.util.concurrent.locks.ReadWriteLock;
+
 public interface Service {
 
   public String getName();
@@ -113,7 +112,6 @@ public interface Service {
     GANGLIA,
     ZOOKEEPER,
     PIG,
-    HCATALOG,
     FLUME,
     YARN,
     MAPREDUCE2

+ 135 - 7
ambari-server/src/main/java/org/apache/ambari/server/upgrade/UpgradeCatalog170.java

@@ -19,20 +19,20 @@
 package org.apache.ambari.server.upgrade;
 
 import java.lang.reflect.Type;
-
 import java.sql.Connection;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
-import java.util.Date;
 
 import javax.persistence.EntityManager;
 import javax.persistence.TypedQuery;
@@ -42,13 +42,16 @@ import javax.persistence.criteria.Expression;
 import javax.persistence.criteria.Predicate;
 import javax.persistence.criteria.Root;
 
-import com.google.common.reflect.TypeToken;
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.configuration.Configuration;
 import org.apache.ambari.server.controller.AmbariManagementController;
 import org.apache.ambari.server.orm.DBAccessor.DBColumnInfo;
 import org.apache.ambari.server.orm.dao.ClusterDAO;
+import org.apache.ambari.server.orm.dao.ClusterServiceDAO;
+import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO;
 import org.apache.ambari.server.orm.dao.DaoUtils;
+import org.apache.ambari.server.orm.dao.HostComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.HostComponentStateDAO;
 import org.apache.ambari.server.orm.dao.HostRoleCommandDAO;
 import org.apache.ambari.server.orm.dao.KeyValueDAO;
 import org.apache.ambari.server.orm.dao.PermissionDAO;
@@ -57,14 +60,19 @@ import org.apache.ambari.server.orm.dao.PrincipalTypeDAO;
 import org.apache.ambari.server.orm.dao.PrivilegeDAO;
 import org.apache.ambari.server.orm.dao.ResourceDAO;
 import org.apache.ambari.server.orm.dao.ResourceTypeDAO;
-import org.apache.ambari.server.orm.dao.ConfigGroupConfigMappingDAO;
+import org.apache.ambari.server.orm.dao.ServiceComponentDesiredStateDAO;
+import org.apache.ambari.server.orm.dao.ServiceDesiredStateDAO;
 import org.apache.ambari.server.orm.dao.UserDAO;
 import org.apache.ambari.server.orm.dao.ViewDAO;
 import org.apache.ambari.server.orm.dao.ViewInstanceDAO;
+import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
 import org.apache.ambari.server.orm.entities.ClusterEntity;
-import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntity;
+import org.apache.ambari.server.orm.entities.ClusterServiceEntityPK;
 import org.apache.ambari.server.orm.entities.ConfigGroupConfigMappingEntity;
-import org.apache.ambari.server.orm.entities.ClusterConfigEntity;
+import org.apache.ambari.server.orm.entities.HostComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.HostComponentStateEntity;
+import org.apache.ambari.server.orm.entities.HostRoleCommandEntity;
 import org.apache.ambari.server.orm.entities.HostRoleCommandEntity_;
 import org.apache.ambari.server.orm.entities.KeyValueEntity;
 import org.apache.ambari.server.orm.entities.PermissionEntity;
@@ -73,6 +81,10 @@ import org.apache.ambari.server.orm.entities.PrincipalTypeEntity;
 import org.apache.ambari.server.orm.entities.PrivilegeEntity;
 import org.apache.ambari.server.orm.entities.ResourceEntity;
 import org.apache.ambari.server.orm.entities.ResourceTypeEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceComponentDesiredStateEntityPK;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntity;
+import org.apache.ambari.server.orm.entities.ServiceDesiredStateEntityPK;
 import org.apache.ambari.server.orm.entities.UserEntity;
 import org.apache.ambari.server.orm.entities.ViewEntity;
 import org.apache.ambari.server.orm.entities.ViewInstanceEntity;
@@ -85,6 +97,7 @@ import org.apache.ambari.server.utils.StageUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.reflect.TypeToken;
 import com.google.inject.Inject;
 import com.google.inject.Injector;
 
@@ -533,6 +546,9 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog {
   @Override
   protected void executeDMLUpdates() throws AmbariException, SQLException {
     // Update historic records with the log paths, but only enough so as to not prolong the upgrade process
+    moveHcatalogIntoHiveService();
+    moveWebHcatIntoHiveService();
+
     executeInTransaction(new Runnable() {
       @Override
       public void run() {
@@ -598,6 +614,118 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog {
     moveConfigGroupsGlobalToEnv();
   }
 
+  public void moveHcatalogIntoHiveService() throws AmbariException {
+    final String serviceName = "HIVE";
+    final String serviceNameToBeDeleted = "HCATALOG";
+    final String componentName = "HCAT";
+    moveComponentsIntoService(serviceName, serviceNameToBeDeleted, componentName);
+  }
+
+  private void moveWebHcatIntoHiveService() throws AmbariException {
+    final String serviceName = "HIVE";
+    final String serviceNameToBeDeleted = "WEBHCAT";
+    final String componentName = "WEBHCAT_SERVER";
+    moveComponentsIntoService(serviceName, serviceNameToBeDeleted, componentName);
+  }
+
+  private void moveComponentsIntoService(String serviceName, String serviceNameToBeDeleted, String componentName) throws AmbariException {
+    /**
+     * 1. ADD servicecomponentdesiredstate: Add HCAT HIVE entry:
+     * 2. Update hostcomponentdesiredstate: service_name to HIVE where service_name is HCATALOG:
+     * 3. Update hostcomponentstate: service_name to HIVE where service_name is HCATALOG:
+     * 4. DELETE servicecomponentdesiredstate: where component_name is HCAT and service_name is HCATALOG :
+     * 5. Delete servicedesiredstate where  service_name is HCATALOG:
+     * 6. Delete clusterservices where service_name is  HCATALOG:
+     */
+    ClusterDAO clusterDAO = injector.getInstance(ClusterDAO.class);
+    ClusterServiceDAO clusterServiceDAO = injector.getInstance(ClusterServiceDAO.class);
+    ServiceDesiredStateDAO serviceDesiredStateDAO = injector.getInstance(ServiceDesiredStateDAO.class);
+    ServiceComponentDesiredStateDAO serviceComponentDesiredStateDAO = injector.getInstance(ServiceComponentDesiredStateDAO.class);
+    HostComponentDesiredStateDAO hostComponentDesiredStateDAO = injector.getInstance(HostComponentDesiredStateDAO.class);
+    HostComponentStateDAO hostComponentStateDAO = injector.getInstance(HostComponentStateDAO.class);
+
+    List<ClusterEntity> clusterEntities = clusterDAO.findAll();
+    for (final ClusterEntity clusterEntity : clusterEntities) {
+      ServiceComponentDesiredStateEntityPK pkHCATInHcatalog = new ServiceComponentDesiredStateEntityPK();
+      pkHCATInHcatalog.setComponentName(componentName);
+      pkHCATInHcatalog.setClusterId(clusterEntity.getClusterId());
+      pkHCATInHcatalog.setServiceName(serviceNameToBeDeleted);
+      ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntityToDelete = serviceComponentDesiredStateDAO.findByPK(pkHCATInHcatalog);
+
+      if (serviceComponentDesiredStateEntityToDelete == null) {
+        continue;
+      }
+
+      ServiceDesiredStateEntityPK serviceDesiredStateEntityPK = new ServiceDesiredStateEntityPK();
+      serviceDesiredStateEntityPK.setClusterId(clusterEntity.getClusterId());
+      serviceDesiredStateEntityPK.setServiceName(serviceNameToBeDeleted);
+      ServiceDesiredStateEntity serviceDesiredStateEntity = serviceDesiredStateDAO.findByPK(serviceDesiredStateEntityPK);
+
+      ClusterServiceEntityPK clusterServiceEntityToBeDeletedPK = new ClusterServiceEntityPK();
+      clusterServiceEntityToBeDeletedPK.setClusterId(clusterEntity.getClusterId());
+      clusterServiceEntityToBeDeletedPK.setServiceName(serviceNameToBeDeleted);
+      ClusterServiceEntity clusterServiceEntityToBeDeleted = clusterServiceDAO.findByPK(clusterServiceEntityToBeDeletedPK);
+
+      ClusterServiceEntityPK clusterServiceEntityPK = new ClusterServiceEntityPK();
+      clusterServiceEntityPK.setClusterId(clusterEntity.getClusterId());
+      clusterServiceEntityPK.setServiceName(serviceName);
+
+
+      ClusterServiceEntity clusterServiceEntity = clusterServiceDAO.findByPK(clusterServiceEntityPK);
+
+      ServiceComponentDesiredStateEntity serviceComponentDesiredStateEntity = new ServiceComponentDesiredStateEntity();
+      serviceComponentDesiredStateEntity.setServiceName(serviceName);
+      serviceComponentDesiredStateEntity.setComponentName(serviceComponentDesiredStateEntityToDelete.getComponentName());
+      serviceComponentDesiredStateEntity.setClusterId(clusterEntity.getClusterId());
+      serviceComponentDesiredStateEntity.setDesiredStackVersion(serviceComponentDesiredStateEntityToDelete.getDesiredStackVersion());
+      serviceComponentDesiredStateEntity.setDesiredState(serviceComponentDesiredStateEntityToDelete.getDesiredState());
+      serviceComponentDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
+      //serviceComponentDesiredStateDAO.create(serviceComponentDesiredStateEntity);
+
+      Iterator<HostComponentDesiredStateEntity> hostComponentDesiredStateIterator = serviceComponentDesiredStateEntityToDelete.getHostComponentDesiredStateEntities().iterator();
+      Iterator<HostComponentStateEntity> hostComponentStateIterator = serviceComponentDesiredStateEntityToDelete.getHostComponentStateEntities().iterator();
+
+      while (hostComponentDesiredStateIterator.hasNext()) {
+        HostComponentDesiredStateEntity hcDesiredStateEntityToBeDeleted = hostComponentDesiredStateIterator.next();
+        HostComponentDesiredStateEntity hostComponentDesiredStateEntity = new HostComponentDesiredStateEntity();
+        hostComponentDesiredStateEntity.setClusterId(clusterEntity.getClusterId());
+        hostComponentDesiredStateEntity.setComponentName(hcDesiredStateEntityToBeDeleted.getComponentName());
+        hostComponentDesiredStateEntity.setDesiredStackVersion(hcDesiredStateEntityToBeDeleted.getDesiredStackVersion());
+        hostComponentDesiredStateEntity.setDesiredState(hcDesiredStateEntityToBeDeleted.getDesiredState());
+        hostComponentDesiredStateEntity.setHostName(hcDesiredStateEntityToBeDeleted.getHostName());
+        hostComponentDesiredStateEntity.setHostEntity(hcDesiredStateEntityToBeDeleted.getHostEntity());
+        hostComponentDesiredStateEntity.setAdminState(hcDesiredStateEntityToBeDeleted.getAdminState());
+        hostComponentDesiredStateEntity.setMaintenanceState(hcDesiredStateEntityToBeDeleted.getMaintenanceState());
+        hostComponentDesiredStateEntity.setRestartRequired(hcDesiredStateEntityToBeDeleted.isRestartRequired());
+        hostComponentDesiredStateEntity.setServiceName(serviceName);
+        hostComponentDesiredStateEntity.setServiceComponentDesiredStateEntity(serviceComponentDesiredStateEntity);
+        hostComponentDesiredStateDAO.merge(hostComponentDesiredStateEntity);
+        hostComponentDesiredStateDAO.remove(hcDesiredStateEntityToBeDeleted);
+      }
+
+      while (hostComponentStateIterator.hasNext()) {
+        HostComponentStateEntity hcStateToBeDeleted = hostComponentStateIterator.next();
+        HostComponentStateEntity hostComponentStateEntity = new HostComponentStateEntity();
+        hostComponentStateEntity.setClusterId(clusterEntity.getClusterId());
+        hostComponentStateEntity.setComponentName(hcStateToBeDeleted.getComponentName());
+        hostComponentStateEntity.setCurrentStackVersion(hcStateToBeDeleted.getCurrentStackVersion());
+        hostComponentStateEntity.setCurrentState(hcStateToBeDeleted.getCurrentState());
+        hostComponentStateEntity.setHostName(hcStateToBeDeleted.getHostName());
+        hostComponentStateEntity.setHostEntity(hcStateToBeDeleted.getHostEntity());
+        hostComponentStateEntity.setServiceName(serviceName);
+        hostComponentStateEntity.setServiceComponentDesiredStateEntity(serviceComponentDesiredStateEntity);
+        hostComponentStateDAO.merge(hostComponentStateEntity);
+        hostComponentStateDAO.remove(hcStateToBeDeleted);
+      }
+      serviceComponentDesiredStateEntity.setClusterServiceEntity(clusterServiceEntity);
+      serviceComponentDesiredStateDAO.merge(serviceComponentDesiredStateEntity);
+      serviceComponentDesiredStateDAO.remove(serviceComponentDesiredStateEntityToDelete);
+      serviceDesiredStateDAO.remove(serviceDesiredStateEntity);
+      clusterServiceDAO.remove(clusterServiceEntityToBeDeleted);
+    }
+  }
+
+
   private void moveConfigGroupsGlobalToEnv() throws AmbariException {
     final ConfigGroupConfigMappingDAO confGroupConfMappingDAO = injector.getInstance(ConfigGroupConfigMappingDAO.class);
     ConfigHelper configHelper = injector.getInstance(ConfigHelper.class);
@@ -906,7 +1034,7 @@ public class UpgradeCatalog170 extends AbstractUpgradeCatalog {
     if (clusterMap != null && !clusterMap.isEmpty()) {
       for (final Cluster cluster : clusterMap.values()) {
         Set<String> configTypes = configHelper.findConfigTypesByPropertyName(cluster.getCurrentStackVersion(),
-                CONTENT_FIELD_NAME, cluster.getClusterName());
+            CONTENT_FIELD_NAME, cluster.getClusterName());
 
         for(String configType:configTypes) {
           if(!configType.endsWith(ENV_CONFIGS_POSTFIX)) {

+ 6 - 4
ambari-server/src/main/java/org/apache/ambari/server/view/ViewRegistry.java

@@ -261,8 +261,10 @@ public class ViewRegistry {
   public ViewEntity getDefinition(ResourceTypeEntity resourceTypeEntity) {
 
     for (ViewEntity viewEntity : viewDefinitions.values()) {
-      if (viewEntity.getResourceType().equals(resourceTypeEntity)) {
-        return viewEntity;
+      if (viewEntity.isDeployed()) {
+        if (viewEntity.getResourceType().equals(resourceTypeEntity)) {
+          return viewEntity;
+        }
       }
     }
     return null;
@@ -1188,7 +1190,7 @@ public class ViewRegistry {
                                                   File extractedArchiveDirFile,
                                                   ViewConfig viewConfig) {
 
-    setViewStatus(viewDefinition, ViewEntity.ViewStatus.LOADING, "Loading " + extractedArchiveDirFile + ".");
+    setViewStatus(viewDefinition, ViewEntity.ViewStatus.DEPLOYING, "Deploying " + extractedArchiveDirFile + ".");
 
     String extractedArchiveDirPath = extractedArchiveDirFile.getAbsolutePath();
 
@@ -1217,7 +1219,7 @@ public class ViewRegistry {
         addInstanceDefinition(viewDefinition, instanceEntity);
         handlerList.addViewInstance(instanceEntity);
       }
-      setViewStatus(viewDefinition, ViewEntity.ViewStatus.LOADED, "Loaded " + extractedArchiveDirPath + ".");
+      setViewStatus(viewDefinition, ViewEntity.ViewStatus.DEPLOYED, "Deployed " + extractedArchiveDirPath + ".");
 
     } catch (Exception e) {
       String msg = "Caught exception loading view " + viewDefinition.getViewName();

+ 1 - 1
ambari-server/src/main/package/rpm/preremove.sh

@@ -34,7 +34,7 @@ if [ "$1" -eq 0 ]; then  # Action is uninstall
       /var/lib/ambari-server/install-helper.sh remove
     fi
 
-    chkconfig --del ambari-server
+    chkconfig --list | grep ambari-server && chkconfig --del ambari-server
 fi
 
 exit 0

+ 0 - 2
ambari-server/src/main/resources/custom_actions/validate_configs.py

@@ -131,7 +131,6 @@ PROPERTIES_TO_CHECK = {
   "HIVE_CLIENT": {
     "hive-env": ["hive_log_dir", "hive_pid_dir"]
   },
-  #HCATALOG
   "HCAT": {
     "hive-env": ["hcat_log_dir", "hcat_pid_dir"]
   },
@@ -297,7 +296,6 @@ USERS_TO_GROUP_MAPPING = {
       "hive_user": "hive_user"
     }
   },
-  #HCATALOG
   "HCAT": {
     "hive-env": {
       "hive_user": "hive_user"

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/hooks/before-START/scripts/shared_initialization.py

@@ -39,7 +39,8 @@ def setup_hadoop():
     Directory(params.hdfs_log_dir_prefix,
               recursive=True,
               owner='root',
-              group='root'
+              group=params.user_group,
+              mode=0775
     )
     Directory(params.hadoop_pid_dir_prefix,
               recursive=True,

+ 1 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/role_command_order.json

@@ -20,10 +20,8 @@
         "WEBHCAT_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-env.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/webhcat-env.xml

@@ -27,7 +27,7 @@
     <description>webhcat-env.sh content</description>
     <value>
 # The file containing the running pid
-PID_FILE={{pid_file}}
+PID_FILE={{webhcat_pid_file}}
 
 TEMPLETON_LOG_DIR={{templeton_log_dir}}/
 

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/configuration/webhcat-site.xml → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/configuration/webhcat-site.xml


+ 82 - 72
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/metainfo.xml

@@ -80,6 +80,49 @@
           </commandScript>
         </component>
 
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
         <component>
           <name>HIVE_CLIENT</name>
           <displayName>Hive Client</displayName>
@@ -112,68 +155,6 @@
             </configFile>            
           </configFiles>
         </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive</name>
-            </package>
-            <package>
-              <name>mysql-connector-java</name>
-            </package>
-            <package>
-              <name>mysql</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>redhat5,redhat6</osFamily>
-          <packages>
-            <package>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osFamily>suse11</osFamily>
-          <packages>
-            <package>
-              <name>mysql-client</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>MAPREDUCE</service>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-env</config-type>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <displayName>HCatalog</displayName>
-      <comment>A table and storage management layer for Hadoop that enables users with different data processing tools
-        to more easily read and write data on the grid.
-      </comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
         <component>
           <name>HCAT</name>
           <displayName>HCat</displayName>
@@ -206,16 +187,49 @@
           </configFiles>
         </component>
       </components>
+
       <osSpecifics>
         <osSpecific>
           <osFamily>any</osFamily>
           <packages>
+            <package>
+              <name>hive</name>
+            </package>
             <package>
               <name>hcatalog</name>
             </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
           </packages>
         </osSpecific>
       </osSpecifics>
+
       <commandScript>
         <script>scripts/service_check.py</script>
         <scriptType>PYTHON</scriptType>
@@ -223,22 +237,18 @@
       </commandScript>
       
       <requiredServices>
-        <service>HIVE</service>
+        <service>MAPREDUCE</service>
+        <service>ZOOKEEPER</service>
       </requiredServices>
 
       <configuration-dependencies>
         <config-type>hive-site</config-type>
         <config-type>hive-env</config-type>
-      </configuration-dependencies>
-
-      <excluded-config-types>
-        <config-type>hive-env</config-type>
-        <config-type>hive-site</config-type>
-        <config-type>hive-exec-log4j</config-type>
         <config-type>hive-log4j</config-type>
-      </excluded-config-types>
-
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
+      </configuration-dependencies>
     </service>
-
   </services>
 </metainfo>
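
The 1.3.2 metainfo.xml hunks above fold the former HCATALOG (and, below, WEBHCAT) definitions into the single HIVE service. A minimal sketch of how the merged file can be inspected, assuming a local copy of metainfo.xml and using only the standard library:

import xml.etree.ElementTree as ET

def list_components(metainfo_path):
    # Map each <service><name> to the <component><name> entries it declares.
    root = ET.parse(metainfo_path).getroot()
    services = {}
    for service in root.iter('service'):
        name = service.findtext('name')
        if name:
            services[name] = [c.findtext('name') for c in service.iter('component')]
    return services

# After this change, WEBHCAT_SERVER and HCAT should appear under HIVE alongside
# the existing Hive components; the path is illustrative.
print(list_components('metainfo.xml'))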

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/files/templetonSmoke.sh → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/files/templetonSmoke.sh


+ 28 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/params.py

@@ -64,7 +64,6 @@ smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
 
 #hive_env
@@ -155,11 +154,37 @@ hive_hdfs_user_dir = format("/user/{hive_user}")
 hive_hdfs_user_mode = 0700
 #for create_hdfs_directory
 hostname = config["hostname"]
-hadoop_conf_dir = "/etc/hadoop/conf"
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
 kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#################################################
+################## WebHCat ######################
+#################################################
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+
+config_dir = '/etc/hcatalog/conf'
+
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.templeton_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+
+#hdfs directories
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
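
The comment above refers to the HdfsDirectory partial that the Hive params now define (the deleted WEBHCAT params.py later in this commit shows the full call). A self-contained sketch of the functools.partial idiom, with a stand-in function and illustrative values in place of the resource_management resource:

import functools

def hdfs_directory(path, conf_dir, hdfs_user, mode=0o755):
    # Stand-in for resource_management's HdfsDirectory resource.
    print("create %s as %s (conf=%s, mode=%o)" % (path, hdfs_user, conf_dir, mode))

# Bind the arguments shared by every call once...
HdfsDirectory = functools.partial(
    hdfs_directory,
    conf_dir='/etc/hadoop/conf',
    hdfs_user='hdfs',
)

# ...so call sites only pass what varies.
HdfsDirectory('/user/hcat', mode=0o755)
HdfsDirectory('/apps/webhcat', mode=0o755)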

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/service_check.py

@@ -23,6 +23,7 @@ import socket
 import sys
 
 from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
 
 class HiveServiceCheck(Script):
   def service_check(self, env):
@@ -42,6 +43,7 @@ class HiveServiceCheck(Script):
       sys.exit(1)
 
     hcat_service_check()
+    webhcat_service_check()
 
 if __name__ == "__main__":
   HiveServiceCheck().execute()
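
With WebHCat folded into Hive, a single HIVE service check now fans out to both sub-checks. A stripped-down model of that flow (the real checks and the Script base class come from resource_management; this only shows the call order):

import sys

def hcat_service_check():
    print("hcat smoke test")

def webhcat_service_check():
    print("templeton smoke test")

def hive_service_check(server_reachable=True):
    if not server_reachable:
        sys.exit(1)            # mirrors the sys.exit(1) above
    hcat_service_check()
    webhcat_service_check()

hive_service_check()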

+ 3 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/status_params.py

@@ -29,6 +29,9 @@ hive_metastore_pid = 'hive.pid'
 
 hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
 
+templeton_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
+webhcat_pid_file = format('{templeton_pid_dir}/webhcat.pid')
+
 if System.get_instance().os_family == "suse":
   daemon_name = 'mysql'
 else:

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat.py → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat.py


+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_server.py → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat_server.py

@@ -47,7 +47,7 @@ class WebHCatServer(Script):
   def status(self, env):
     import status_params
     env.set_params(status_params)
-    check_process_status(status_params.pid_file)
+    check_process_status(status_params.webhcat_pid_file)
 
 if __name__ == "__main__":
   WebHCatServer().execute()
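
check_process_status() is the resource_management helper used by status(); a rough, self-contained equivalent, assuming it does no more than read the pid file and probe the process:

import os
import errno

class ComponentIsNotRunning(Exception):
    pass

def check_process_status(pid_file):
    # Fail unless the pid file exists and names a live process.
    if not os.path.isfile(pid_file):
        raise ComponentIsNotRunning()
    with open(pid_file) as f:
        pid = int(f.read().strip())
    try:
        os.kill(pid, 0)  # signal 0: existence check only, nothing is sent
    except OSError as e:
        if e.errno == errno.ESRCH:
            raise ComponentIsNotRunning()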

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/webhcat_service.py → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat_service.py

@@ -27,7 +27,7 @@ def webhcat_service(action='start'):
 
   if action == 'start':
     demon_cmd = format('{cmd} start')
-    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
+    no_op_test = format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps `cat {webhcat_pid_file}` >/dev/null 2>&1')
     Execute(demon_cmd,
             user=params.webhcat_user,
             not_if=no_op_test
@@ -37,4 +37,4 @@ def webhcat_service(action='start'):
     Execute(demon_cmd,
             user=params.webhcat_user
     )
-    Execute(format('rm -f {pid_file}'))
+    Execute(format('rm -f {webhcat_pid_file}'))
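
The not_if guard above is what makes start idempotent: the daemon is only launched when the pid-file test fails. The same idiom outside the resource_management DSL, with an illustrative start command and pid path:

import os
import subprocess

webhcat_pid_file = '/var/run/webhcat/webhcat.pid'   # illustrative
no_op_test = ('ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1'
              % (webhcat_pid_file, webhcat_pid_file))

def start():
    if subprocess.call(no_op_test, shell=True) != 0:   # not_if: skip if already running
        subprocess.check_call('webhcat_server.sh start', shell=True)  # illustrative cmd

def stop():
    subprocess.check_call('webhcat_server.sh stop', shell=True)
    if os.path.exists(webhcat_pid_file):
        os.remove(webhcat_pid_file)                    # rm -f {webhcat_pid_file}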

+ 19 - 3
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/status_params.py → ambari-server/src/main/resources/stacks/HDP/1.3.2/services/HIVE/package/scripts/webhcat_service_check.py

@@ -20,7 +20,23 @@ limitations under the License.
 
 from resource_management import *
 
-config = Script.get_config()
+def webhcat_service_check():
+  import params
+  File(format("{tmp_dir}/templetonSmoke.sh"),
+       content= StaticFile('templetonSmoke.sh'),
+       mode=0755
+  )
+
+  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+               " {security_param} {kinit_path_local}",
+               smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+          logoutput=True)
+
+
+
 
-templeton_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
-pid_file = format('{templeton_pid_dir}/webhcat.pid')
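
The conditional keytab above leans on resource_management's format(), which resolves {placeholders} from the surrounding scope and lets keyword arguments override them. A stand-in with plain str.format shows the effect (host and paths are illustrative):

def build_smoke_cmd(tmp_dir, host, smokeuser, smoke_user_keytab,
                    security_enabled, security_param, kinit_path_local):
    # When security is off, the keytab placeholder collapses to "no_keytab".
    smokeuser_keytab = smoke_user_keytab if security_enabled else "no_keytab"
    return ("{tmp_dir}/templetonSmoke.sh {host} {smokeuser} "
            "{smokeuser_keytab} {security_param} {kinit_path_local}"
            ).format(**locals())

print(build_smoke_cmd('/tmp', 'c6401.example.com', 'ambari-qa',
                      '/etc/security/keytabs/smokeuser.headless.keytab',
                      False, 'false', '/usr/bin/kinit'))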

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/metainfo.xml

@@ -52,7 +52,7 @@
               </auto-deploy>
             </dependency>
             <dependency>
-              <name>HCATALOG/HCAT</name>
+              <name>HIVE/HCAT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>

+ 0 - 6
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2

@@ -69,12 +69,6 @@ define servicegroup {
   alias  OOZIE Checks
 }
 {% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-{% endif %}
 {% if hostgroup_defs['nagios-server'] %}
 define servicegroup {
   servicegroup_name  NAGIOS

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/NAGIOS/package/templates/hadoop-services.cfg.j2

@@ -566,7 +566,7 @@ define service {
         hostgroup_name          webhcat-server
         use                     hadoop-service
         service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
+        servicegroups           HIVE
         {% if security_enabled %}
         check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
         {% else %}

+ 0 - 103
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/metainfo.xml

@@ -1,103 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <displayName>WebHCat</displayName>
-      <comment>Provides a REST-like web API for HCatalog and related Hadoop components.</comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <displayName>WebHCat Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>WEBHCAT/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hcatalog</name>
-            </package>
-            <package>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>ZOOKEEPER</service>
-        <service>HIVE</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-        <config-type>webhcat-env</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

+ 0 - 20
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/__init__.py

@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""

+ 0 - 78
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/params.py

@@ -1,78 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
-webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
-
-config_dir = '/etc/hcatalog/conf'
-
-templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-templeton_pid_dir = status_params.templeton_pid_dir
-
-pid_file = status_params.pid_file
-
-hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-hadoop_home = '/usr'
-user_group = config['configurations']['cluster-env']['user_group']
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#hdfs directories
-webhcat_apps_dir = "/apps/webhcat"
-hcat_hdfs_user_dir = format("/user/{hcat_user}")
-hcat_hdfs_user_mode = 0755
-webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-webhcat_hdfs_user_mode = 0755
-#for create_hdfs_directory
-hostname = config["hostname"]
-security_param = "true" if security_enabled else "false"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local
-)

+ 0 - 44
ambari-server/src/main/resources/stacks/HDP/1.3.2/services/WEBHCAT/package/scripts/service_check.py

@@ -1,44 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-class WebHCatServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File(format("{tmp_dir}/templetonSmoke.sh"),
-         content= StaticFile('templetonSmoke.sh'),
-         mode=0755
-    )
-
-    cmd = format("sh {tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-                 " {security_param} {kinit_path_local}",
-                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True)
-
-if __name__ == "__main__":
-  WebHCatServiceCheck().execute()

+ 1 - 3
ambari-server/src/main/resources/stacks/HDP/1.3/role_command_order.json

@@ -20,10 +20,8 @@
         "WEBHCAT_SERVER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START","WEBHCAT_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
     "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],

+ 0 - 5
ambari-server/src/main/resources/stacks/HDP/1.3/services/HIVE/metainfo.xml

@@ -23,10 +23,5 @@
       <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
       <version>0.11.0.1.3</version>
     </service>
-    <service>
-      <name>HCATALOG</name>
-      <comment>HCATALOG</comment>
-      <version>0.11.0.1.3</version>
-    </service>
   </services>
 </metainfo>

+ 9 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6.GlusterFS/services/HDFS/configuration/core-site.xml

@@ -164,4 +164,13 @@ DEFAULT
     </description>
   </property>
 
+  <property>
+    <name>dfs.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      Decide if HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
+      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
+      (service is provided only on https), HTTP_AND_HTTPS (service is provided on both http and https).
+    </description>
+  </property>
 </configuration>

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py

@@ -38,7 +38,8 @@ def setup_hadoop():
     Directory(params.hdfs_log_dir_prefix,
               recursive=True,
               owner='root',
-              group='root'
+              group=params.user_group,
+              mode=0775
     )
     Directory(params.hadoop_pid_dir_prefix,
               recursive=True,
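
The hunk above widens the HDFS log root from root:root to root:<user_group> with mode 0775 so group members can write under it. A plain-Python equivalent of that Directory resource, assuming root privileges and an illustrative group name:

import os
import pwd
import grp

def make_log_root(path, group='hadoop', mode=0o775):
    # Create the directory tree, then set root:<group> ownership and 0775.
    if not os.path.isdir(path):
        os.makedirs(path)
    os.chown(path, pwd.getpwnam('root').pw_uid, grp.getgrnam(group).gr_gid)
    os.chmod(path, mode)

make_log_root('/var/log/hadoop')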

+ 1 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/role_command_order.json

@@ -21,10 +21,8 @@
         "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START", "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],

+ 10 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HDFS/configuration/core-site.xml

@@ -177,4 +177,14 @@ DEFAULT
     </description>
   </property>
 
+  <property>
+    <name>dfs.http.policy</name>
+    <value>HTTP_ONLY</value>
+    <description>
+      Decide if HTTPS (SSL) is supported on HDFS. This configures the HTTP endpoint for HDFS daemons.
+      The following values are supported: HTTP_ONLY (service is provided only on http), HTTPS_ONLY
+      (service is provided only on https), HTTP_AND_HTTPS (service is provided on both http and https).
+    </description>
+  </property>
+
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-env.xml → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-env.xml

@@ -27,7 +27,7 @@
     <description>webhcat-env.sh content</description>
     <value>
 # The file containing the running pid
-PID_FILE={{pid_file}}
+PID_FILE={{webhcat_pid_file}}
 
 TEMPLETON_LOG_DIR={{templeton_log_dir}}/
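
webhcat-env.sh content is stored as a config value and rendered with the script parameters, so the template now has to reference webhcat_pid_file. A minimal sketch of that substitution, assuming a Jinja-style renderer (Ambari's templating is Jinja-based) and illustrative paths:

from jinja2 import Template

content = ("# The file containing the running pid\n"
           "PID_FILE={{webhcat_pid_file}}\n"
           "TEMPLETON_LOG_DIR={{templeton_log_dir}}/\n")

print(Template(content).render(
    webhcat_pid_file='/var/run/webhcat/webhcat.pid',
    templeton_log_dir='/var/log/webhcat'))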
 

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/configuration/webhcat-site.xml → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/configuration/webhcat-site.xml


+ 91 - 74
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/metainfo.xml

@@ -75,7 +75,55 @@
             <scriptType>PYTHON</scriptType>
           </commandScript>
         </component>
-
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <displayName>WebHCat Server</displayName>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>YARN/YARN_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
         <component>
           <name>MYSQL_SERVER</name>
           <displayName>MySQL Server</displayName>
@@ -119,6 +167,37 @@
             </configFile>                         
           </configFiles>
         </component>
+        <component>
+          <name>HCAT</name>
+          <displayName>HCat</displayName>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <configFiles>
+            <configFile>
+              <type>xml</type>
+              <fileName>hive-site.xml</fileName>
+              <dictionaryName>hive-site</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-env.sh</fileName>
+              <dictionaryName>hive-env</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-log4j.properties</fileName>
+              <dictionaryName>hive-log4j</dictionaryName>
+            </configFile>
+            <configFile>
+              <type>env</type>
+              <fileName>hive-exec-log4j.properties</fileName>
+              <dictionaryName>hive-exec-log4j</dictionaryName>
+            </configFile>
+          </configFiles>
+        </component>
       </components>
 
       <osSpecifics>
@@ -128,6 +207,15 @@
             <package>
               <name>hive</name>
             </package>
+            <package>
+              <name>hcatalog</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
             <package>
               <name>mysql-connector-java</name>
             </package>
@@ -176,80 +264,9 @@
         <config-type>hive-log4j</config-type>
         <config-type>hive-exec-log4j</config-type>
         <config-type>hive-env</config-type>
+        <config-type>webhcat-site</config-type>
+        <config-type>webhcat-env</config-type>
       </configuration-dependencies>
     </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <displayName>HCatalog</displayName>
-      <comment>A table and storage management layer for Hadoop that enables users with different data processing tools
-        to more easily read and write data on the grid.
-      </comment>
-      <version>0.12.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <displayName>HCat</displayName>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-env.sh</fileName>
-              <dictionaryName>hive-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-log4j.properties</fileName>
-              <dictionaryName>hive-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-exec-log4j.properties</fileName>
-              <dictionaryName>hive-exec-log4j</dictionaryName>
-            </configFile>
-          </configFiles>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HIVE</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-env</config-type>
-      </configuration-dependencies>
-      <excluded-config-types>
-        <config-type>hive-env</config-type>
-        <config-type>hive-site</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>hive-log4j</config-type>
-      </excluded-config-types>
-    </service>
-
   </services>
 </metainfo>

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/files/templetonSmoke.sh → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/files/templetonSmoke.sh


+ 42 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/params.py

@@ -36,6 +36,7 @@ if rpm_version is not None:
   hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
   hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
   hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
+  hadoop_streeming_jars = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/hadoop-streaming-*.jar")
   hive_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf')
   hive_client_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf')
   hive_server_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive/conf.server')
@@ -46,16 +47,21 @@ if rpm_version is not None:
 
   if str(hdp_stack_version).startswith('2.0'):
     hcat_conf_dir = format('/usr/hdp/{rpm_version}/etc/hcatalog/conf')
+    config_dir = format('/usr/hdp/{rpm_version}/etc/hcatalog/conf')
     hcat_lib = format('/usr/hdp/{rpm_version}/hive/hcatalog/share/hcatalog')
+    webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hcatalog/sbin')
   # for newer versions
   else:
     hcat_conf_dir = format('/usr/hdp/{rpm_version}/etc/hive-hcatalog/conf')
+    config_dir = format('/usr/hdp/{rpm_version}/etc/hive-webhcat/conf')
     hcat_lib = format('/usr/hdp/{rpm_version}/hive/hive-hcatalog/share/hcatalog')
+    webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hive-hcatalog/sbin')
 
 else:
   hadoop_conf_dir = "/etc/hadoop/conf"
   hadoop_bin_dir = "/usr/bin"
   hadoop_home = '/usr'
+  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
   hive_conf_dir = "/etc/hive/conf"
   hive_bin = '/usr/lib/hive/bin'
   hive_lib = '/usr/lib/hive/lib/'
@@ -66,11 +72,15 @@ else:
 
   if str(hdp_stack_version).startswith('2.0'):
     hcat_conf_dir = '/etc/hcatalog/conf'
+    config_dir = '/etc/hcatalog/conf'
     hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
   # for newer versions
   else:
     hcat_conf_dir = '/etc/hive-hcatalog/conf'
+    config_dir = '/etc/hive-webhcat/conf'
     hcat_lib = '/usr/lib/hive-hcatalog/share/hcatalog'
+    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
 
 execute_path = os.environ['PATH'] + os.pathsep + hive_bin
 hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
@@ -202,9 +212,7 @@ hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.wareho
 #for create_hdfs_directory
 hostname = config["hostname"]
 hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
 hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
 
 # Tez libraries
 tez_lib_uris = default("/configurations/tez-site/tez.lib.uris", None)
@@ -228,6 +236,38 @@ if os.path.exists(mysql_jdbc_driver_jar):
 else:  
   hive_exclude_packages = []
 
+########################################################
+########### WebHCat related params #####################
+########################################################
+
+if str(config['hostLevelParams']['stack_version']).startswith('2.0'):
+  config_dir = '/etc/hcatalog/conf'
+  webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
+# for newer versions
+else:
+  config_dir = '/etc/hive-webhcat/conf'
+  webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
+
+webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
+templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
+templeton_pid_dir = status_params.hcat_pid_dir
+
+webhcat_pid_file = status_params.webhcat_pid_file
+
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+
+hcat_hdfs_user_dir = format("/user/{hcat_user}")
+hcat_hdfs_user_mode = 0755
+webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+webhcat_hdfs_user_mode = 0755
+#for create_hdfs_directory
+security_param = "true" if security_enabled else "false"
+
 import functools
 #create partial functions with common arguments for every HdfsDirectory call
 #to create hdfs directory we need to call params.HdfsDirectory in code
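
The hcatalog layout moved between HDP 2.0 and later releases, so this hunk derives the WebHCat conf and sbin directories from the stack and rpm versions. The same selection logic as a compact, illustrative helper (values mirror the diff):

def webhcat_paths(stack_version, rpm_version=None):
    old_layout = str(stack_version).startswith('2.0')
    if rpm_version is not None:
        base = '/usr/hdp/%s' % rpm_version
        if old_layout:
            return (base + '/etc/hcatalog/conf', base + '/hive/hcatalog/sbin')
        return (base + '/etc/hive-webhcat/conf', base + '/hive/hive-hcatalog/sbin')
    if old_layout:
        return ('/etc/hcatalog/conf', '/usr/lib/hcatalog/sbin')
    return ('/etc/hive-webhcat/conf', '/usr/lib/hive-hcatalog/sbin')

print(webhcat_paths('2.0'))  # ('/etc/hcatalog/conf', '/usr/lib/hcatalog/sbin')
print(webhcat_paths('2.1'))  # ('/etc/hive-webhcat/conf', '/usr/lib/hive-hcatalog/sbin')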

+ 2 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/service_check.py

@@ -23,6 +23,7 @@ import socket
 import sys
 
 from hcat_service_check import hcat_service_check
+from webhcat_service_check import webhcat_service_check
 
 class HiveServiceCheck(Script):
   def service_check(self, env):
@@ -42,6 +43,7 @@ class HiveServiceCheck(Script):
       sys.exit(1)
 
     hcat_service_check()
+    webhcat_service_check()
 
 if __name__ == "__main__":
   HiveServiceCheck().execute()

+ 1 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/status_params.py

@@ -28,6 +28,7 @@ hive_pid = 'hive-server.pid'
 hive_metastore_pid = 'hive.pid'
 
 hcat_pid_dir = config['configurations']['hive-env']['hcat_pid_dir'] #hcat_pid_dir
+webhcat_pid_file = format('{hcat_pid_dir}/webhcat.pid')
 
 if System.get_instance().os_family == "suse" or System.get_instance().os_family == "ubuntu":
   daemon_name = 'mysql'

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat.py → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat.py


+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_server.py → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_server.py

@@ -47,7 +47,7 @@ class WebHCatServer(Script):
   def status(self, env):
     import status_params
     env.set_params(status_params)
-    check_process_status(status_params.pid_file)
+    check_process_status(status_params.webhcat_pid_file)
 
 if __name__ == "__main__":
   WebHCatServer().execute()

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/webhcat_service.py → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service.py

@@ -27,7 +27,7 @@ def webhcat_service(action='start'):
 
   if action == 'start':
     demon_cmd = format('{cmd} start')
-    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
+    no_op_test = format('ls {webhcat_pid_file} >/dev/null 2>&1 && ps `cat {webhcat_pid_file}` >/dev/null 2>&1')
     Execute(demon_cmd,
             user=params.webhcat_user,
             not_if=no_op_test
@@ -37,4 +37,4 @@ def webhcat_service(action='start'):
     Execute(demon_cmd,
             user=params.webhcat_user
     )
-    Execute(format('rm -f {pid_file}'))
+    Execute(format('rm -f {webhcat_pid_file}'))

+ 18 - 3
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/status_params.py → ambari-server/src/main/resources/stacks/HDP/2.0.6/services/HIVE/package/scripts/webhcat_service_check.py

@@ -20,7 +20,22 @@ limitations under the License.
 
 from resource_management import *
 
-config = Script.get_config()
+def webhcat_service_check():
+  import params
+  File(format("{tmp_dir}/templetonSmoke.sh"),
+       content= StaticFile('templetonSmoke.sh'),
+       mode=0755
+  )
+
+  cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+               " {security_param} {kinit_path_local}",
+               smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+  Execute(cmd,
+          tries=3,
+          try_sleep=5,
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+          logoutput=True)
+
+
 
-templeton_pid_dir = config['configurations']['hive-env']['hcat_pid_dir']
-pid_file = format('{templeton_pid_dir}/webhcat.pid')

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/metainfo.xml

@@ -59,7 +59,7 @@
               </auto-deploy>
             </dependency>
             <dependency>
-              <name>HCATALOG/HCAT</name>
+              <name>HIVE/HCAT</name>
               <scope>host</scope>
               <auto-deploy>
                 <enabled>true</enabled>

+ 1 - 7
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2

@@ -54,12 +54,6 @@ define servicegroup {
   alias  OOZIE Checks
 }
 {% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-{% endif %}
 {% if hostgroup_defs['nagios-server'] %}
 define servicegroup {
   servicegroup_name  NAGIOS
@@ -72,7 +66,7 @@ define servicegroup {
   alias  GANGLIA Checks
 }
 {% endif %}
-{% if hostgroup_defs['hiveserver'] %}
+{% if hostgroup_defs['hiveserver'] or hostgroup_defs['webhcat-server'] %}
 define servicegroup {
   servicegroup_name  HIVE
   alias  HIVE Checks

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/NAGIOS/package/templates/hadoop-services.cfg.j2

@@ -729,7 +729,7 @@ define service {
         hostgroup_name          webhcat-server
         use                     hadoop-service
         service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
+        servicegroups           HIVE
         {% if security_enabled %}
         check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
         {% else %}

+ 2 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/OOZIE/package/scripts/params.py

@@ -34,15 +34,16 @@ if rpm_version is not None:
   hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
   hadoop_lib_home = format("/usr/hdp/{rpm_version}/hadoop/lib")
   mapreduce_libs_path = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/*")
+  conf_dir = format("/usr/hdp/{rpm_version}/oozie/conf")
 else:
   hadoop_conf_dir = "/etc/hadoop/conf"
   hadoop_bin_dir = "/usr/bin"
   hadoop_lib_home = "/usr/lib/hadoop/lib"
   mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+  conf_dir = "/etc/oozie/conf"
 
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
-conf_dir = "/etc/oozie/conf"
 user_group = config['configurations']['cluster-env']['user_group']
 jdk_location = config['hostLevelParams']['jdk_location']
 check_db_connection_jar_name = "DBConnectionVerification.jar"

+ 8 - 4
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/SQOOP/package/scripts/params.py

@@ -27,18 +27,22 @@ rpm_version = default("/configurations/hadoop-env/rpm_version", None)
 #hadoop params
 if rpm_version is not None:
   zoo_conf_dir = format('/usr/hdp/{rpm_version}/etc/zookeeper')
+  sqoop_conf_dir = format('/usr/hdp/{rpm_version}/sqoop/conf')
+  sqoop_lib = format('/usr/hdp/{rpm_version}/sqoop/lib')
+  hbase_home = format('/usr/hdp/{rpm_version}/hbase')
+  hive_home = format('/usr/hdp/{rpm_version}/hive')
 else:
   zoo_conf_dir = "/etc/zookeeper"
+  sqoop_conf_dir = "/usr/lib/sqoop/conf"
+  sqoop_lib = "/usr/lib/sqoop/lib"
+  hbase_home = "/usr"
+  hive_home = "/usr"
 
 security_enabled = config['configurations']['cluster-env']['security_enabled']
 smokeuser = config['configurations']['cluster-env']['smokeuser']
 user_group = config['configurations']['cluster-env']['user_group']
 sqoop_env_sh_template = config['configurations']['sqoop-env']['content']
 
-sqoop_conf_dir = "/usr/lib/sqoop/conf"
-hbase_home = "/usr"
-hive_home = "/usr"
-sqoop_lib = "/usr/lib/sqoop/lib"
 sqoop_user = config['configurations']['sqoop-env']['sqoop_user']
 
 smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']

+ 0 - 110
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/metainfo.xml

@@ -1,110 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <displayName>WebHCat</displayName>
-      <comment>Provides a REST-like web API for HCatalog and related Hadoop components.</comment>
-      <version>0.12.0.2.0</version>
-      <components>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <displayName>WebHCat Server</displayName>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE2/MAPREDUCE2_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>WEBHCAT/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>YARN/YARN_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hcatalog</name>
-            </package>
-            <package>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      
-      <requiredServices>
-        <service>HIVE</service>
-        <service>ZOOKEEPER</service>
-      </requiredServices>
-      
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-        <config-type>webhcat-env</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

+ 0 - 20
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/__init__.py

@@ -1,20 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""

+ 0 - 102
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/params.py

@@ -1,102 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-#RPM versioning support
-rpm_version = default("/configurations/hadoop-env/rpm_version", None)
-
-#hadoop params
-hdp_stack_version = config['hostLevelParams']['stack_version']
-if rpm_version is not None:
-  hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
-  hadoop_home = format('/usr/hdp/{rpm_version}/hadoop')
-  hadoop_streeming_jars = format("/usr/hdp/{rpm_version}/hadoop-mapreduce/hadoop-streaming-*.jar")
-  if str(hdp_stack_version).startswith('2.0'):
-    config_dir = format('/usr/hdp/{rpm_version}/etc/hcatalog/conf')
-    webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hcatalog/sbin')
-  # for newer versions
-  else:
-    config_dir = format('/usr/hdp/{rpm_version}/etc/hive-webhcat/conf')
-    webhcat_bin_dir = format('/usr/hdp/{rpm_version}/hive/hive-hcatalog/sbin')
-else:
-  hadoop_bin_dir = "/usr/bin"
-  hadoop_home = '/usr'
-  hadoop_streeming_jars = '/usr/lib/hadoop-mapreduce/hadoop-streaming-*.jar'
-  if str(hdp_stack_version).startswith('2.0'):
-    config_dir = '/etc/hcatalog/conf'
-    webhcat_bin_dir = '/usr/lib/hcatalog/sbin'
-  # for newer versions
-  else:
-    config_dir = '/etc/hive-webhcat/conf'
-    webhcat_bin_dir = '/usr/lib/hive-hcatalog/sbin'
-
-hcat_user = config['configurations']['hive-env']['hcat_user']
-webhcat_user = config['configurations']['hive-env']['webhcat_user']
-
-webhcat_env_sh_template = config['configurations']['webhcat-env']['content']
-templeton_log_dir = config['configurations']['hive-env']['hcat_log_dir']
-templeton_pid_dir = status_params.templeton_pid_dir
-
-pid_file = status_params.pid_file
-
-hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
-smokeuser = config['configurations']['cluster-env']['smokeuser']
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-hcat_hdfs_user_dir = format("/user/{hcat_user}")
-hcat_hdfs_user_mode = 0755
-webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-webhcat_hdfs_user_mode = 0755
-webhcat_apps_dir = "/apps/webhcat"
-#for create_hdfs_directory
-hostname = config["hostname"]
-security_param = "true" if security_enabled else "false"
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
-kinit_path_local = functions.get_kinit_path(["/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-import functools
-#create partial functions with common arguments for every HdfsDirectory call
-#to create hdfs directory we need to call params.HdfsDirectory in code
-HdfsDirectory = functools.partial(
-  HdfsDirectory,
-  conf_dir=hadoop_conf_dir,
-  hdfs_user=hdfs_user,
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  bin_dir = hadoop_bin_dir
-)

+ 0 - 45
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/WEBHCAT/package/scripts/service_check.py

@@ -1,45 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-class WebHCatServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-
-    File(format("{tmp_dir}/templetonSmoke.sh"),
-         content= StaticFile('templetonSmoke.sh'),
-         mode=0755
-    )
-
-    cmd = format("{tmp_dir}/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-                 " {security_param} {kinit_path_local}",
-                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True)
-
-if __name__ == "__main__":
-  WebHCatServiceCheck().execute()

+ 1 - 3
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/role_command_order.json

@@ -26,10 +26,8 @@
         "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
     "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],

+ 58 - 42
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/HIVE/metainfo.xml

@@ -23,62 +23,78 @@
       <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
       <version>0.13.0.2.1</version>
 
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>global</config-type>
-        <config-type>mapred-site</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>This is comment for HCATALOG service</comment>
-      <version>0.12.0.2.1</version>
       <components>
         <component>
-          <name>HCAT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <configFiles>
-            <configFile>
-              <type>xml</type>
-              <fileName>hive-site.xml</fileName>
-              <dictionaryName>hive-site</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-env.sh</fileName>
-              <dictionaryName>hive-env</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-log4j.properties</fileName>
-              <dictionaryName>hive-log4j</dictionaryName>
-            </configFile>
-            <configFile>
-              <type>env</type>
-              <fileName>hive-exec-log4j.properties</fileName>
-              <dictionaryName>hive-exec-log4j</dictionaryName>
-            </configFile>
-          </configFiles>
+          <name>HIVE_SERVER</name>
+          <dependencies>
+            <dependency>
+              <name>TEZ/TEZ_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
         </component>
       </components>
+
       <osSpecifics>
         <osSpecific>
           <osFamily>any</osFamily>
           <packages>
+            <package>
+              <name>hive</name>
+            </package>
             <package>
               <name>hive-hcatalog</name>
             </package>
+            <package>
+              <name>hive-webhcat</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
           </packages>
         </osSpecific>
       </osSpecifics>
-    </service>
 
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+      </configuration-dependencies>
+    </service>
   </services>
 </metainfo>
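
The consolidated osSpecifics block above layers an "any" entry (applies everywhere) over comma-separated OS-family entries. A minimal sketch, assuming simple family matching, of how such a layout resolves to a package list for one host; the resolver below is hypothetical, not Ambari's actual code:

    # Hypothetical resolver for the osSpecifics layout shown above.
    def packages_for(os_family, os_specifics):
        pkgs = []
        for families, names in os_specifics:
            # "any" applies to every host; otherwise match the family list.
            if families == "any" or os_family in families.split(","):
                pkgs.extend(names)
        return pkgs

    HIVE_OS_SPECIFICS = [
        ("any", ["hive", "hive-hcatalog", "hive-webhcat",
                 "webhcat-tar-hive", "webhcat-tar-pig", "mysql-connector-java"]),
        ("redhat5,redhat6,suse11", ["mysql"]),
        ("redhat5,redhat6,ubuntu12", ["mysql-server"]),
        ("suse11", ["mysql-client"]),
    ]

    print(packages_for("suse11", HIVE_OS_SPECIFICS))
    # -> the six "any" packages plus mysql and mysql-client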

+ 0 - 143
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/WEBHCAT/configuration/webhcat-site.xml

@@ -1,143 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add the the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>glusterfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>glusterfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.home</name>
-    <value>hive.tar.gz/hive</value>
-    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat.home</name>
-    <value>hive.tar.gz/hive/hcatalog</value>
-    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>glusterfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The glusterfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Time out for templeton api</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.queue.name</name>
-    <value>default</value>
-    <description>MapReduce queue name where WebHCat map-only jobs will be submitted to. Can be used to avoid a deadlock where all map slots in the cluster are taken over by Templeton launcher tasks.</description>
-  </property>
-
-</configuration>

+ 0 - 46
ambari-server/src/main/resources/stacks/HDP/2.1.GlusterFS/services/WEBHCAT/metainfo.xml

@@ -1,46 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <comment>This is comment for WEBHCAT service</comment>
-      <version>0.13.0.2.1</version>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive-webhcat</name>
-            </package>
-            <package>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

+ 1 - 2
ambari-server/src/main/resources/stacks/HDP/2.1/role_command_order.json

@@ -26,9 +26,8 @@
         "WEBHCAT_SERVER-START", "FLUME_HANDLER-START"],
     "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
     "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START", "WEBHCAT_SERVER-START"],
     "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
     "PIG_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
     "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],

+ 6 - 3
ambari-server/src/main/resources/stacks/HDP/2.1/services/FALCON/package/scripts/params.py

@@ -30,9 +30,15 @@ rpm_version = default("/configurations/hadoop-env/rpm_version", None)
 if rpm_version is not None:
   hadoop_conf_dir = format("/usr/hdp/{rpm_version}/etc/hadoop/conf")
   hadoop_bin_dir = format("/usr/hdp/{rpm_version}/hadoop/bin")
+  falcon_webapp_dir = format("/usr/hdp/{rpm_version}/falcon/webapp")
+  falcon_home = format("/usr/hdp/{rpm_version}/falcon")
+  falcon_conf_dir = format("/usr/hdp/{rpm_version}/falcon/conf")
 else:
   hadoop_conf_dir = "/etc/hadoop/conf"
   hadoop_bin_dir = "/usr/bin"
+  falcon_webapp_dir = '/var/lib/falcon/webapp'
+  falcon_home = '/usr/lib/falcon'
+  falcon_conf_dir = '/etc/falcon/conf'
 
 oozie_user = config['configurations']['oozie-env']['oozie_user']
 falcon_user = config['configurations']['falcon-env']['falcon_user']
@@ -42,8 +48,6 @@ user_group = config['configurations']['cluster-env']['user_group']
 proxyuser_group =  config['configurations']['hadoop-env']['proxyuser_group']
 
 java_home = config['hostLevelParams']['java_home']
-falcon_home = '/usr/lib/falcon'
-falcon_conf_dir = '/etc/falcon/conf'
 falcon_local_dir = config['configurations']['falcon-env']['falcon_local_dir']
 falcon_log_dir = config['configurations']['falcon-env']['falcon_log_dir']
 store_uri = config['configurations']['falcon-startup.properties']['*.config.store.uri']
@@ -59,7 +63,6 @@ falcon_startup_properties = config['configurations']['falcon-startup.properties'
 smokeuser_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
 falcon_env_sh_template = config['configurations']['falcon-env']['content']
 
-falcon_webapp_dir = '/var/lib/falcon/webapp'
 flacon_apps_dir = '/apps/falcon'
 #for create_hdfs_directory
 security_enabled = config['configurations']['cluster-env']['security_enabled']
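
The Falcon paths join the same rpm_version switch already used for the Hadoop paths: a versioned /usr/hdp/&lt;version&gt; layout when rpm_version is set, the legacy flat layout otherwise. A standalone sketch of the pattern in plain Python (without the resource_management format/default helpers, so it runs on its own):

    def falcon_paths(rpm_version=None):
        # Versioned HDP layout when rpm_version is set, legacy layout otherwise.
        if rpm_version is not None:
            root = "/usr/hdp/%s" % rpm_version
            return {"falcon_home": root + "/falcon",
                    "falcon_conf_dir": root + "/falcon/conf",
                    "falcon_webapp_dir": root + "/falcon/webapp"}
        return {"falcon_home": "/usr/lib/falcon",
                "falcon_conf_dir": "/etc/falcon/conf",
                "falcon_webapp_dir": "/var/lib/falcon/webapp"}

    print(falcon_paths("2.9.9.9-117")["falcon_conf_dir"])  # /usr/hdp/2.9.9.9-117/falcon/conf
    print(falcon_paths()["falcon_conf_dir"])               # /etc/falcon/conf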

+ 47 - 15
ambari-server/src/main/resources/stacks/HDP/2.1/services/HIVE/metainfo.xml

@@ -37,31 +37,63 @@
           </dependencies>
         </component>
       </components>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>hive-log4j</config-type>
-        <config-type>hive-exec-log4j</config-type>
-        <config-type>tez-site</config-type>
-        <config-type>hive-env</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>This is comment for HCATALOG service</comment>
-      <version>0.12.0.2.1</version>
       <osSpecifics>
         <osSpecific>
           <osFamily>any</osFamily>
           <packages>
+            <package>
+              <name>hive</name>
+            </package>
             <package>
               <name>hive-hcatalog</name>
             </package>
+            <package>
+              <name>hive-webhcat</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
+            </package>
           </packages>
         </osSpecific>
       </osSpecifics>
-    </service>
 
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>hive-log4j</config-type>
+        <config-type>hive-exec-log4j</config-type>
+        <config-type>tez-site</config-type>
+        <config-type>hive-env</config-type>
+      </configuration-dependencies>
+    </service>
   </services>
 </metainfo>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.1/services/STORM/configuration/storm-env.xml

@@ -48,7 +48,7 @@
 # Set Storm specific environment variables here.
 
 # The java implementation to use.
-export JAVA_HOME={{java_home}}
+export JAVA_HOME={{java64_home}}
 
 # export STORM_CONF_DIR=""
     </value>
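
Switching the token from {{java_home}} to {{java64_home}} makes the rendered storm-env.sh pick up the 64-bit JDK parameter. A toy renderer for such tokens, standing in for Ambari's real template substitution (the sample JDK path is made up):

    def render(template, params):
        # Replace each {{name}} token with its parameter value.
        for name, value in params.items():
            template = template.replace("{{%s}}" % name, value)
        return template

    line = "export JAVA_HOME={{java64_home}}"
    print(render(line, {"java64_home": "/usr/jdk64/jdk1.7.0_45"}))
    # export JAVA_HOME=/usr/jdk64/jdk1.7.0_45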

+ 0 - 143
ambari-server/src/main/resources/stacks/HDP/2.1/services/WEBHCAT/configuration/webhcat-site.xml

@@ -1,143 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration supports_final="true">
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.home</name>
-    <value>hive.tar.gz/hive</value>
-    <description>The path to the Hive home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat.home</name>
-    <value>hive.tar.gz/hive/hcatalog</value>
-    <description>The path to the HCat home within the tar. Has no effect if templeton.hive.archive is not set.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value>hive.metastore.local=false, hive.metastore.uris=thrift://localhost:9933, hive.metastore.sasl.enabled=false</value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Time out for templeton api</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.queue.name</name>
-    <value>default</value>
-    <description>MapReduce queue name where WebHCat map-only jobs will be submitted to. Can be used to avoid a deadlock where all map slots in the cluster are taken over by Templeton launcher tasks.</description>
-  </property>
-
-</configuration>

+ 0 - 47
ambari-server/src/main/resources/stacks/HDP/2.1/services/WEBHCAT/metainfo.xml

@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <comment>This is comment for WEBHCAT service</comment>
-      <version>0.13.0.2.1</version>
-      <osSpecifics>
-        <osSpecific>
-          <osFamily>any</osFamily>
-          <packages>
-            <package>
-              <name>hive-webhcat</name>
-            </package>
-            <package>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-        <config-type>webhcat-env</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

+ 0 - 4
ambari-server/src/main/resources/stacks/HDP/2.2.1/services/HIVE/metainfo.xml

@@ -23,9 +23,5 @@
       <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
       <version>0.14.0.2.2</version>
     </service>
-    <service>
-      <name>HCATALOG</name>
-      <version>0.14.0.2.2</version>
-    </service>
   </services>
 </metainfo>

+ 0 - 26
ambari-server/src/main/resources/stacks/HDP/2.2.1/services/WEBHCAT/metainfo.xml

@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <version>0.14.0.2.2</version>
-    </service>
-  </services>
-</metainfo>

+ 6 - 21
ambari-server/src/main/resources/stacks/HDP/2.2/repos/repoinfo.xml

@@ -18,15 +18,10 @@
 <reposinfo>
   <os type="redhat6">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/BUILDS/2.9.9.9-98</baseurl>
-      <repoid>HDP-2.9.9.9-98</repoid>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.9.9.9</baseurl>
+      <repoid>HDP-2.9.9.9</repoid>
       <reponame>HDP</reponame>
     </repo>
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos6/2.x/updates/2.2.0.0</baseurl>
-      <repoid>HDP-2.2.0.0</repoid>
-      <reponame>HDP-2.2</reponame>
-    </repo>
     <repo>
       <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos6</baseurl>
       <repoid>HDP-UTILS-1.1.0.17</repoid>
@@ -35,15 +30,10 @@
   </os>
   <os type="redhat5">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/BUILDS/2.9.9.9-98</baseurl>
-      <repoid>HDP-2.9.9.9-98</repoid>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.9.9.9</baseurl>
+      <repoid>HDP-2.9.9.9</repoid>
       <reponame>HDP</reponame>
     </repo>
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0</baseurl>
-      <repoid>HDP-2.2.0.0</repoid>
-      <reponame>HDP-2.2</reponame>
-    </repo>
     <repo>
       <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/centos5</baseurl>
       <repoid>HDP-UTILS-1.1.0.17</repoid>
@@ -52,15 +42,10 @@
   </os>
   <os type="suse11">
     <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/BUILDS/2.9.9.9-98</baseurl>
-      <repoid>HDP-2.9.9.9-98</repoid>
+      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/updates/2.9.9.9</baseurl>
+      <repoid>HDP-2.9.9.9</repoid>
       <reponame>HDP</reponame>
     </repo>
-    <repo>
-      <baseurl>http://s3.amazonaws.com/dev.hortonworks.com/HDP/suse11/2.x/updates/2.2.0.0</baseurl>
-      <repoid>HDP-2.2.0.0</repoid>
-      <reponame>HDP-2.2</reponame>
-    </repo>
     <repo>
       <baseurl>http://public-repo-1.hortonworks.com/HDP-UTILS-1.1.0.17/repos/suse11</baseurl>
       <repoid>HDP-UTILS-1.1.0.17</repoid>
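
After this edit each &lt;os&gt; element carries a single HDP repo pointing at the 2.9.9.9 updates line plus HDP-UTILS; the pinned HDP-2.2.0.0 entries are gone. A quick parse of the resulting shape, assuming only the element names visible above:

    import xml.etree.ElementTree as ET

    FRAGMENT = """<reposinfo>
      <os type="redhat6">
        <repo><repoid>HDP-2.9.9.9</repoid></repo>
        <repo><repoid>HDP-UTILS-1.1.0.17</repoid></repo>
      </os>
    </reposinfo>"""

    for os_el in ET.fromstring(FRAGMENT).findall("os"):
        repo_ids = [r.findtext("repoid") for r in os_el.findall("repo")]
        print(os_el.get("type"), repo_ids)
    # redhat6 ['HDP-2.9.9.9', 'HDP-UTILS-1.1.0.17']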

+ 11 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/FALCON/metainfo.xml

@@ -22,7 +22,17 @@
       <name>FALCON</name>
       <displayName>Falcon</displayName>
       <comment>Data management and processing platform</comment>
-      <version>0.6.0.2.2.0.0</version>
+      <version>0.6.0.2.2.9.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>falcon_2_9_9_9_117</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
     </service>
   </services>
 </metainfo>
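
The package name now embeds the build number (falcon_2_9_9_9_117), matching the repo move to 2.9.9.9. These names appear to follow a simple convention of replacing dots and the dash with underscores; the helper below is an assumption for illustration, not Ambari code:

    def package_suffix(stack_build):
        # "2.9.9.9-117" -> "2_9_9_9_117"
        return stack_build.replace(".", "_").replace("-", "_")

    print("falcon_" + package_suffix("2.9.9.9-117"))  # falcon_2_9_9_9_117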

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/FLUME/metainfo.xml

@@ -29,7 +29,7 @@
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>flume_2_9_9_9_98</name>
+              <name>flume_2_9_9_9_117</name>
             </package>
           </packages>
         </osSpecific>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/HBASE/metainfo.xml

@@ -31,7 +31,7 @@
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>hbase_2_9_9_9_98</name>
+              <name>hbase_2_9_9_9_117</name>
             </package>
           </packages>
         </osSpecific>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hadoop-env.xml

@@ -23,7 +23,7 @@
 <configuration>
   <property>
     <name>rpm_version</name>
-    <value>2.9.9.9-98</value>
+    <value>2.9.9.9-117</value>
     <description>Hadoop RPM version</description>
   </property>
 </configuration>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/configuration/hdfs-site.xml

@@ -24,7 +24,7 @@
 
   <property>
     <name>dfs.hosts.exclude</name>
-    <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/dfs.exclude</value>
+    <value>/usr/hdp/2.9.9.9-117/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
       not permitted to connect to the namenode.  The full pathname of the
       file must be specified.  If the value is empty, no hosts are

+ 2 - 2
ambari-server/src/main/resources/stacks/HDP/2.2/services/HDFS/metainfo.xml

@@ -29,7 +29,7 @@
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>hadoop_2_9_9_9_98</name>
+              <name>hadoop_2_9_9_9_117</name>
             </package>
             <package>
               <name>hadoop-lzo</name>
@@ -53,7 +53,7 @@
               <name>hadoop-lzo-native</name>
             </package>
             <package>
-              <name>hadoop_2_9_9_9_98-libhdfs</name>
+              <name>hadoop_2_9_9_9_117-libhdfs</name>
             </package>
             <package>
               <name>ambari-log4j</name>

+ 5 - 5
ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/configuration/webhcat-site.xml → ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/configuration/webhcat-site.xml

@@ -25,33 +25,33 @@ limitations under the License.
 
   <property>
     <name>templeton.hadoop.conf.dir</name>
-    <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value>
+    <value>/usr/hdp/2.9.9.9-117/etc/hadoop/conf</value>
     <description>The path to the Hadoop configuration.</description>
   </property>
 
   <property>
     <name>templeton.jar</name>
-    <value>/usr/hdp/2.9.9.9-98/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <value>/usr/hdp/2.9.9.9-117/hcatalog/share/webhcat/svr/webhcat.jar</value>
     <description>The path to the Templeton jar file.</description>
   </property>
 
   <property>
     <name>templeton.libjars</name>
-    <value>/usr/hdp/2.9.9.9-98/zookeeper/zookeeper.jar</value>
+    <value>/usr/hdp/2.9.9.9-117/zookeeper/zookeeper.jar</value>
    <description>Jars to add to the classpath.</description>
   </property>
 
 
   <property>
     <name>templeton.hadoop</name>
-    <value>/usr/hdp/2.9.9.9-98/hadoop/bin/hadoop</value>
+    <value>/usr/hdp/2.9.9.9-117/hadoop/bin/hadoop</value>
     <description>The path to the Hadoop executable.</description>
   </property>
 
 
   <property>
     <name>templeton.hcat</name>
-    <value>/usr/hdp/2.9.9.9-98/hive/bin/hcat</value>
+    <value>/usr/hdp/2.9.9.9-117/hive/bin/hcat</value>
     <description>The path to the hcatalog executable.</description>
   </property>
 

+ 40 - 8
ambari-server/src/main/resources/stacks/HDP/2.2/services/HIVE/metainfo.xml

@@ -22,23 +22,55 @@
       <name>HIVE</name>
       <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
       <version>0.14.0.2.9.9.9</version>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>This is comment for HCATALOG service</comment>
-      <version>0.14.0.2.9.9.9</version>
       <osSpecifics>
         <osSpecific>
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>hive_2_9_9_9_98-hcatalog</name>
+              <name>hive_2_9_9_9_117</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <name>hive_2_9_9_9_117-hcatalog</name>
+            </package>
+            <package>
+              <name>hive_2_9_9_9_117-webhcat</name>
+            </package>
+            <package>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <name>webhcat-tar-pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>redhat5,redhat6,ubuntu12</osFamily>
+          <packages>
+            <package>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osFamily>suse11</osFamily>
+          <packages>
+            <package>
+              <name>mysql-client</name>
             </package>
           </packages>
         </osSpecific>
       </osSpecifics>
     </service>
-
   </services>
 </metainfo>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/configuration/oozie-site.xml

@@ -21,7 +21,7 @@
 
   <property>
     <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-    <value>*=/usr/hdp/2.9.9.9-98/etc/hadoop/conf</value>
+    <value>*=/usr/hdp/2.9.9.9-117/etc/hadoop/conf</value>
     <description>
       Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
       the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is

+ 26 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/OOZIE/metainfo.xml

@@ -22,7 +22,32 @@
       <name>OOZIE</name>
       <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/legal/open-source-faq/"&gt;ExtJS&lt;/a&gt; Library.
       </comment>
-      <version>4.1.0.2.2.0.0</version>
+      <version>4.1.0.2.2.9.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>oozie_2_9_9_9_117</name>
+            </package>
+            <package>
+              <name>oozie_2_9_9_9_117-client</name>
+            </package>
+            <package>
+              <name>falcon_2_9_9_9_117</name>
+            </package>
+            <package>
+              <name>zip</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <name>extjs</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
     </service>
   </services>
 </metainfo>

+ 1 - 4
ambari-server/src/main/resources/stacks/HDP/2.2/services/PIG/metainfo.xml

@@ -23,19 +23,16 @@
       <displayName>Pig</displayName>
       <comment>Scripting platform for analyzing large datasets</comment>
       <version>0.14.0.2.9.9.9</version>
-
       <osSpecifics>
         <osSpecific>
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>pig_2_9_9_9_98</name>
+              <name>pig_2_9_9_9_117</name>
             </package>
           </packages>
         </osSpecific>
       </osSpecifics>
-
-
     </service>
   </services>
 </metainfo>

+ 14 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/SQOOP/metainfo.xml

@@ -23,7 +23,20 @@
       <comment>Tool for transferring bulk data between Apache Hadoop and
         structured data stores such as relational databases
       </comment>
-      <version>1.4.5.2.2</version>
+      <version>1.4.5.2.9.9.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>sqoop_2_9_9_9_117</name>
+            </package>
+            <package>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
     </service>
   </services>
 </metainfo>

+ 11 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/STORM/metainfo.xml

@@ -23,7 +23,17 @@
       <name>STORM</name>
       <displayName>Storm</displayName>
       <comment>Apache Hadoop Stream processing framework</comment>
-      <version>0.9.3.2.2.0.0</version>
+      <version>0.9.3.2.9.9.9</version>
+      <osSpecifics>
+        <osSpecific>
+          <osFamily>any</osFamily>
+          <packages>
+            <package>
+              <name>storm_2_9_9_9_117</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
     </service>
   </services>
 </metainfo>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/TEZ/metainfo.xml

@@ -29,7 +29,7 @@
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>tez_2_9_9_9_98</name>
+              <name>tez_2_9_9_9_117</name>
             </package>
           </packages>
         </osSpecific>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/WEBHCAT/metainfo.xml

@@ -27,7 +27,7 @@
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>hive_2_9_9_9_98-webhcat</name>
+              <name>hive_2_9_9_9_117-webhcat</name>
             </package>
             <package>
               <name>webhcat-tar-hive</name>

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration-mapred/mapred-site.xml

@@ -24,7 +24,7 @@
 
   <property>
     <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/hdp/2.9.9.9-98/hadoop/lib/native/Linux-amd64-64</value>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/hdp/2.9.9.9-117/hadoop/lib/native/Linux-amd64-64</value>
     <description>
       Additional execution environment entries for map and reduce task processes.
       This is not an additive property. You must preserve the original value if

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/configuration/yarn-site.xml

@@ -23,7 +23,7 @@
 
   <property>
     <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/usr/hdp/2.9.9.9-98/etc/hadoop/conf/yarn.exclude</value>
+    <value>/usr/hdp/2.9.9.9-117/etc/hadoop/conf/yarn.exclude</value>
     <description>
       Names a file that contains a list of hosts that are
       not permitted to connect to the resource manager.  The full pathname of the

+ 3 - 3
ambari-server/src/main/resources/stacks/HDP/2.2/services/YARN/metainfo.xml

@@ -38,10 +38,10 @@
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>hadoop_2_9_9_9_98-yarn</name>
+              <name>hadoop_2_9_9_9_117-yarn</name>
             </package>
             <package>
-              <name>hadoop_2_9_9_9_98-mapreduce</name>
+              <name>hadoop_2_9_9_9_117-mapreduce</name>
             </package>
           </packages>
         </osSpecific>
@@ -58,7 +58,7 @@
           <osFamily>any</osFamily>
           <packages>
             <package>
-              <name>hadoop_2_9_9_9_98-mapreduce</name>
+              <name>hadoop_2_9_9_9_117-mapreduce</name>
             </package>
           </packages>
         </osSpecific>

Some files are not shown because a large number of files changed in this diff.