AMBARI-22095 Make hooks stack agnostic (dsen)

Dmytro Sen, 8 years ago
commit 5b36cdfd87
79 changed files with 107 additions and 2,522 deletions
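
This commit consolidates the hook scripts that were previously duplicated per stack version (under stacks/HDP/2.0.6/hooks and stacks/HDP/3.0/hooks) into a single stack-agnostic stack-hooks resource directory. On the agent, for example, the cached path of a hook file moves from

    /var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar

to

    /var/lib/ambari-agent/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar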
  1. +2 -2    ambari-agent/pom.xml
  2. +2 -3    ambari-agent/src/main/python/ambari_agent/FileCache.py
  3. +2 -2    ambari-agent/src/test/python/ambari_agent/TestFileCache.py
  4. +2 -0    ambari-server/pom.xml
  5. +16 -4   ambari-server/src/main/assemblies/server.xml
  6. +2 -1    ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java
  7. +2 -1    ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java
  8. +2 -1    ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
  9. +1 -28   ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java
  10. +5 -0   ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java
  11. +0 -5   ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java
  12. +0 -8   ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java
  13. +5 -2   ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py
  14. +1 -1   ambari-server/src/main/python/ambari_server/serverConfiguration.py
  15. +1 -1   ambari-server/src/main/python/ambari_server/setupMpacks.py
  16. +2 -2   ambari-server/src/main/resources/scripts/Ambaripreupload.py
  17. +1 -1   ambari-server/src/main/resources/scripts/post-user-creation-hook.sh
  18. +0 -0   ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/hook.py
  19. +0 -0   ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py
  20. +0 -0   ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py
  21. +0 -0   ambari-server/src/main/resources/stack-hooks/before-ANY/files/changeToSecureUid.sh
  22. +0 -0   ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py
  23. +0 -0   ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py
  24. +0 -0   ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py
  25. +0 -0   ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/hook.py
  26. +0 -0   ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py
  27. +0 -0   ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py
  28. +0 -0   ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py
  29. +0 -0   ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py
  30. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh
  31. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar
  32. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties
  33. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py
  34. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py
  35. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py
  36. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py
  37. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py
  38. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py
  39. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2
  40. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2
  41. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2
  42. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2
  43. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2
  44. +0 -0   ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2
  45. +1 -1   ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py
  46. +0 -37  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py
  47. +0 -109 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py
  48. +0 -140 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py
  49. +0 -53  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh
  50. +0 -36  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py
  51. +0 -254 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py
  52. +0 -239 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py
  53. +0 -37  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py
  54. +0 -115 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py
  55. +0 -76  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py
  56. +0 -37  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py
  57. +0 -29  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py
  58. +0 -65  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh
  59. BIN     ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar
  60. +0 -134 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties
  61. +0 -66  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py
  62. +0 -40  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py
  63. +0 -364 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py
  64. +0 -47  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py
  65. +0 -249 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py
  66. +0 -43  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2
  67. +0 -21  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2
  68. +0 -107 ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2
  69. +0 -81  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2
  70. +0 -21  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2
  71. +0 -24  ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2
  72. +0 -19  ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java
  73. +1 -0   ambari-server/src/test/python/TestResourceFilesKeeper.py
  74. +19 -7  ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py
  75. +4 -1   ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py
  76. +11 -3  ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py
  77. +16 -5  ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py
  78. +8 -0   ambari-server/src/test/python/stacks/utils/RMFTestCase.py
  79. +1 -0   contrib/management-packs/hdf-ambari-mpack/src/main/assemblies/hdf-ambari-mpack.xml

+ 2 - 2
ambari-agent/pom.xml

@@ -318,8 +318,7 @@
                     <include>/cred/lib/*.jar</include>
                     <include>/tools/*.jar</include>
                     <include>/cache/stacks/HDP/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</include>
-                    <include>/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</include>
-                    <include>/cache/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar</include>
+                    <include>/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar</include>
                     <include>/cache/common-services/STORM/0.9.1/package/files/wordCount.jar</include>
                   </includes>
                 </source>
@@ -487,6 +486,7 @@
                   <directory>${resourcesFolder}</directory>
                   <includes>
                     <include>common-services/**</include>
+                    <include>stack-hooks/**</include>
                     <include>stacks/stack_advisor.py</include>
                     <include>stacks/${stack.distribution}/**/*</include>
                   </includes>

+ 2 - 3
ambari-agent/src/main/python/ambari_agent/FileCache.py

@@ -83,11 +83,10 @@ class FileCache():
     Returns a base directory for hooks
     """
     try:
-      hooks_subpath = command['commandParams']['hooks_folder']
+      hooks_path = command['commandParams']['hooks_folder']
     except KeyError:
       return None
-    subpath = os.path.join(self.STACKS_CACHE_DIRECTORY, hooks_subpath)
-    return self.provide_directory(self.cache_dir, subpath,
+    return self.provide_directory(self.cache_dir, hooks_path,
                                   server_url_prefix)
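
With this change the server sends hooks_folder as a path relative to the agent cache root ("stack-hooks") rather than a stack-specific subpath, so FileCache no longer prepends the stacks/ cache directory. A minimal sketch of the resulting resolution (illustrative only; the real method delegates to provide_directory, which also downloads the directory from the server):

    import os

    def get_hook_base_dir(command, cache_dir="/var/lib/ambari-agent/cache"):
        # hooks_folder is now cache-relative, e.g. "stack-hooks",
        # instead of the old "HDP/2.0.6/hooks" joined under "stacks/".
        try:
            hooks_path = command['commandParams']['hooks_folder']
        except KeyError:
            return None
        return os.path.join(cache_dir, hooks_path)

    print(get_hook_base_dir({'commandParams': {'hooks_folder': 'stack-hooks'}}))
    # -> /var/lib/ambari-agent/cache/stack-hooks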
 
 

+ 2 - 2
ambari-agent/src/test/python/ambari_agent/TestFileCache.py

@@ -93,7 +93,7 @@ class TestFileCache(TestCase):
     # Check existing dir case
     command = {
       'commandParams' : {
-        'hooks_folder' : os.path.join('HDP', '2.1.1', 'hooks')
+        'hooks_folder' : 'stack-hooks'
       }
     }
     provide_directory_mock.return_value = "dummy value"
@@ -103,7 +103,7 @@ class TestFileCache(TestCase):
       pprint.pformat(provide_directory_mock.call_args_list[0][0]),
       "('/var/lib/ambari-agent/cache', "
       "{0}, "
-      "'server_url_pref')".format(pprint.pformat(os.path.join('stacks','HDP', '2.1.1', 'hooks'))))
+      "'server_url_pref')".format(pprint.pformat('stack-hooks')))
     self.assertEquals(res, "dummy value")
 
 

+ 2 - 0
ambari-server/pom.xml

@@ -44,6 +44,7 @@
     <customActionsRoot>src/main/resources/custom_actions</customActionsRoot>
     <ambariProperties>conf/unix/ambari.properties</ambariProperties>
     <commonServicesSrcLocation>src/main/resources/common-services</commonServicesSrcLocation>
+    <stackHooksLocation>src/main/resources/stack-hooks</stackHooksLocation>
     <stacksSrcLocation>src/main/resources/stacks/${stack.distribution}</stacksSrcLocation>
     <tarballResourcesFolder>src/main/resources</tarballResourcesFolder>
     <skipPythonTests>false</skipPythonTests>
@@ -1067,6 +1068,7 @@
         <ambariProperties>target/pluggable-stack-definition/conf/unix/ambari.properties</ambariProperties>
         <resourceManagementSrcLocation>target/pluggable-stack-definition/python/resource_management</resourceManagementSrcLocation>
         <commonServicesSrcLocation>target/pluggable-stack-definition/common-services</commonServicesSrcLocation>
+        <stackHooksLocation>target/pluggable-stack-definition/stack-hooks</stackHooksLocation>
         <stacksSrcLocation>target/pluggable-stack-definition/stacks/${stack.distribution}</stacksSrcLocation>
         <resourcesSrcLocation>src/main/resources</resourcesSrcLocation>
         <tarballResourcesFolder>target/pluggable-stack-definition</tarballResourcesFolder>

+ 16 - 4
ambari-server/src/main/assemblies/server.xml

@@ -165,6 +165,22 @@
 	    <include>/STORM/0.9.1/package/files/wordCount.jar</include>
 	  </includes>
     </fileSet>
+    <fileSet>
+      <fileMode>755</fileMode>
+      <directory>${stackHooksLocation}</directory>
+      <outputDirectory>/var/lib/ambari-server/resources/stack-hooks</outputDirectory>
+      <excludes>
+	    <exclude>/before-START/files/fast-hdfs-resource.jar</exclude>
+	  </excludes>
+    </fileSet>
+    <fileSet>
+      <fileMode>644</fileMode>
+      <directory>${stackHooksLocation}</directory>
+      <outputDirectory>/var/lib/ambari-server/resources/stack-hooks</outputDirectory>
+      <includes>
+	    <include>/before-START/files/fast-hdfs-resource.jar</include>
+	  </includes>
+    </fileSet>
     <fileSet>
       <fileMode>755</fileMode>
       <directory>src/main/resources/upgrade/catalog</directory>
@@ -176,8 +192,6 @@
       <outputDirectory>/var/lib/ambari-server/resources/stacks/${stack.distribution}</outputDirectory>
       <excludes>
 	    <exclude>/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</exclude>
-        <exclude>/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
-        <exclude>/3.0/hooks/before-START/files/fast-hdfs-resource.jar</exclude>
 	  </excludes>
     </fileSet>
     <fileSet>
@@ -186,8 +200,6 @@
       <outputDirectory>/var/lib/ambari-server/resources/stacks/${stack.distribution}</outputDirectory>
       <includes>
 	    <include>/2.1.GlusterFS/services/STORM/package/files/wordCount.jar</include>
-        <include>/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar</include>
-        <include>/3.0/hooks/before-START/files/fast-hdfs-resource.jar</include>
 	  </includes>
     </fileSet>
     <fileSet>

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/actionmanager/ExecutionCommandWrapper.java

@@ -20,6 +20,7 @@ package org.apache.ambari.server.actionmanager;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.VERSION;
+import static org.apache.ambari.server.stack.StackManager.DEFAULT_HOOKS_FOLDER;
 
 import java.util.HashMap;
 import java.util.Map;
@@ -268,7 +269,7 @@ public class ExecutionCommandWrapper {
           stackId.getStackVersion());
 
         if (!commandParams.containsKey(HOOKS_FOLDER)) {
-          commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+          commandParams.put(HOOKS_FOLDER, DEFAULT_HOOKS_FOLDER);
         }
 
         if (!commandParams.containsKey(SERVICE_PACKAGE_FOLDER)) {

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/agent/HeartbeatMonitor.java

@@ -25,6 +25,7 @@ import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SCRIPT_TY
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_NAME;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.STACK_VERSION;
+import static org.apache.ambari.server.stack.StackManager.DEFAULT_HOOKS_FOLDER;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -340,7 +341,7 @@ public class HeartbeatMonitor implements Runnable {
     commandParams.put(COMMAND_TIMEOUT, commandTimeout);
     commandParams.put(SERVICE_PACKAGE_FOLDER,
        serviceInfo.getServicePackageFolder());
-    commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+    commandParams.put(HOOKS_FOLDER, DEFAULT_HOOKS_FOLDER);
     // Fill host level params
     Map<String, String> hostLevelParams = statusCmd.getHostLevelParams();
     hostLevelParams.put(JDK_LOCATION, ambariManagementController.getJdkResourceUrl());

+ 2 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java

@@ -19,6 +19,7 @@ package org.apache.ambari.server.controller.internal;
 
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.HOOKS_FOLDER;
 import static org.apache.ambari.server.agent.ExecutionCommand.KeyNames.SERVICE_PACKAGE_FOLDER;
+import static org.apache.ambari.server.stack.StackManager.DEFAULT_HOOKS_FOLDER;
 
 import java.text.MessageFormat;
 import java.util.ArrayList;
@@ -907,7 +908,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
           effectiveStackId.getStackVersion(), serviceName);
 
       commandParams.put(SERVICE_PACKAGE_FOLDER, serviceInfo.getServicePackageFolder());
-      commandParams.put(HOOKS_FOLDER, stackInfo.getStackHooksFolder());
+      commandParams.put(HOOKS_FOLDER, DEFAULT_HOOKS_FOLDER);
     }
   }
 

+ 1 - 28
ambari-server/src/main/java/org/apache/ambari/server/stack/StackDirectory.java

@@ -72,10 +72,6 @@ public class StackDirectory extends StackDefinitionDirectory {
    * Filename for theme file at service layer
    */
   public static final String SERVICE_THEME_FILE_NAME = "theme.json";
-  /**
-   * hooks directory path
-   */
-  private String hooksDir;
 
   /**
    * upgrades directory path
@@ -142,15 +138,10 @@ public class StackDirectory extends StackDefinitionDirectory {
    */
   ModuleFileUnmarshaller unmarshaller = new ModuleFileUnmarshaller();
 
-  /**
-   * name of the hooks directory
-   */
-  public static final String HOOKS_FOLDER_NAME = "hooks";
   public static final FilenameFilter FILENAME_FILTER = new FilenameFilter() {
     @Override
     public boolean accept(File dir, String s) {
-      return !(s.equals(".svn") || s.equals(".git") ||
-          s.equals(HOOKS_FOLDER_NAME));
+      return !(s.equals(".svn") || s.equals(".git"));
     }
   };
 
@@ -205,15 +196,6 @@ public class StackDirectory extends StackDefinitionDirectory {
     return getDirectory().getParentFile().getName();
   }
 
-  /**
-   * Obtain the hooks directory path.
-   *
-   * @return hooks directory path
-   */
-  public String getHooksDir() {
-    return hooksDir;
-  }
-
   /**
    * Obtain the upgrades directory path.
    *
@@ -327,20 +309,11 @@ public class StackDirectory extends StackDefinitionDirectory {
    */
   private void parsePath() throws AmbariException {
     Collection<String> subDirs = Arrays.asList(directory.list());
-    if (subDirs.contains(HOOKS_FOLDER_NAME)) {
-      // hooksDir is expected to be relative to stack root
-      hooksDir = getStackDirName() + File.separator + getName() +
-          File.separator + HOOKS_FOLDER_NAME;
-    } else {
-      LOG.debug("Hooks folder {}{}" + HOOKS_FOLDER_NAME + " does not exist", getAbsolutePath(), File.separator);
-    }
-
     if (subDirs.contains(RCO_FILE_NAME)) {
       // rcoFile is expected to be absolute
       rcoFilePath = getAbsolutePath() + File.separator + RCO_FILE_NAME;
     }
 
-
     if (subDirs.contains(KERBEROS_DESCRIPTOR_FILE_NAME)) {
       // kerberosDescriptorFilePath is expected to be absolute
       kerberosDescriptorFilePath = getAbsolutePath() + File.separator + KERBEROS_DESCRIPTOR_FILE_NAME;

+ 5 - 0
ambari-server/src/main/java/org/apache/ambari/server/stack/StackManager.java

@@ -78,6 +78,11 @@ public class StackManager {
    */
   public static final String COMMON_SERVICES = "common-services";
 
+  /**
+   * Prefix used for common stack hooks parent path string
+   */
+  public static final String DEFAULT_HOOKS_FOLDER = "stack-hooks";
+
   /**
    * Prefix used for extension services parent path string
    */

+ 0 - 5
ambari-server/src/main/java/org/apache/ambari/server/stack/StackModule.java

@@ -284,10 +284,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
     mergeConfigurations(parentStack, allStacks, commonServices, extensions);
     mergeRoleCommandOrder(parentStack);
 
-    if (stackInfo.getStackHooksFolder() == null) {
-      stackInfo.setStackHooksFolder(parentStack.getModuleInfo().getStackHooksFolder());
-    }
-
     // grab stack level kerberos.json from parent stack
     if (stackInfo.getKerberosDescriptorFileLocation() == null) {
       stackInfo.setKerberosDescriptorFileLocation(parentStack.getModuleInfo().getKerberosDescriptorFileLocation());
@@ -574,7 +570,6 @@ public class StackModule extends BaseModule<StackModule, StackInfo> implements V
       stackInfo.setMinUpgradeVersion(smx.getVersion().getUpgrade());
       stackInfo.setActive(smx.getVersion().isActive());
       stackInfo.setParentStackVersion(smx.getExtends());
-      stackInfo.setStackHooksFolder(stackDirectory.getHooksDir());
       stackInfo.setRcoFileLocation(stackDirectory.getRcoFilePath());
       stackInfo.setKerberosDescriptorFileLocation(stackDirectory.getKerberosDescriptorFilePath());
       stackInfo.setKerberosDescriptorPreConfigurationFileLocation(stackDirectory.getKerberosDescriptorPreconfigureFilePath());

+ 0 - 8
ambari-server/src/main/java/org/apache/ambari/server/state/StackInfo.java

@@ -437,14 +437,6 @@ public class StackInfo implements Comparable<StackInfo>, Validable {
     this.widgetsDescriptorFileLocation = widgetsDescriptorFileLocation;
   }
 
-  public String getStackHooksFolder() {
-    return stackHooksFolder;
-  }
-
-  public void setStackHooksFolder(String stackHooksFolder) {
-    this.stackHooksFolder = stackHooksFolder;
-  }
-
   /**
    * Set the path of the stack upgrade directory.
    *

+ 5 - 2
ambari-server/src/main/python/ambari_server/resourceFilesKeeper.py

@@ -33,7 +33,7 @@ class ResourceFilesKeeper():
   This class encapsulates all utility methods for resource files maintenance.
   """
 
-  HOOKS_DIR="hooks"
+  STACK_HOOKS_DIR= "stack-hooks"
   PACKAGE_DIR="package"
   STACKS_DIR="stacks"
   COMMON_SERVICES_DIR="common-services"
@@ -43,7 +43,7 @@ class ResourceFilesKeeper():
   EXTENSIONS_DIR="extensions"
 
   # For these directories archives are created
-  ARCHIVABLE_DIRS = [HOOKS_DIR, PACKAGE_DIR]
+  ARCHIVABLE_DIRS = [PACKAGE_DIR]
 
   HASH_SUM_FILE=".hash"
   ARCHIVE_NAME="archive.zip"
@@ -116,6 +116,9 @@ class ResourceFilesKeeper():
     # Iterate over extension directories
     self._iter_update_directory_archive(valid_extensions)
 
+    # stack hooks
+    self._update_resources_subdir_archive(self.STACK_HOOKS_DIR)
+
     # custom actions
     self._update_resources_subdir_archive(self.CUSTOM_ACTIONS_DIR)
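
The hooks directory is dropped from ARCHIVABLE_DIRS (it no longer exists inside each stack), and the shared stack-hooks directory is instead archived once at the resources root, like custom actions above. For context, a minimal sketch of the hash-then-archive idea behind these updates, assuming the HASH_SUM_FILE/ARCHIVE_NAME convention shown above; the hashing algorithm and traversal here are assumptions, not the actual ResourceFilesKeeper code:

    import hashlib, os, zipfile

    HASH_SUM_FILE = ".hash"
    ARCHIVE_NAME = "archive.zip"

    def refresh_archive(directory):
        # Hash the directory contents; rebuild archive.zip only when the
        # recorded hash changes, so agents re-fetch hooks only after edits.
        digest = hashlib.md5()
        for root, _, files in sorted(os.walk(directory)):
            for name in sorted(files):
                if name in (HASH_SUM_FILE, ARCHIVE_NAME):
                    continue
                with open(os.path.join(root, name), 'rb') as f:
                    digest.update(f.read())
        new_hash = digest.hexdigest()
        hash_file = os.path.join(directory, HASH_SUM_FILE)
        old_hash = open(hash_file).read().strip() if os.path.exists(hash_file) else None
        if new_hash != old_hash:
            with zipfile.ZipFile(os.path.join(directory, ARCHIVE_NAME), 'w') as z:
                for root, _, files in sorted(os.walk(directory)):
                    for name in sorted(files):
                        if name not in (HASH_SUM_FILE, ARCHIVE_NAME):
                            full = os.path.join(root, name)
                            z.write(full, os.path.relpath(full, directory))
            with open(hash_file, 'w') as f:
                f.write(new_hash)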
 

+ 1 - 1
ambari-server/src/main/python/ambari_server/serverConfiguration.py

@@ -562,7 +562,7 @@ class ServerConfigDefaultsLinux(ServerConfigDefaults):
       (AmbariPath.get("/var/lib/ambari-server/data/cache/"), "700", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/common-services/STORM/0.9.1/package/files/wordCount.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.1.GlusterFS/services/STORM/package/files/wordCount.jar"), "644", "{0}", False),
-      (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
+      (AmbariPath.get("/var/lib/ambari-server/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/2.1/services/SMARTSENSE/package/files/view/smartsense-ambari-view-1.4.0.0.60.jar"), "644", "{0}", False),
       (AmbariPath.get("/var/lib/ambari-server/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar"), "644", "{0}", False),
       # Also, /etc/ambari-server/conf/password.dat

+ 1 - 1
ambari-server/src/main/python/ambari_server/setupMpacks.py

@@ -818,7 +818,7 @@ def _install_mpack(options, replay_mode=False, is_upgrade=False):
 
   print_info_msg("Management pack {0}-{1} successfully installed! Please restart ambari-server.".format(mpack_name, mpack_version))
   return mpack_metadata, mpack_name, mpack_version, mpack_staging_dir, mpack_archive_path
-
+# TODO
 def _execute_hook(mpack_metadata, hook_name, base_dir):
   if "hooks" in mpack_metadata:
     hooks = mpack_metadata["hooks"]

+ 2 - 2
ambari-server/src/main/resources/scripts/Ambaripreupload.py

@@ -439,7 +439,7 @@ with Environment() as env:
   # jar shouldn't be used before (read comment below)
   File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
        mode=0644,
-       content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
+       content=StaticFile("/var/lib/ambari-agent/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar")
   )
   # Create everything in one jar call (this is fast).
   # (! Before everything should be executed with action="create_on_execute/delete_on_execute" for this time-optimization to work)
@@ -460,4 +460,4 @@ with Environment() as env:
       sudo = True
     )
 
-  print "Ambari preupload script completed."
+  print "Ambari preupload script completed."

+ 1 - 1
ambari-server/src/main/resources/scripts/post-user-creation-hook.sh

@@ -135,7 +135,7 @@ check_tools
 prepare_input
 
 # the default implementation creates user home folders; the first argument must be the username
-ambari_sudo "yarn jar /var/lib/ambari-server/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar $JSON_INPUT"
+ambari_sudo "yarn jar /var/lib/ambari-server/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar $JSON_INPUT"
 
 if [ "$DEBUG" -gt "0" ]; then echo "Switch debug OFF";set -x;unset DEBUG; else echo "debug: OFF"; fi
 unset DEBUG

+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/hook.py → ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/hook.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/params.py → ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/params.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py → ambari-server/src/main/resources/stack-hooks/after-INSTALL/scripts/shared_initialization.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/files/changeToSecureUid.sh → ambari-server/src/main/resources/stack-hooks/before-ANY/files/changeToSecureUid.sh


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/hook.py → ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/hook.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/params.py → ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/params.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-ANY/scripts/shared_initialization.py → ambari-server/src/main/resources/stack-hooks/before-ANY/scripts/shared_initialization.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/hook.py → ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/hook.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/params.py → ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/params.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/repo_initialization.py → ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/repo_initialization.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-INSTALL/scripts/shared_initialization.py → ambari-server/src/main/resources/stack-hooks/before-INSTALL/scripts/shared_initialization.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-RESTART/scripts/hook.py → ambari-server/src/main/resources/stack-hooks/before-RESTART/scripts/hook.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/checkForFormat.sh → ambari-server/src/main/resources/stack-hooks/before-START/files/checkForFormat.sh


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar → ambari-server/src/main/resources/stack-hooks/before-START/files/fast-hdfs-resource.jar


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/task-log4j.properties → ambari-server/src/main/resources/stack-hooks/before-START/files/task-log4j.properties


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/files/topology_script.py → ambari-server/src/main/resources/stack-hooks/before-START/files/topology_script.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/custom_extensions.py → ambari-server/src/main/resources/stack-hooks/before-START/scripts/custom_extensions.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/hook.py → ambari-server/src/main/resources/stack-hooks/before-START/scripts/hook.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/params.py → ambari-server/src/main/resources/stack-hooks/before-START/scripts/params.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/rack_awareness.py → ambari-server/src/main/resources/stack-hooks/before-START/scripts/rack_awareness.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/scripts/shared_initialization.py → ambari-server/src/main/resources/stack-hooks/before-START/scripts/shared_initialization.py


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/commons-logging.properties.j2 → ambari-server/src/main/resources/stack-hooks/before-START/templates/commons-logging.properties.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/exclude_hosts_list.j2 → ambari-server/src/main/resources/stack-hooks/before-START/templates/exclude_hosts_list.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/hadoop-metrics2.properties.j2 → ambari-server/src/main/resources/stack-hooks/before-START/templates/hadoop-metrics2.properties.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/health_check.j2 → ambari-server/src/main/resources/stack-hooks/before-START/templates/health_check.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/include_hosts_list.j2 → ambari-server/src/main/resources/stack-hooks/before-START/templates/include_hosts_list.j2


+ 0 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/before-START/templates/topology_mappings.data.j2 → ambari-server/src/main/resources/stack-hooks/before-START/templates/topology_mappings.data.j2


+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.3.ECS/services/ECS/package/scripts/ecs_client.py

@@ -59,7 +59,7 @@ class ECSClient(Script):
 
     File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
            mode=0644,
-           content=StaticFile("/var/lib/ambari-agent/cache/stacks/HDP/2.0.6/hooks/before-START/files/fast-hdfs-resource.jar")
+           content=StaticFile("/var/lib/ambari-agent/cache/stack-hooks/before-START/files/fast-hdfs-resource.jar")
     )
 
   def setup_hadoop_env(self, env):

+ 0 - 37
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/hook.py

@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management.libraries.script.hook import Hook
-from shared_initialization import link_configs
-from shared_initialization import setup_config
-from shared_initialization import setup_stack_symlinks
-
-class AfterInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_stack_symlinks(self.stroutfile)
-    setup_config()
-
-    link_configs(self.stroutfile)
-
-if __name__ == "__main__":
-  AfterInstallHook().execute()

+ 0 - 109
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/params.py

@@ -1,109 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.script import Script
-from resource_management.libraries.script.script import get_config_lock_file
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.version import format_stack_version
-from string import lower
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-
-is_parallel_execution_enabled = int(default("/agentConfigParams/agent/parallel_execution", 0)) == 1
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-# service name
-service_name = config['serviceName']
-
-# logsearch configuration
-logsearch_logfeeder_conf = "/etc/ambari-logsearch-logfeeder/conf"
-
-agent_cache_dir = config['hostLevelParams']['agentCacheDir']
-service_package_folder = config['commandParams']['service_package_folder']
-logsearch_service_name = service_name.lower().replace("_", "-")
-logsearch_config_file_name = 'input.config-' + logsearch_service_name + ".json"
-logsearch_config_file_path = agent_cache_dir + "/" + service_package_folder + "/templates/" + logsearch_config_file_name + ".j2"
-logsearch_config_file_exists = os.path.isfile(logsearch_config_file_path)
-
-# default hadoop params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_conf_empty_dir = None
-
-versioned_stack_root = format('{stack_root}/current')
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#users and groups
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-user_group = config['configurations']['cluster-env']['user_group']
-
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-has_namenode = not len(namenode_host) == 0
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-
-link_configs_lock_file = get_config_lock_file()
-stack_select_lock_file = os.path.join(tmp_dir, "stack_select_lock_file")
-
-upgrade_suspended = default("/roleParams/upgrade_suspended", False)

+ 0 - 140
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -1,140 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import os
-
-import ambari_simplejson as json
-from ambari_jinja2 import Environment as JinjaEnvironment
-from resource_management.core.logger import Logger
-from resource_management.core.resources.system import Directory, File
-from resource_management.core.source import InlineTemplate, Template
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions.default import default
-from resource_management.libraries.functions.format import format
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.fcntl_based_process_lock import FcntlBasedProcessLock
-from resource_management.libraries.resources.xml_config import XmlConfig
-from resource_management.libraries.script import Script
-
-
-def setup_stack_symlinks(struct_out_file):
-  """
-  Invokes <stack-selector-tool> set all against a calculated fully-qualified, "normalized" version based on a
-  stack version, such as "2.3". This should always be called after a component has been
-  installed to ensure that all HDP pointers are correct. The stack upgrade logic does not
-  interact with this since it's done via a custom command and will not trigger this hook.
-  :return:
-  """
-  import params
-  if params.upgrade_suspended:
-    Logger.warning("Skipping running stack-selector-tool because there is a suspended upgrade")
-    return
-
-  if params.host_sys_prepped:
-    Logger.warning("Skipping running stack-selector-tool becase this is a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.")
-    return
-
-  # get the packages which the stack-select tool should be used on
-  stack_select_packages = stack_select.get_packages(stack_select.PACKAGE_SCOPE_INSTALL)
-  if stack_select_packages is None:
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("There is no advertised version for this component stored in {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.stack_select_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for package in stack_select_packages:
-      stack_select.select(package, json_version)
-
-
-def setup_config():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-
-  is_hadoop_conf_dir_present = False
-  if hasattr(params, "hadoop_conf_dir") and params.hadoop_conf_dir is not None and os.path.exists(params.hadoop_conf_dir):
-    is_hadoop_conf_dir_present = True
-  else:
-    Logger.warning("Parameter hadoop_conf_dir is missing or directory does not exist. This is expected if this host does not have any Hadoop components.")
-
-  if is_hadoop_conf_dir_present and (params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS'):
-    # create core-site only if the hadoop config diretory exists
-    XmlConfig("core-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['core-site'],
-              configuration_attributes=params.config['configuration_attributes']['core-site'],
-              owner=params.hdfs_user,
-              group=params.user_group,
-              only_if=format("ls {hadoop_conf_dir}"))
-
-  Directory(params.logsearch_logfeeder_conf,
-            mode=0755,
-            cd_access='a',
-            create_parents=True
-            )
-
-  if params.logsearch_config_file_exists:
-    File(format("{logsearch_logfeeder_conf}/" + params.logsearch_config_file_name),
-         content=Template(params.logsearch_config_file_path,extra_imports=[default])
-         )
-  else:
-    Logger.warning('No logsearch configuration exists at ' + params.logsearch_config_file_path)
-
-
-def load_version(struct_out_file):
-  """
-  Load version from file.  Made a separate method for testing
-  """
-  json_version = None
-  try:
-    if os.path.exists(struct_out_file):
-      with open(struct_out_file, 'r') as fp:
-        json_info = json.load(fp)
-        json_version = json_info['version']
-  except:
-    pass
-
-  return json_version
-  
-
-def link_configs(struct_out_file):
-  """
-  Links configs, only on a fresh install of HDP-2.3 and higher
-  """
-  import params
-
-  if not Script.is_stack_greater_or_equal("2.3"):
-    Logger.info("Can only link configs for HDP-2.3 and higher.")
-    return
-
-  json_version = load_version(struct_out_file)
-
-  if not json_version:
-    Logger.info("Could not load 'version' from {0}".format(struct_out_file))
-    return
-
-  # On parallel command execution this should be executed by a single process at a time.
-  with FcntlBasedProcessLock(params.link_configs_lock_file, enabled = params.is_parallel_execution_enabled, skip_fcntl_failures = True):
-    for k, v in conf_select.get_package_dirs().iteritems():
-      conf_select.convert_conf_directories_to_symlinks(k, json_version, v)

+ 0 - 53
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/files/changeToSecureUid.sh

@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-set -e
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-sudo_prefix="/var/lib/ambari-agent/ambari-sudo.sh -H -E"
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-$sudo_prefix usermod -u $newUid $username && for dir in ${dir_array[@]} ; do ls $dir 2> /dev/null && echo "Changing permission for $dir" && $sudo_prefix chown -Rh $newUid $dir ; done
-exit 0

+ 0 - 36
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/hook.py

@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from shared_initialization import *
-
-class BeforeAnyHook(Hook):
-
-  def hook(self, env):
-    import params
-    env.set_params(params)
-
-    setup_users()
-    if params.has_namenode or params.dfs_type == 'HCFS':
-      setup_hadoop_env()
-    setup_java()
-
-if __name__ == "__main__":
-  BeforeAnyHook().execute()
-

+ 0 - 254
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/params.py

@@ -1,254 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import collections
-import re
-import os
-import ast
-
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-from resource_management.libraries.script import Script
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions.is_empty import is_empty
-from resource_management.libraries.functions.version import format_stack_version
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.libraries.functions.expect import expect
-from resource_management.libraries.functions import StackFeature
-from resource_management.libraries.functions.stack_features import check_stack_feature
-from resource_management.libraries.functions.stack_features import get_stack_feature_version
-from ambari_commons.os_check import OSCheck
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = config['hostLevelParams']['java_home']
-java_version = expect("/hostLevelParams/java_version", int)
-jdk_location = config['hostLevelParams']['jdk_location']
-
-sudo = AMBARI_SUDO_BINARY
-
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-upgrade_type = Script.get_upgrade_type(default("/commandParams/upgrade_type", ""))
-version = default("/commandParams/version", None)
-# Handle upgrade and downgrade
-if (upgrade_type is not None) and version:
-  stack_version_formatted = format_stack_version(version)
-
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
-
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-
-# Some datanode settings
-dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
-dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
-dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
-dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
-secure_dn_ports_are_in_use = False
-
-def get_port(address):
-  """
-  Extracts port from the address like 0.0.0.0:1019
-  """
-  if address is None:
-    return None
-  m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
-  if m is not None:
-    return int(m.group(2))
-  else:
-    return None
-
-def is_secure_port(port):
-  """
-  Returns True if port is root-owned at *nix systems
-  """
-  if port is not None:
-    return port < 1024
-  else:
-    return False
-
-# hadoop default params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-
-# upgrades would cause these directories to have a version instead of "current"
-# which would cause a lot of problems when writing out hadoop-env.sh; instead
-# force the use of "current" in the hook
-hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
-hadoop_home = stack_select.get_hadoop_dir("home")
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-
-hadoop_conf_empty_dir = None
-hadoop_secure_dn_user = hdfs_user
-hadoop_dir = "/etc/hadoop"
-versioned_stack_root = format('{stack_root}/current')
-hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir")
-datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
-is_datanode_max_locked_memory_set = not is_empty(config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory'])
-
-if not security_enabled:
-  hadoop_secure_dn_user = '""'
-else:
-  dfs_dn_port = get_port(dfs_dn_addr)
-  dfs_dn_http_port = get_port(dfs_dn_http_addr)
-  dfs_dn_https_port = get_port(dfs_dn_https_addr)
-  # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
-  if dfs_http_policy == "HTTPS_ONLY":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_https_port)
-  elif dfs_http_policy == "HTTP_AND_HTTPS":
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port) or is_secure_port(dfs_dn_https_port)
-  else:   # params.dfs_http_policy == "HTTP_ONLY" or not defined:
-    secure_dn_ports_are_in_use = is_secure_port(dfs_dn_port) or is_secure_port(dfs_dn_http_port)
-  if secure_dn_ports_are_in_use:
-    hadoop_secure_dn_user = hdfs_user
-  else:
-    hadoop_secure_dn_user = '""'
-
-#hadoop params
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-nfsgateway_heapsize = config['configurations']['hadoop-env']['nfsgateway_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-hadoop_env_sh_template = config['configurations']['hadoop-env']['content']
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-oozie_user = config['configurations']['oozie-env']["oozie_user"]
-falcon_user = config['configurations']['falcon-env']["falcon_user"]
-ranger_user = config['configurations']['ranger-env']["ranger_user"]
-zeppelin_user = config['configurations']['zeppelin-env']["zeppelin_user"]
-zeppelin_group = config['configurations']['zeppelin-env']["zeppelin_group"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-falcon_server_hosts = default("/clusterHostInfo/falcon_server_hosts", [])
-ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
-zeppelin_master_hosts = default("/clusterHostInfo/zeppelin_master_hosts", [])
-
-# get the correct version to use for checking stack features
-version_for_stack_feature_checks = get_stack_feature_version(config)
-
-has_namenode = not len(namenode_host) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_tez = 'tez-site' in config['configurations']
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_falcon_server_hosts = not len(falcon_server_hosts) == 0
-has_ranger_admin = not len(ranger_admin_hosts) == 0
-has_zeppelin_master = not len(zeppelin_master_hosts) == 0
-stack_supports_zk_security = check_stack_feature(StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-
-if has_namenode or dfs_type == 'HCFS':
-    hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-    hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-ranger_group = config['configurations']['ranger-env']['ranger_group']
-dfs_cluster_administrators_group = config['configurations']['hdfs-site']["dfs.cluster.administrators"]
-
-sysprep_skip_create_users_and_groups = default("/configurations/cluster-env/sysprep_skip_create_users_and_groups", False)
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-fetch_nonlocal_groups = config['configurations']['cluster-env']["fetch_nonlocal_groups"]
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-user_to_groups_dict = {}
-
-#Append new user-group mapping to the dict
-try:
-  user_group_map = ast.literal_eval(config['hostLevelParams']['user_groups'])
-  for key in user_group_map.iterkeys():
-    user_to_groups_dict[key] = user_group_map[key]
-except ValueError:
-  print('User Group mapping (user_groups) is missing in the hostLevelParams')
-
-user_to_gid_dict = collections.defaultdict(lambda:user_group)
-
-user_list = json.loads(config['hostLevelParams']['user_list'])
-group_list = json.loads(config['hostLevelParams']['group_list'])
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-tez_am_view_acls = config['configurations']['tez-site']["tez.am.view-acls"]
-override_uid = str(default("/configurations/cluster-env/override_uid", "true")).lower()
-
-# if NN HA on secure cluster, access ZooKeeper securely
-if stack_supports_zk_security and dfs_ha_enabled and security_enabled:
-  hadoop_zkfc_opts=format("-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username=zookeeper -Djava.security.auth.login.config={hadoop_conf_secure_dir}/hdfs_jaas.conf -Dzookeeper.sasl.clientconfig=Client")
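For reference, the dfs_ha_enabled flag deleted above is derived purely from hdfs-site properties. A minimal standalone sketch of the same derivation, assuming hypothetical property values (no Ambari imports required):

# Sketch only: hdfs_site values below are hypothetical; the real code reads
# the same keys from the command JSON via default().
hdfs_site = {
    'dfs.internal.nameservices': 'mycluster',
    'dfs.ha.namenodes.mycluster': 'nn1,nn2',
}
nameservices = hdfs_site.get('dfs.internal.nameservices') or hdfs_site.get('dfs.nameservices')
namenode_ids = hdfs_site.get('dfs.ha.namenodes.%s' % nameservices)
dfs_ha_enabled = bool(namenode_ids) and len(namenode_ids.split(',')) > 1
print(dfs_ha_enabled)  # True: two NameNode ids for the nameservice means HA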

+ 0 - 239
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-ANY/scripts/shared_initialization.py

@@ -1,239 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import getpass
-import tempfile
-from copy import copy
-from resource_management.libraries.functions.version import compare_versions
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  should_create_users_and_groups = False
-  if params.host_sys_prepped:
-    should_create_users_and_groups = not params.sysprep_skip_create_users_and_groups
-  else:
-    should_create_users_and_groups = not params.ignore_groupsusers_create
-
-  if should_create_users_and_groups:
-    for group in params.group_list:
-      Group(group,
-      )
-
-    for user in params.user_list:
-      User(user,
-          gid = params.user_to_gid_dict[user],
-          groups = params.user_to_groups_dict[user],
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-      )
-
-    if params.override_uid == "true":
-      set_uid(params.smoke_user, params.smoke_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for smoke user as host is sys prepped')
-  else:
-    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
-    pass
-
-
-  if params.has_hbase_masters:
-    Directory (params.hbase_tmp_dir,
-               owner = params.hbase_user,
-               mode=0775,
-               create_parents = True,
-               cd_access="a",
-    )
-    if params.override_uid == "true":
-      set_uid(params.hbase_user, params.hbase_user_dirs)
-    else:
-      Logger.info('Skipping setting uid for hbase user as host is sys prepped')
-
-  if should_create_users_and_groups:
-    if params.has_namenode:
-      create_dfs_cluster_admins()
-    if params.has_tez and params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.3') >= 0:
-      create_tez_am_view_acls()
-  else:
-    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
-
-def create_dfs_cluster_admins():
-  """
-  dfs.cluster.administrators supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
-
-  User(params.hdfs_user,
-    groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-  )
-
-def create_tez_am_view_acls():
-
-  """
-  tez.am.view-acls supports the format <comma-delimited list of usernames><space><comma-delimited list of group names>
-  """
-  import params
-
-  if not params.tez_am_view_acls.startswith("*"):
-    create_users_and_groups(params.tez_am_view_acls)
-
-def create_users_and_groups(user_and_groups):
-
-  import params
-
-  parts = re.split('\s', user_and_groups)
-  if len(parts) == 1:
-    parts.append("")
-
-  users_list = parts[0].strip(",").split(",") if parts[0] else []
-  groups_list = parts[1].strip(",").split(",") if parts[1] else []
-
-  if users_list:
-    User(users_list,
-          fetch_nonlocal_groups = params.fetch_nonlocal_groups
-    )
-
-  if groups_list:
-    Group(copy(groups_list),
-    )
-  return groups_list
-    
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  import params
-
-  File(format("{tmp_dir}/changeUid.sh"),
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
-  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
-          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
-    
-def setup_hadoop_env():
-  import params
-  stackversion = params.stack_version_unformatted
-  Logger.info("FS Type: {0}".format(params.dfs_type))
-  if params.has_namenode or stackversion.find('Gluster') >= 0 or params.dfs_type == 'HCFS':
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-
-    # create /etc/hadoop
-    Directory(params.hadoop_dir, mode=0755)
-
-    # HDP < 2.2 used a conf -> conf.empty symlink for /etc/hadoop/
-    if Script.is_stack_less_than("2.2"):
-      Directory(params.hadoop_conf_empty_dir, create_parents = True, owner="root",
-        group=params.user_group )
-
-      Link(params.hadoop_conf_dir, to=params.hadoop_conf_empty_dir,
-         not_if=format("ls {hadoop_conf_dir}"))
-
-    # write out hadoop-env.sh, but only if the directory exists
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
-        group=params.user_group,
-        content=InlineTemplate(params.hadoop_env_sh_template))
-
-    # Create tmp dir for java.io.tmpdir
-    # Handle a situation when /tmp is set to noexec
-    Directory(params.hadoop_java_io_tmpdir,
-              owner=params.hdfs_user,
-              group=params.user_group,
-              mode=01777
-    )
-
-def setup_java():
-  """
-  Install the JDK using specific params.
-  Also install the Ambari JDK if the stack JDK and the Ambari JDK differ.
-  """
-  import params
-  __setup_java(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name)
-  if params.ambari_java_home and params.ambari_java_home != params.java_home:
-    __setup_java(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name)
-
-def __setup_java(custom_java_home, custom_jdk_name):
-  """
-  Installs the JDK using specific params that come from ambari-server
-  """
-  import params
-  java_exec = format("{custom_java_home}/bin/java")
-
-  if not os.path.isfile(java_exec):
-    if not params.jdk_name: # if custom jdk is used.
-      raise Fail(format("Unable to access {java_exec}. Confirm you have copied jdk to this host."))
-
-    jdk_curl_target = format("{tmp_dir}/{custom_jdk_name}")
-    java_dir = os.path.dirname(params.java_home)
-
-    Directory(params.artifact_dir,
-              create_parents = True,
-              )
-
-    File(jdk_curl_target,
-         content = DownloadSource(format("{jdk_location}/{custom_jdk_name}")),
-         not_if = format("test -f {jdk_curl_target}")
-         )
-
-    File(jdk_curl_target,
-         mode = 0755,
-         )
-
-    tmp_java_dir = tempfile.mkdtemp(prefix="jdk_tmp_", dir=params.tmp_dir)
-
-    try:
-      if params.jdk_name.endswith(".bin"):
-        chmod_cmd = ("chmod", "+x", jdk_curl_target)
-        install_cmd = format("cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-      elif params.jdk_name.endswith(".gz"):
-        chmod_cmd = ("chmod","a+x", java_dir)
-        install_cmd = format("cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
-
-      Directory(java_dir
-                )
-
-      Execute(chmod_cmd,
-              sudo = True,
-              )
-
-      Execute(install_cmd,
-              )
-
-    finally:
-      Directory(tmp_java_dir, action="delete")
-
-    File(format("{custom_java_home}/bin/java"),
-         mode=0755,
-         cd_access="a",
-         )
-    Execute(('chmod', '-R', '755', params.java_home),
-            sudo = True,
-            )
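The "<users><space><groups>" format parsed by create_users_and_groups above can be shown with a standalone sketch; the input string is a hypothetical dfs.cluster.administrators value:

import re

user_and_groups = "hdfs,admin hadoop,ops"  # hypothetical value: two users, two groups
parts = re.split(r'\s', user_and_groups)
if len(parts) == 1:
    parts.append("")
users_list = parts[0].strip(",").split(",") if parts[0] else []
groups_list = parts[1].strip(",").split(",") if parts[1] else []
print(users_list)   # ['hdfs', 'admin']
print(groups_list)  # ['hadoop', 'ops']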

+ 0 - 37
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/hook.py

@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-from repo_initialization import *
-
-class BeforeInstallHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-    
-    install_repos()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeInstallHook().execute()

+ 0 - 115
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/params.py

@@ -1,115 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.constants import AMBARI_SUDO_BINARY
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from resource_management.core.system import System
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import default, format
-from resource_management.libraries.functions.expect import expect
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-sudo = AMBARI_SUDO_BINARY
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-agent_stack_retry_on_unavailability = config['hostLevelParams']['agent_stack_retry_on_unavailability']
-agent_stack_retry_count = expect("/hostLevelParams/agent_stack_retry_count", int)
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-#users and groups
-hbase_user = config['configurations']['hbase-env']['hbase_user']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-gmetad_user = config['configurations']['ganglia-env']["gmetad_user"]
-gmond_user = config['configurations']['ganglia-env']["gmond_user"]
-tez_user = config['configurations']['tez-env']["tez_user"]
-
-user_group = config['configurations']['cluster-env']['user_group']
-proxyuser_group = default("/configurations/hadoop-env/proxyuser_group","users")
-
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-
-# repo templates
-repo_rhel_suse =  config['configurations']['cluster-env']['repo_suse_rhel_template']
-repo_ubuntu =  config['configurations']['cluster-env']['repo_ubuntu_template']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-falcon_host =  default('/clusterHostInfo/falcon_server_hosts', [])
-
-has_sqoop_client = 'sqoop-env' in config['configurations']
-has_namenode = not len(namenode_host) == 0
-has_hs = not len(hs_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-has_falcon_server = not len(falcon_host) == 0
-has_tez = 'tez-site' in config['configurations']
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = "/tmp/hbase-hbase"
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-#java params
-java_home = config['hostLevelParams']['java_home']
-artifact_dir = format("{tmp_dir}/AMBARI-artifacts/")
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-ignore_groupsusers_create = default("/configurations/cluster-env/ignore_groupsusers_create", False)
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-smoke_user_dirs = format("/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-if has_hbase_masters:
-  hbase_user_dirs = format("/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-#repo params
-repo_info = config['hostLevelParams']['repo_info']
-service_repo_info = default("/hostLevelParams/service_repo_info",None)
-
-repo_file = default("/repositoryFile", None)
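Most of the assignments above go through default(), which resolves a /-separated path into the command JSON and falls back when a key is absent. A simplified re-implementation for illustration only (the real helper is resource_management.libraries.functions.default; default_lookup and command_json here are stand-ins):

def default_lookup(config, path, fallback):
    # Walk the command JSON one path segment at a time; fall back on any miss.
    node = config
    for part in path.strip('/').split('/'):
        if not isinstance(node, dict) or part not in node:
            return fallback
        node = node[part]
    return node

command_json = {'configurations': {'cluster-env': {'user_group': 'hadoop'}}}
print(default_lookup(command_json, '/configurations/cluster-env/user_group', 'hadoop'))     # 'hadoop'
print(default_lookup(command_json, '/configurations/hadoop-env/proxyuser_group', 'users'))  # fallback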

+ 0 - 76
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/repo_initialization.py

@@ -1,76 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.resources.repository import Repository
-from resource_management.libraries.functions.repository_util import create_repo_files, CommandRepository
-from resource_management.core.logger import Logger
-import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
-
-# components_list = repoName + postfix
-_UBUNTU_REPO_COMPONENTS_POSTFIX = ["main"]
-
-def _alter_repo(action, repo_string, repo_template):
-  """
-  @param action: "delete" or "create"
-  @param repo_string: e.g. "[{\"baseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\",\"osType\":\"centos6\",\"repoId\":\"HDP-2.0._\",\"repoName\":\"HDP\",\"defaultBaseUrl\":\"http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0\"}]"
-  """
-  repo_dicts = json.loads(repo_string)
-
-  if not isinstance(repo_dicts, list):
-    repo_dicts = [repo_dicts]
-
-  if 0 == len(repo_dicts):
-    Logger.info("Repository list is empty. Ambari may not be managing the repositories.")
-  else:
-    Logger.info("Initializing {0} repositories".format(str(len(repo_dicts))))
-
-  for repo in repo_dicts:
-    if not 'baseUrl' in repo:
-      repo['baseUrl'] = None
-    if not 'mirrorsList' in repo:
-      repo['mirrorsList'] = None
-    
-    ubuntu_components = [ repo['repoName'] ] + _UBUNTU_REPO_COMPONENTS_POSTFIX
-    
-    Repository(repo['repoId'],
-               action = action,
-               base_url = repo['baseUrl'],
-               mirror_list = repo['mirrorsList'],
-               repo_file_name = repo['repoName'],
-               repo_template = repo_template,
-               components = ubuntu_components, # ubuntu specific
-    )
-
-def install_repos():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  template = params.repo_rhel_suse if OSCheck.is_suse_family() or OSCheck.is_redhat_family() else params.repo_ubuntu
-
-  # use this newer way of specifying repositories, if available
-  if params.repo_file is not None:
-    create_repo_files(template, CommandRepository(params.repo_file))
-    return
-
-  _alter_repo("create", params.repo_info, template)
-
-  if params.service_repo_info:
-    _alter_repo("create", params.service_repo_info, template)
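The repo_info consumed by _alter_repo above is a JSON array of repository descriptors. A sketch with assumed values, showing what each entry contributes to a Repository resource:

import json

# Hypothetical hostLevelParams/repo_info payload.
repo_info = ('[{"repoId": "HDP-2.6", "repoName": "HDP", "osType": "centos7", '
             '"baseUrl": "http://repo.example.com/HDP/centos7/2.x"}]')
for repo in json.loads(repo_info):
    repo.setdefault('baseUrl', None)      # the deleted code fills these in the same way
    repo.setdefault('mirrorsList', None)
    print(repo['repoId'], repo['repoName'], repo['baseUrl'])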

+ 0 - 37
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-INSTALL/scripts/shared_initialization.py

@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import stack_tools
-from resource_management.libraries.functions.version import compare_versions
-from resource_management.core.resources.packaging import Package
-
-def install_packages():
-  import params
-  if params.host_sys_prepped:
-    return
-
-  packages = ['unzip', 'curl']
-  if params.stack_version_formatted != "" and compare_versions(params.stack_version_formatted, '2.2') >= 0:
-    stack_selector_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
-    packages.append(stack_selector_package)
-  Package(packages,
-          retry_on_repo_unavailability=params.agent_stack_retry_on_unavailability,
-          retry_count=params.agent_stack_retry_count)
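The stack-selector package above is only appended on stacks at or above 2.2. A standalone sketch of that gate, with an assumed stack version, a simplified comparison in place of compare_versions, and a hypothetical selector package name:

def version_tuple(v):
    return tuple(int(x) for x in v.split('.'))

stack_version_formatted = '2.6.0.0'  # assumed
packages = ['unzip', 'curl']
if stack_version_formatted and version_tuple(stack_version_formatted) >= version_tuple('2.2'):
    packages.append('hdp-select')    # hypothetical stack-selector package name
print(packages)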

+ 0 - 29
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-RESTART/scripts/hook.py

@@ -1,29 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class BeforeRestartHook(Hook):
-
-  def hook(self, env):
-    self.run_custom_hook('before-START')
-
-if __name__ == "__main__":
-  BeforeRestartHook().execute()
-

+ 0 - 65
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/checkForFormat.sh

@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export bin_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  /var/lib/ambari-agent/ambari-sudo.sh rm -f ${mark_file}
-  /var/lib/ambari-agent/ambari-sudo.sh mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    /var/lib/ambari-agent/ambari-sudo.sh su ${hdfs_user} - -s /bin/bash -c "export PATH=$PATH:$bin_dir ; yes Y | hdfs --config ${conf_dir} ${command}"
-    (( EXIT_CODE = $EXIT_CODE | $? ))
-  else
-    echo "ERROR: NameNode directory(s) is non-empty. Will not format the NameNode. List of non-empty NameNode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

Binary
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/fast-hdfs-resource.jar


+ 0 - 134
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/task-log4j.properties

@@ -1,134 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
- 
-# Removes "deprecated" messages
-log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN

+ 0 - 66
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/files/topology_script.py

@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import sys, os
-from string import join
-import ConfigParser
-
-
-DEFAULT_RACK = "/default-rack"
-DATA_FILE_NAME =  os.path.dirname(os.path.abspath(__file__)) + "/topology_mappings.data"
-SECTION_NAME = "network_topology"
-
-class TopologyScript():
-
-  def load_rack_map(self):
-    try:
-      #RACK_MAP contains both host name vs rack and ip vs rack mappings
-      mappings = ConfigParser.ConfigParser()
-      mappings.read(DATA_FILE_NAME)
-      return dict(mappings.items(SECTION_NAME))
-    except ConfigParser.NoSectionError:
-      return {}
-
-  def get_racks(self, rack_map, args):
-    if len(args) == 1:
-      return DEFAULT_RACK
-    else:
-      return join([self.lookup_by_hostname_or_ip(input_argument, rack_map) for input_argument in args[1:]],)
-
-  def lookup_by_hostname_or_ip(self, hostname_or_ip, rack_map):
-    #try looking up by hostname
-    rack = rack_map.get(hostname_or_ip)
-    if rack is not None:
-      return rack
-    #try looking up by ip
-    rack = rack_map.get(self.extract_ip(hostname_or_ip))
-    #try by localhost since hadoop could be passing in 127.0.0.1 which might not be mapped
-    return rack if rack is not None else rack_map.get("localhost.localdomain", DEFAULT_RACK)
-
-  #strips out port and slashes in case hadoop passes in something like 127.0.0.1/127.0.0.1:50010
-  def extract_ip(self, container_string):
-    return container_string.split("/")[0].split(":")[0]
-
-  def execute(self, args):
-    rack_map = self.load_rack_map()
-    rack = self.get_racks(rack_map, args)
-    print rack
-
-if __name__ == "__main__":
-  TopologyScript().execute(sys.argv)
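The lookup order in topology_script.py above is: exact hostname, then the IP extracted from a host/ip:port argument, then a localhost fallback, then the default rack. A sketch with an assumed mapping:

rack_map = {'dn1.example.com': '/rack-1', '10.0.0.11': '/rack-1'}  # assumed mapping

def extract_ip(arg):
    # Strip "host/ip:port" shapes, e.g. "127.0.0.1/127.0.0.1:50010", to a bare key.
    return arg.split('/')[0].split(':')[0]

arg = '10.0.0.11:50010'
rack = rack_map.get(arg) or rack_map.get(extract_ip(arg)) \
       or rack_map.get('localhost.localdomain', '/default-rack')
print(rack)  # '/rack-1'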

+ 0 - 40
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/hook.py

@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from rack_awareness import create_topology_script_and_mapping
-from shared_initialization import setup_hadoop, setup_configs, create_javahome_symlink, setup_unlimited_key_jce_policy
-
-class BeforeStartHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    self.run_custom_hook('before-ANY')
-    env.set_params(params)
-
-    setup_hadoop()
-    setup_configs()
-    create_javahome_symlink()
-    create_topology_script_and_mapping()
-    setup_unlimited_key_jce_policy()
-
-if __name__ == "__main__":
-  BeforeStartHook().execute()

+ 0 - 364
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/params.py

@@ -1,364 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management.libraries.functions import conf_select
-from resource_management.libraries.functions import stack_select
-from resource_management.libraries.functions import default
-from resource_management.libraries.functions import format_jvm_option
-from resource_management.libraries.functions import format
-from resource_management.libraries.functions.version import format_stack_version, compare_versions
-from ambari_commons.os_check import OSCheck
-from resource_management.libraries.script.script import Script
-from resource_management.libraries.functions import get_kinit_path
-from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
-from resource_management.libraries.resources.hdfs_resource import HdfsResource
-
-config = Script.get_config()
-tmp_dir = Script.get_tmp_dir()
-artifact_dir = tmp_dir + "/AMBARI-artifacts"
-
-# Global flag enabling or disabling the sysprep feature
-host_sys_prepped = default("/hostLevelParams/host_sys_prepped", False)
-
-# Whether to skip copying fast-hdfs-resource.jar to /var/lib/ambari-agent/lib/.
-# The jar is required when tarballs are copied to HDFS, so set this to False in that case.
-sysprep_skip_copy_fast_jar_hdfs = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_copy_fast_jar_hdfs", False)
-
-# Whether to skip setting up the unlimited key JCE policy
-sysprep_skip_setup_jce = host_sys_prepped and default("/configurations/cluster-env/sysprep_skip_setup_jce", False)
-
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-
-dfs_type = default("/commandParams/dfs_type", "")
-stack_root = Script.get_stack_root()
-hadoop_conf_dir = "/etc/hadoop/conf"
-component_list = default("/localComponents", [])
-
-hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
-
-hadoop_metrics2_properties_content = config['configurations']['hadoop-metrics2.properties']['content']
-
-# hadoop default params
-mapreduce_libs_path = format("{stack_root}/current/hadoop-mapreduce-client/*")
-
-hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
-hadoop_lib_home = stack_select.get_hadoop_dir("lib")
-hadoop_bin = stack_select.get_hadoop_dir("sbin")
-hadoop_home = stack_select.get_hadoop_dir("home")
-create_lib_snappy_symlinks = False
-
-  
-current_service = config['serviceName']
-
-#security params
-security_enabled = config['configurations']['cluster-env']['security_enabled']
-
-ambari_server_resources_url = default("/hostLevelParams/jdk_location", None)
-if ambari_server_resources_url is not None and ambari_server_resources_url.endswith('/'):
-  ambari_server_resources_url = ambari_server_resources_url[:-1]
-
-# Unlimited key JCE policy params
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-unlimited_key_jce_required = default("/hostLevelParams/unlimited_key_jce_required", False)
-jdk_name = default("/hostLevelParams/jdk_name", None)
-java_home = default("/hostLevelParams/java_home", None)
-java_exec = "{0}/bin/java".format(java_home) if java_home is not None else "/bin/java"
-
-#users and groups
-has_hadoop_env = 'hadoop-env' in config['configurations']
-mapred_user = config['configurations']['mapred-env']['mapred_user']
-hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
-yarn_user = config['configurations']['yarn-env']['yarn_user']
-
-user_group = config['configurations']['cluster-env']['user_group']
-
-#hosts
-hostname = config["hostname"]
-ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-cluster_name = config["clusterName"]
-set_instanceId = "false"
-if 'cluster-env' in config['configurations'] and \
-        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
-  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
-  set_instanceId = "true"
-else:
-  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
-
-has_namenode = not len(namenode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_oozie_server = not len(oozie_servers) == 0
-has_hcat_server_host = not len(hcat_server_hosts) == 0
-has_hive_server_host = not len(hive_server_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_metric_collector = not len(ams_collector_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-metric_collector_port = None
-if has_metric_collector:
-  if 'cluster-env' in config['configurations'] and \
-      'metrics_collector_external_port' in config['configurations']['cluster-env']:
-    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
-  else:
-    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
-    if metric_collector_web_address.find(':') != -1:
-      metric_collector_port = metric_collector_web_address.split(':')[1]
-    else:
-      metric_collector_port = '6188'
-  if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
-    metric_collector_protocol = 'https'
-  else:
-    metric_collector_protocol = 'http'
-  metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
-  metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
-  metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
-
-  pass
-metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
-metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
-host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True)
-host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888)
-
-# Cluster Zookeeper quorum
-zookeeper_quorum = None
-if has_zk_host:
-  if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
-    zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
-  else:
-    zookeeper_clientPort = '2181'
-  zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts'])
-  # last port config
-  zookeeper_quorum += ':' + zookeeper_clientPort
-
-#hadoop params
-
-if has_namenode or dfs_type == 'HCFS':
-  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-  hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
-  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-
-hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
-hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
-hbase_tmp_dir = "/tmp/hbase-hbase"
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-oracle_driver_symlink_url = format("{ambari_server_resources_url}/oracle-jdbc-driver.jar")
-mysql_driver_symlink_url = format("{ambari_server_resources_url}/mysql-jdbc-driver.jar")
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
-  rca_enabled =  config['configurations']['hadoop-env']['rca_enabled']
-else:
-  rca_enabled = False
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-
-jsvc_path = "/usr/lib/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
-namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
-namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
-namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
-namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
-
-jtnode_opt_newsize = "200m"
-jtnode_opt_maxnewsize = "200m"
-jtnode_heapsize =  "1024m"
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
-
-#log4j.properties
-if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
-  log4j_props = config['configurations']['hdfs-log4j']['content']
-  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
-    log4j_props += config['configurations']['yarn-log4j']['content']
-else:
-  log4j_props = None
-
-refresh_topology = False
-command_params = config["commandParams"] if "commandParams" in config else None
-if command_params is not None:
-  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
-
-ambari_java_home = default("/commandParams/ambari_java_home", None)
-ambari_jdk_name = default("/commandParams/ambari_jdk_name", None)
-ambari_jce_name = default("/commandParams/ambari_jce_name", None)
-  
-ambari_libs_dir = "/var/lib/ambari-agent/lib"
-is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
-default_fs = config['configurations']['core-site']['fs.defaultFS']
-
-#host info
-all_hosts = default("/clusterHostInfo/all_hosts", [])
-all_racks = default("/clusterHostInfo/all_racks", [])
-all_ipv4_ips = default("/clusterHostInfo/all_ipv4_ips", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-
-#topology files
-net_topology_script_file_path = "/etc/hadoop/conf/topology_script.py"
-net_topology_script_dir = os.path.dirname(net_topology_script_file_path)
-net_topology_mapping_data_file_name = 'topology_mappings.data'
-net_topology_mapping_data_file_path = os.path.join(net_topology_script_dir, net_topology_mapping_data_file_name)
-
-#Added logic to create /tmp and /user directory for HCFS stack.  
-has_core_site = 'core-site' in config['configurations']
-hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
-kinit_path_local = get_kinit_path()
-stack_version_unformatted = config['hostLevelParams']['stack_version']
-stack_version_formatted = format_stack_version(stack_version_unformatted)
-hadoop_bin_dir = stack_select.get_hadoop_dir("bin")
-hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
-hdfs_site = config['configurations']['hdfs-site']
-smoke_user =  config['configurations']['cluster-env']['smokeuser']
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 0770
-
-
-##### Namenode RPC ports - metrics config section start #####
-
-# Figure out the rpc ports for current namenode
-nn_rpc_client_port = None
-nn_rpc_dn_port = None
-nn_rpc_healthcheck_port = None
-
-namenode_id = None
-namenode_rpc = None
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None)
-if dfs_ha_nameservices is None:
-  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices', None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-
-dfs_ha_namemodes_ids_list = []
-other_namenode_id = None
-
-if dfs_ha_namenode_ids:
- dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
- dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
- if dfs_ha_namenode_ids_array_len > 1:
-   dfs_ha_enabled = True
-
-if dfs_ha_enabled:
- for nn_id in dfs_ha_namemodes_ids_list:
-   nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-   if hostname in nn_host:
-     namenode_id = nn_id
-     namenode_rpc = nn_host
-   pass
- pass
-else:
-  namenode_rpc = default('/configurations/hdfs-site/dfs.namenode.rpc-address', default_fs)
-
-# if HDFS is not installed in the cluster, then don't try to access namenode_rpc
-if "core-site" in config['configurations'] and namenode_rpc:
-  port_str = namenode_rpc.split(':')[-1].strip()
-  try:
-    nn_rpc_client_port = int(port_str)
-  except ValueError:
-    nn_rpc_client_port = None
-
-if namenode_rpc:
- nn_rpc_client_port = namenode_rpc.split(':')[1].strip()
-
-if dfs_ha_enabled:
- dfs_service_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.servicerpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address.{dfs_ha_nameservices}.{namenode_id}'), None)
-else:
- dfs_service_rpc_address = default('/configurations/hdfs-site/dfs.namenode.servicerpc-address', None)
- dfs_lifeline_rpc_address = default(format('/configurations/hdfs-site/dfs.namenode.lifeline.rpc-address'), None)
-
-if dfs_service_rpc_address:
- nn_rpc_dn_port = dfs_service_rpc_address.split(':')[1].strip()
-
-if dfs_lifeline_rpc_address:
- nn_rpc_healthcheck_port = dfs_lifeline_rpc_address.split(':')[1].strip()
-
-is_nn_client_port_configured = False if nn_rpc_client_port is None else True
-is_nn_dn_port_configured = False if nn_rpc_dn_port is None else True
-is_nn_healthcheck_port_configured = False if nn_rpc_healthcheck_port is None else True
-
-##### end #####
-
-import functools
-#create partial functions with common arguments for every HdfsResource call
-#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
-HdfsResource = functools.partial(
-  HdfsResource,
-  user=hdfs_user,
-  hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
-  security_enabled = security_enabled,
-  keytab = hdfs_user_keytab,
-  kinit_path_local = kinit_path_local,
-  hadoop_bin_dir = hadoop_bin_dir,
-  hadoop_conf_dir = hadoop_conf_dir,
-  principal_name = hdfs_principal_name,
-  hdfs_site = hdfs_site,
-  default_fs = default_fs,
-  immutable_paths = get_not_managed_resources(),
-  dfs_type = dfs_type
-)
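The HdfsResource partial above binds every cluster-wide keyword argument once, so call sites only pass what varies. A minimal illustration of the same pattern with a stand-in function (hdfs_resource and HdfsResourceBound are hypothetical names):

import functools

def hdfs_resource(path, user=None, action=None, type=None):
    # Stand-in for the real HdfsResource; just echoes what it was called with.
    print(path, user, action, type)

HdfsResourceBound = functools.partial(hdfs_resource, user='hdfs')
HdfsResourceBound('/tmp', action='create_on_execute', type='directory')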

+ 0 - 47
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/rack_awareness.py

@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management.core.resources import File
-from resource_management.core.source import StaticFile, Template
-from resource_management.libraries.functions import format
-
-
-def create_topology_mapping():
-  import params
-
-  File(params.net_topology_mapping_data_file_path,
-       content=Template("topology_mappings.data.j2"),
-       owner=params.hdfs_user,
-       group=params.user_group,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script():
-  import params
-
-  File(params.net_topology_script_file_path,
-       content=StaticFile('topology_script.py'),
-       mode=0755,
-       only_if=format("test -d {net_topology_script_dir}"))
-
-def create_topology_script_and_mapping():
-  import params
-  if params.has_hadoop_env:
-    create_topology_mapping()
-    create_topology_script()

+ 0 - 249
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/scripts/shared_initialization.py

@@ -1,249 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil
-from resource_management.core.resources.jcepolicyinfo import JcePolicyInfo
-
-from resource_management import *
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  Execute(("setenforce","0"),
-          only_if="test -f /selinux/enforce",
-          not_if="(! which getenforce ) || (which getenforce && getenforce | grep -q Disabled)",
-          sudo=True,
-  )
-
-  #directories
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    Directory(params.hdfs_log_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group=params.user_group,
-              mode=0775,
-              cd_access='a',
-    )
-    if params.has_namenode:
-      Directory(params.hadoop_pid_dir_prefix,
-              create_parents = True,
-              owner='root',
-              group='root',
-              cd_access='a',
-      )
-    Directory(params.hadoop_tmp_dir,
-              create_parents = True,
-              owner=params.hdfs_user,
-              cd_access='a',
-              )
-  #files
-    if params.security_enabled:
-      tc_owner = "root"
-    else:
-      tc_owner = params.hdfs_user
-      
-    # if WebHDFS is not enabled we need this jar to create hadoop folders and copy tarballs to HDFS.
-    if params.sysprep_skip_copy_fast_jar_hdfs:
-      print "Skipping copying of fast-hdfs-resource.jar as host is sys prepped"
-    elif params.dfs_type == 'HCFS' or not WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs):
-      # for source-code of jar goto contrib/fast-hdfs-resource
-      File(format("{ambari_libs_dir}/fast-hdfs-resource.jar"),
-           mode=0644,
-           content=StaticFile("fast-hdfs-resource.jar")
-      )
-      
-    if os.path.exists(params.hadoop_conf_dir):
-      File(os.path.join(params.hadoop_conf_dir, 'commons-logging.properties'),
-           owner=tc_owner,
-           content=Template('commons-logging.properties.j2')
-      )
-
-      health_check_template_name = "health_check"
-      File(os.path.join(params.hadoop_conf_dir, health_check_template_name),
-           owner=tc_owner,
-           content=Template(health_check_template_name + ".j2")
-      )
-
-      log4j_filename = os.path.join(params.hadoop_conf_dir, "log4j.properties")
-      if (params.log4j_props != None):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-             content=params.log4j_props
-        )
-      elif (os.path.exists(format("{params.hadoop_conf_dir}/log4j.properties"))):
-        File(log4j_filename,
-             mode=0644,
-             group=params.user_group,
-             owner=params.hdfs_user,
-        )
-
-      File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-           owner=params.hdfs_user,
-           group=params.user_group,
-           content=InlineTemplate(params.hadoop_metrics2_properties_content)
-      )
-
-    if params.dfs_type == 'HCFS' and params.has_core_site and 'ECS_CLIENT' in params.component_list:
-       create_dirs()
-
-    create_microsoft_r_dir()
-
-
-def setup_configs():
-  """
-  Creates configs for the HDFS and MapReduce services.
-  """
-  import params
-
-  if params.has_namenode or params.dfs_type == 'HCFS':
-    if os.path.exists(params.hadoop_conf_dir):
-      File(params.task_log4j_properties_location,
-           content=StaticFile("task-log4j.properties"),
-           mode=0755
-      )
-
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-      File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-           owner=params.hdfs_user,
-           group=params.user_group
-      )
-    if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-      File(os.path.join(params.hadoop_conf_dir, 'masters'),
-                owner=params.hdfs_user,
-                group=params.user_group
-      )
-
-def create_javahome_symlink():
-  if os.path.exists("/usr/jdk/jdk1.6.0_31") and not os.path.exists("/usr/jdk64/jdk1.6.0_31"):
-    Directory("/usr/jdk64/",
-         create_parents = True,
-    )
-    Link("/usr/jdk/jdk1.6.0_31",
-         to="/usr/jdk64/jdk1.6.0_31",
-    )
-
-def create_dirs():
-   import params
-   params.HdfsResource(params.hdfs_tmp_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.hdfs_user,
-                       mode=0777
-   )
-   params.HdfsResource(params.smoke_hdfs_user_dir,
-                       type="directory",
-                       action="create_on_execute",
-                       owner=params.smoke_user,
-                       mode=params.smoke_hdfs_user_mode
-   )
-   params.HdfsResource(None,
-                      action="execute"
-   )
-
-def create_microsoft_r_dir():
-  import params
-  if 'MICROSOFT_R_NODE_CLIENT' in params.component_list and params.default_fs:
-    directory = '/user/RevoShare'
-    try:
-      params.HdfsResource(directory,
-                          type="directory",
-                          action="create_on_execute",
-                          owner=params.hdfs_user,
-                          mode=0777)
-      params.HdfsResource(None, action="execute")
-    except Exception as exception:
-      Logger.warning("Could not check the existence of {0} on DFS while starting {1}, exception: {2}".format(directory, params.current_service, str(exception)))
-
-def setup_unlimited_key_jce_policy():
-  """
-  Sets up the unlimited key JCE policy if needed (also sets up the Ambari JCE if Ambari and the stack use different JDKs).
-  """
-  import params
-  __setup_unlimited_key_jce_policy(custom_java_home=params.java_home, custom_jdk_name=params.jdk_name, custom_jce_name = params.jce_policy_zip)
-  if params.ambari_jce_name and params.ambari_jce_name != params.jce_policy_zip:
-    __setup_unlimited_key_jce_policy(custom_java_home=params.ambari_java_home, custom_jdk_name=params.ambari_jdk_name, custom_jce_name = params.ambari_jce_name)
-
-def __setup_unlimited_key_jce_policy(custom_java_home, custom_jdk_name, custom_jce_name):
-  """
-  Sets up the unlimited key JCE policy if needed.
-
-  The following criteria must be met:
-
-    * The cluster has not been previously prepared (sys prepped) - cluster-env/sysprep_skip_setup_jce = False
-    * Ambari is managing the host's JVM - /hostLevelParams/jdk_name is set
-    * Either security is enabled OR a service requires it - /hostLevelParams/unlimited_key_jce_required = True
-    * The unlimited key JCE policy has not already been installed
-
-  If the conditions are met, the following steps are taken to install the unlimited key JCE policy JARs
-
-    1. The unlimited key JCE policy ZIP file is downloaded from the Ambari server and stored in the
-        Ambari agent's temporary directory
-    2. The existing JCE policy JAR files are deleted
-    3. The downloaded ZIP file is unzipped into the proper JCE policy directory
-
-  :return: None
-  """
-  import params
-
-  if params.sysprep_skip_setup_jce:
-    Logger.info("Skipping unlimited key JCE policy check and setup since the host is sys prepped")
-
-  elif not custom_jdk_name:
-    Logger.debug("Skipping unlimited key JCE policy check and setup since the Java VM is not managed by Ambari")
-
-  elif not params.unlimited_key_jce_required:
-    Logger.debug("Skipping unlimited key JCE policy check and setup since it is not required")
-
-  else:
-    jcePolicyInfo = JcePolicyInfo(custom_java_home)
-
-    if jcePolicyInfo.is_unlimited_key_jce_policy():
-      Logger.info("The unlimited key JCE policy is required, and appears to have been installed.")
-
-    elif custom_jce_name is None:
-      raise Fail("The unlimited key JCE policy needs to be installed; however the JCE policy zip is not specified.")
-
-    else:
-      Logger.info("The unlimited key JCE policy is required, and needs to be installed.")
-
-      jce_zip_target = format("{artifact_dir}/{custom_jce_name}")
-      jce_zip_source = format("{ambari_server_resources_url}/{custom_jce_name}")
-      java_security_dir = format("{custom_java_home}/jre/lib/security")
-
-      Logger.debug("Downloading the unlimited key JCE policy files from {0} to {1}.".format(jce_zip_source, jce_zip_target))
-      Directory(params.artifact_dir, create_parents=True)
-      File(jce_zip_target, content=DownloadSource(jce_zip_source))
-
-      Logger.debug("Removing existing JCE policy JAR files: {0}.".format(java_security_dir))
-      File(format("{java_security_dir}/US_export_policy.jar"), action="delete")
-      File(format("{java_security_dir}/local_policy.jar"), action="delete")
-
-      Logger.debug("Unzipping the unlimited key JCE policy files from {0} into {1}.".format(jce_zip_target, java_security_dir))
-      extract_cmd = ("unzip", "-o", "-j", "-q", jce_zip_target, "-d", java_security_dir)
-      Execute(extract_cmd,
-              only_if=format("test -e {java_security_dir} && test -f {jce_zip_target}"),
-              path=['/bin/', '/usr/bin'],
-              sudo=True
-              )

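Taken together, the helpers deleted above are what a before-START hook drives; the commit relocates them to the shared stack-hooks tree rather than dropping the behavior. Below is a minimal sketch of how a stack-agnostic hook entry point typically wires them up. The relocated hook.py is not part of this diff, so the exact class body here is an assumption, not a copy.

# Illustrative sketch only: the real stack-hooks/before-START/scripts/hook.py
# may differ. Helper names match the deleted shared_initialization module.
from resource_management.libraries.script.hook import Hook
from shared_initialization import setup_hadoop, setup_configs, \
  create_javahome_symlink, setup_unlimited_key_jce_policy

class BeforeStartHook(Hook):
  def hook(self, env):
    import params
    env.set_params(params)
    setup_hadoop()                    # dirs, conf files, fast-hdfs-resource.jar
    setup_configs()                   # task-log4j.properties, configuration.xsl, masters
    create_javahome_symlink()         # legacy /usr/jdk64 symlink, if applicable
    setup_unlimited_key_jce_policy()  # JCE for both the stack and Ambari JDKs

if __name__ == "__main__":
  BeforeStartHook().execute()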
+ 0 - 43
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/commons-logging.properties.j2

@@ -1,43 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

+ 0 - 21
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/exclude_hosts_list.j2

@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}

+ 0 - 107
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/hadoop-metrics2.properties.j2

@@ -1,107 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-nimbus.sink.ganglia.servers={{ganglia_server_host}}:8649
-supervisor.sink.ganglia.servers={{ganglia_server_host}}:8650
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
-
-{% if has_metric_collector %}
-
-*.period={{metrics_collection_period}}
-*.sink.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar
-*.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink
-*.sink.timeline.period={{metrics_collection_period}}
-*.sink.timeline.sendInterval={{metrics_report_interval}}000
-*.sink.timeline.slave.host.name={{hostname}}
-*.sink.timeline.zookeeper.quorum={{zookeeper_quorum}}
-*.sink.timeline.protocol={{metric_collector_protocol}}
-*.sink.timeline.port={{metric_collector_port}}
-*.sink.timeline.host_in_memory_aggregation = {{host_in_memory_aggregation}}
-*.sink.timeline.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}}
-
-# HTTPS properties
-*.sink.timeline.truststore.path = {{metric_truststore_path}}
-*.sink.timeline.truststore.type = {{metric_truststore_type}}
-*.sink.timeline.truststore.password = {{metric_truststore_password}}
-
-datanode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-namenode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-resourcemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-nodemanager.sink.timeline.collector.hosts={{ams_collector_hosts}}
-jobhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-journalnode.sink.timeline.collector.hosts={{ams_collector_hosts}}
-applicationhistoryserver.sink.timeline.collector.hosts={{ams_collector_hosts}}
-
-resourcemanager.sink.timeline.tagsForPrefix.yarn=Queue
-
-{% if is_nn_client_port_configured %}
-# Namenode rpc ports customization
-namenode.sink.timeline.metric.rpc.client.port={{nn_rpc_client_port}}
-{% endif %}
-{% if is_nn_dn_port_configured %}
-namenode.sink.timeline.metric.rpc.datanode.port={{nn_rpc_dn_port}}
-{% endif %}
-{% if is_nn_healthcheck_port_configured %}
-namenode.sink.timeline.metric.rpc.healthcheck.port={{nn_rpc_healthcheck_port}}
-{% endif %}
-
-{% endif %}

+ 0 - 81
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/health_check.j2

@@ -1,81 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-# Run all checks
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
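For readers who find the awk pipeline above dense, here is a hypothetical Python equivalent of the deleted disk probe. The function names are invented; the logic mirrors the script, flagging ext3 fstab entries that are unmounted (except /mnt) or mounted read-only.

import sys

def read_mounts(path, fs_filter=None):
  # Return {mount_point: options} from an fstab-style file
  # (fields: device, mount point, fs type, options, ...).
  mounts = {}
  with open(path) as f:
    for line in f:
      fields = line.split()
      if len(fields) >= 4 and not fields[0].startswith("#"):
        if fs_filter is None or fields[2] == fs_filter:
          mounts[fields[1]] = fields[3]
  return mounts

def check_disks():
  expected = read_mounts("/etc/fstab", fs_filter="ext3")
  actual = read_mounts("/proc/mounts")
  problems = []
  for mount_point in expected:
    if mount_point not in actual and mount_point != "/mnt":
      problems.append("%s(u)" % mount_point)   # expected but unmounted
    elif mount_point in actual and actual[mount_point].startswith("ro,"):
      problems.append("%s(ro)" % mount_point)  # read-only, mirrors the /^ro,/ test
  return problems

if __name__ == "__main__":
  problems = check_disks()
  if problems:
    print("ERROR %s" % ",".join(problems))
    sys.exit(2)
  print("disks ok")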

+ 0 - 21
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/include_hosts_list.j2

@@ -1,21 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

+ 0 - 24
ambari-server/src/main/resources/stacks/HDP/3.0/hooks/before-START/templates/topology_mappings.data.j2

@@ -1,24 +0,0 @@
-{#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#}
-[network_topology]
-{% for host in all_hosts %}
-{% if host in slave_hosts %}
-{{host}}={{all_racks[loop.index-1]}}
-{{all_ipv4_ips[loop.index-1]}}={{all_racks[loop.index-1]}}
-{% endif %}
-{% endfor %}

+ 0 - 19
ambari-server/src/test/java/org/apache/ambari/server/api/services/AmbariMetaInfoTest.java

@@ -1265,25 +1265,6 @@ public class AmbariMetaInfoTest {
     }
   }
 
-
-  @Test
-  public void testHooksDirInheritance() throws Exception {
-    String hookAssertionTemplate = "HDP/%s/hooks";
-    if (System.getProperty("os.name").contains("Windows")) {
-      hookAssertionTemplate = "HDP\\%s\\hooks";
-    }
-    // Test hook dir determination in parent
-    StackInfo stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.6");
-    Assert.assertEquals(String.format(hookAssertionTemplate, "2.0.6"), stackInfo.getStackHooksFolder());
-    // Test hook dir inheritance
-    stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.7");
-    Assert.assertEquals(String.format(hookAssertionTemplate, "2.0.6"), stackInfo.getStackHooksFolder());
-    // Test hook dir override
-    stackInfo = metaInfo.getStack(STACK_NAME_HDP, "2.0.8");
-    Assert.assertEquals(String.format(hookAssertionTemplate, "2.0.8"), stackInfo.getStackHooksFolder());
-  }
-
-
   @Test
   public void testServicePackageDirInheritance() throws Exception {
     String assertionTemplate07 = StringUtils.join(

+ 1 - 0
ambari-server/src/test/python/TestResourceFilesKeeper.py

@@ -85,6 +85,7 @@ class TestResourceFilesKeeper(TestCase):
       "call('../resources/TestAmbaryServer.samples/" \
       "dummy_common_services/HIVE/0.11.0.2.0.5.0/package'),\n " \
       "call('../resources/TestAmbaryServer.samples/dummy_extension/HIVE/package'),\n " \
+      "call('../resources/stack-hooks'),\n " \
       "call('../resources/custom_actions'),\n " \
       "call('../resources/host_scripts'),\n " \
       "call('../resources/dashboards')]"

+ 19 - 7
ambari-server/src/test/python/stacks/2.0.6/hooks/after-INSTALL/test_after_install.py

@@ -29,7 +29,7 @@ from resource_management.libraries.script import Script
 @patch("os.path.isfile", new = MagicMock(return_value=False))
 class TestHookAfterInstall(RMFTestCase):
   CONFIG_OVERRIDES = {"serviceName":"HIVE", "role":"HIVE_SERVER"}
-
+  STACK_VERSION = '2.0.6'
   def setUp(self):
     Logger.initialize_logger()
 
@@ -41,10 +41,12 @@ class TestHookAfterInstall(RMFTestCase):
 
   def test_hook_default(self):
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
                        config_file="default.json",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_overrides = self.CONFIG_OVERRIDES
     )
     self.assertResourceCalled('XmlConfig', 'core-site.xml',
@@ -82,9 +84,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['commandParams']['version'] = version
     json_content['hostLevelParams']['stack_version'] = "2.3"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 
@@ -156,9 +160,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['commandParams']['version'] = version
     json_content['hostLevelParams']['stack_version'] = "2.3"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 
@@ -235,9 +241,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['commandParams']['version'] = version
     json_content['hostLevelParams']['stack_version'] = "2.3"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
       classname="AfterInstallHook",
       command="hook",
+      stack_version = self.STACK_VERSION,
+      target=RMFTestCase.TARGET_STACK_HOOKS,
       config_dict = json_content,
       config_overrides = self.CONFIG_OVERRIDES)
 
@@ -265,9 +273,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['hostLevelParams']['stack_version'] = "2.3"
     json_content['roleParams']['upgrade_suspended'] = "true"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 
@@ -338,9 +348,11 @@ class TestHookAfterInstall(RMFTestCase):
     json_content['hostLevelParams']['stack_version'] = "2.3"
     json_content['hostLevelParams']['host_sys_prepped'] = "true"
 
-    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
+    self.executeScript("after-INSTALL/scripts/hook.py",
                        classname="AfterInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict = json_content,
                        config_overrides = self.CONFIG_OVERRIDES)
 

+ 4 - 1
ambari-server/src/test/python/stacks/2.0.6/hooks/before-ANY/test_before_any.py

@@ -28,6 +28,7 @@ import os
 @patch.object(Hook, "run_custom_hook", new = MagicMock())
 class TestHookBeforeInstall(RMFTestCase):
   TMP_PATH = '/tmp/hbase-hbase'
+  STACK_VERSION = '2.0.6'
 
   @patch("os.path.isfile")
   @patch.object(getpass, "getuser", new = MagicMock(return_value='some_user'))
@@ -43,9 +44,11 @@ class TestHookBeforeInstall(RMFTestCase):
     os_path_exists_mock.side_effect = side_effect
     os_path_isfile_mock.side_effect = [False, True, True, True, True]
 
-    self.executeScript("2.0.6/hooks/before-ANY/scripts/hook.py",
+    self.executeScript("before-ANY/scripts/hook.py",
                        classname="BeforeAnyHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_file="default.json",
                        call_mocks=itertools.cycle([(0, "1000")])
     )

+ 11 - 3
ambari-server/src/test/python/stacks/2.0.6/hooks/before-INSTALL/test_before_install.py

@@ -27,9 +27,13 @@ import json
 @patch.object(getpass, "getuser", new = MagicMock(return_value='some_user'))
 @patch.object(Hook, "run_custom_hook", new = MagicMock())
 class TestHookBeforeInstall(RMFTestCase):
+  STACK_VERSION = '2.0.6'
+
   def test_hook_default(self):
-    self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
+    self.executeScript("before-INSTALL/scripts/hook.py",
                        classname="BeforeInstallHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_file="default.json"
     )
@@ -63,9 +67,11 @@ class TestHookBeforeInstall(RMFTestCase):
 
     command_json['hostLevelParams']['repo_info'] = "[]"
 
-    self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
+    self.executeScript("before-INSTALL/scripts/hook.py",
                        classname="BeforeInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_dict=command_json)
 
     self.assertResourceCalled('Package', 'unzip', retry_count=5, retry_on_repo_unavailability=False)
@@ -75,9 +81,11 @@ class TestHookBeforeInstall(RMFTestCase):
 
 
   def test_hook_default_repository_file(self):
-    self.executeScript("2.0.6/hooks/before-INSTALL/scripts/hook.py",
+    self.executeScript("before-INSTALL/scripts/hook.py",
                        classname="BeforeInstallHook",
                        command="hook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        config_file="repository_file.json"
     )
     self.assertResourceCalled('Repository', 'HDP-2.2-repo-4',

+ 16 - 5
ambari-server/src/test/python/stacks/2.0.6/hooks/before-START/test_before_start.py

@@ -28,9 +28,12 @@ import json
 @patch("os.path.exists", new = MagicMock(return_value=True))
 @patch.object(Hook, "run_custom_hook", new = MagicMock())
 class TestHookBeforeStart(RMFTestCase):
+  STACK_VERSION = '2.0.6'
   def test_hook_default(self):
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_file="default.json"
     )
@@ -104,8 +107,10 @@ class TestHookBeforeStart(RMFTestCase):
     self.assertNoMoreResources()
 
   def test_hook_secured(self):
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_file="secured.json"
     )
@@ -184,8 +189,10 @@ class TestHookBeforeStart(RMFTestCase):
       default_json = json.load(f)
 
     default_json['serviceName']= 'HDFS'
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_dict=default_json
     )
@@ -266,8 +273,10 @@ class TestHookBeforeStart(RMFTestCase):
     default_json['serviceName'] = 'HDFS'
     default_json['configurations']['core-site']['net.topology.script.file.name'] = '/home/myhadoop/hadoop/conf.hadoop/topology_script.py'
 
-    self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+    self.executeScript("before-START/scripts/hook.py",
                        classname="BeforeStartHook",
+                       stack_version = self.STACK_VERSION,
+                       target=RMFTestCase.TARGET_STACK_HOOKS,
                        command="hook",
                        config_dict=default_json
     )
@@ -342,8 +351,10 @@ class TestHookBeforeStart(RMFTestCase):
 
   def test_that_jce_is_required_in_secured_cluster(self):
     try:
-      self.executeScript("2.0.6/hooks/before-START/scripts/hook.py",
+      self.executeScript("before-START/scripts/hook.py",
                          classname="BeforeStartHook",
+                         stack_version = self.STACK_VERSION,
+                         target=RMFTestCase.TARGET_STACK_HOOKS,
                          command="hook",
                          config_file="secured_no_jce_name.json"
       )

+ 8 - 0
ambari-server/src/test/python/stacks/utils/RMFTestCase.py

@@ -43,6 +43,7 @@ PATH_TO_STACKS = "main/resources/stacks/HDP"
 PATH_TO_STACK_TESTS = "test/python/stacks/"
 
 PATH_TO_COMMON_SERVICES = "main/resources/common-services"
+PATH_TO_STACK_HOOKS = "main/resources/stack-hooks"
 
 PATH_TO_CUSTOM_ACTIONS = "main/resources/custom_actions"
 PATH_TO_CUSTOM_ACTION_TESTS = "test/python/custom_actions"
@@ -62,6 +63,9 @@ class RMFTestCase(TestCase):
   # build all paths to test common services scripts
   TARGET_COMMON_SERVICES = 'TARGET_COMMON_SERVICES'
 
+  # build all paths to test stack hooks scripts
+  TARGET_STACK_HOOKS = 'TARGET_STACK_HOOKS'
+
   def executeScript(self, path, classname=None, command=None, config_file=None,
                     config_dict=None,
                     # common mocks for all the scripts
@@ -195,6 +199,10 @@ class RMFTestCase(TestCase):
       base_path = os.path.join(src_dir, PATH_TO_COMMON_SERVICES)
       configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, stack_version, "configs")
       return base_path, configs_path
+    elif target == self.TARGET_STACK_HOOKS:
+      base_path = os.path.join(src_dir, PATH_TO_STACK_HOOKS)
+      configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, stack_version, "configs")
+      return base_path, configs_path
     else:
       raise RuntimeError("Wrong target value %s", target)
 

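The new TARGET_STACK_HOOKS branch is the crux of the test changes above: hook scripts now load from the shared stack-hooks tree while the JSON config fixtures stay under the per-stack test directory. A standalone sketch of that resolution follows; the helper name is mine, while the constants mirror the hunk.

import os

PATH_TO_STACK_HOOKS = "main/resources/stack-hooks"
PATH_TO_STACK_TESTS = "test/python/stacks/"

def resolve_stack_hooks_paths(src_dir, stack_version):
  # Scripts come from the stack-agnostic hooks tree...
  base_path = os.path.join(src_dir, PATH_TO_STACK_HOOKS)
  # ...while config fixtures remain keyed by stack version.
  configs_path = os.path.join(src_dir, PATH_TO_STACK_TESTS, stack_version, "configs")
  return base_path, configs_path

print(resolve_stack_hooks_paths("ambari-server/src", "2.0.6"))
# -> ('ambari-server/src/main/resources/stack-hooks',
#     'ambari-server/src/test/python/stacks/2.0.6/configs')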
+ 1 - 0
contrib/management-packs/hdf-ambari-mpack/src/main/assemblies/hdf-ambari-mpack.xml

@@ -40,6 +40,7 @@
   -->
   <fileSets>
     <fileSet>
+      <!--TODO-->
       <directory>src/main/resources/hooks</directory>
       <outputDirectory>hooks</outputDirectory>
     </fileSet>