
AMBARI-14190. Log displays wrong text for various tasks of Express Upgrade (dlysnichenko)

Lisnichenko Dmitro, 9 years ago
parent
commit
a532b8c22d

+ 1 - 1
ambari-agent/src/main/python/ambari_agent/Controller.py

@@ -449,7 +449,7 @@ class Controller(threading.Thread):
     """
     In Ambari 2.1.2, we moved the dfs_data_dir_mount.hist to a static location
     because /etc/hadoop/conf points to a symlink'ed location that would change during
-    Rolling Upgrade.
+    Stack Upgrade.
     """
     try:
       if compare_versions(self.version, "2.1.2") >= 0:
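
For context, the guard above only migrates the mount history file on agents reporting 2.1.2 or newer. A minimal standalone sketch of that idea, using the stdlib LooseVersion in place of Ambari's compare_versions helper (the file paths are illustrative, not the agent's actual constants):

    import os
    import shutil
    from distutils.version import LooseVersion

    def relocate_mount_history(agent_version,
                               old_path="/etc/hadoop/conf/dfs_data_dir_mount.hist",
                               new_path="/var/lib/ambari-agent/data/dfs_data_dir_mount.hist"):
      # Only agents at 2.1.2 or newer use the static location; older agents keep
      # writing under the (symlinked) configuration directory.
      if LooseVersion(agent_version) < LooseVersion("2.1.2"):
        return False
      if os.path.exists(old_path) and not os.path.exists(new_path):
        # Move the file once so its contents survive a stack upgrade that
        # repoints the /etc/hadoop/conf symlink.
        shutil.move(old_path, new_path)
      return True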

+ 13 - 4
ambari-agent/src/test/python/resource_management/TestPackageResource.py

@@ -38,6 +38,7 @@ class TestPackageResource(TestCase):
     call_mock.return_value= (1, '')
     with Environment('/') as env:
       Package("some_package",
+        logoutput = False
       )
     call_mock.assert_has_calls([call("dpkg --get-selections | grep -v deinstall | awk '{print $1}' | grep ^some-package$"),
  call(['/usr/bin/apt-get', '-q', '-o', 'Dpkg::Options::=--force-confdef', '--allow-unauthenticated', '--assume-yes', 'install', 'some-package'], logoutput=False, sudo=True, env={'DEBIAN_FRONTEND': 'noninteractive'}),
@@ -54,6 +55,7 @@ class TestPackageResource(TestCase):
     shell_mock.return_value = (0, '')
     with Environment('/') as env:
       Package("some_package",
+        logoutput = False
       )
     call_mock.assert_has_calls([call("dpkg --get-selections | grep -v deinstall | awk '{print $1}' | grep ^some-package$"),
  call(['/usr/bin/apt-get', '-q', '-o', 'Dpkg::Options::=--force-confdef', '--allow-unauthenticated', '--assume-yes', 'install', 'some-package'], logoutput=False, sudo=True, env={'DEBIAN_FRONTEND': 'noninteractive'})])
@@ -96,6 +98,7 @@ class TestPackageResource(TestCase):
     sys.modules['rpm'].TransactionSet.return_value.dbMatch.return_value = [{'name':'some_packag'}]
     with Environment('/') as env:
       Package("some_package",
+        logoutput = False
       )
     self.assertTrue(sys.modules['rpm'].TransactionSet.return_value.dbMatch.called)
     shell_mock.assert_called_with(['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'install', 'some_package'], logoutput=False, sudo=True)
@@ -108,6 +111,7 @@ class TestPackageResource(TestCase):
     sys.modules['rpm'].TransactionSet.return_value.dbMatch.return_value = [{'name':'some_packag'}]
     with Environment('/') as env:
       Package("some_package*",
+        logoutput = False
       )
     shell_mock.assert_called_with(['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'install', 'some_package*'], logoutput=False, sudo=True)
 
@@ -132,6 +136,7 @@ class TestPackageResource(TestCase):
     sys.modules['rpm'].TransactionSet.return_value.dbMatch.return_value = [{'name':'some_packages'}]
     with Environment('/') as env:
       Package("some_package",
+        logoutput = False
       )
     shell_mock.assert_called_with(['/usr/bin/zypper', '--quiet', 'install', '--auto-agree-with-licenses', '--no-confirm', 'some_package'], logoutput=False, sudo=True)
 
@@ -175,7 +180,8 @@ class TestPackageResource(TestCase):
   @patch.object(System, "os_family", new = 'redhat')
   def test_action_install_use_repos_rhel(self, shell_mock):
     with Environment('/') as env:
-      Package("some_package", use_repos=['HDP-UTILS-2.2.0.1-885', 'HDP-2.2.0.1-885']
+      Package("some_package", use_repos=['HDP-UTILS-2.2.0.1-885', 'HDP-2.2.0.1-885'],
+              logoutput = False
               )
     self.assertEquals(shell_mock.call_args[0][0],
                       ['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'install',
@@ -203,7 +209,8 @@ class TestPackageResource(TestCase):
     sys.modules['rpm'].TransactionSet.return_value.dbMatch.return_value = [{'name':'some_package'}]
     with Environment('/') as env:
       Package("some_package",
-              action = "remove"
+              action = "remove",
+              logoutput = False
       )
     shell_mock.assert_called_with(['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'erase', 'some_package'], logoutput=False, sudo=True)
 
@@ -217,7 +224,8 @@ class TestPackageResource(TestCase):
     sys.modules['rpm'].TransactionSet.return_value.dbMatch.return_value = [{'name':'some_package'}]
     with Environment('/') as env:
       Package("some_package",
-              action = "remove"
+              action = "remove",
+              logoutput = False
       )
     shell_mock.assert_called_with(['/usr/bin/zypper', '--quiet', 'remove', '--no-confirm', 'some_package'], logoutput=False, sudo=True)
 
@@ -227,7 +235,8 @@ class TestPackageResource(TestCase):
   def test_action_install_version_attr(self, shell_mock):
     with Environment('/') as env:
       Package("some_package",
-              version = "3.5.0"
+              version = "3.5.0",
+              logoutput = False
       )
     shell_mock.assert_called_with(['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'install', 'some_package-3.5.0'], logoutput=False, sudo=True)
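
The assertions above check that the logoutput flag handed to the Package resource is forwarded unchanged to the package-manager shell call. A minimal self-contained sketch of that test pattern, assuming the mock package the Ambari test suite already uses; checked_call and install_package here are hypothetical stand-ins, not Ambari's real provider code:

    from unittest import TestCase
    from mock import patch

    def checked_call(cmd, logoutput=True, sudo=False):
      # Stand-in for the shell layer; the test patches this out.
      raise NotImplementedError("patched out in the test")

    def install_package(name, logoutput=True):
      # Forward the caller's logoutput flag to the package-manager command,
      # mirroring what the Package providers do in the assertions above.
      return checked_call(['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'install', name],
                          logoutput=logoutput, sudo=True)

    class TestInstallForwardsLogoutput(TestCase):
      def test_install_forwards_logoutput(self):
        with patch(__name__ + '.checked_call') as shell_mock:
          shell_mock.return_value = (0, '')
          install_package("some_package", logoutput=False)
          shell_mock.assert_called_with(
              ['/usr/bin/yum', '-d', '0', '-e', '0', '-y', 'install', 'some_package'],
              logoutput=False, sudo=True)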
 

+ 2 - 2
ambari-common/src/main/python/resource_management/libraries/functions/constants.py

@@ -22,7 +22,7 @@ __all__ = ["Direction", "SafeMode"]
 
 class Direction:
   """
-  Rolling Upgrade direction
+  Stack Upgrade direction
   """
   UPGRADE = "upgrade"
   DOWNGRADE = "downgrade"
@@ -33,4 +33,4 @@ class SafeMode:
   """
   ON = "ON"
   OFF = "OFF"
-  UNKNOWN = "UNKNOWN"
+  UNKNOWN = "UNKNOWN"
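
For reference, Direction is typically compared against the upgrade_direction command parameter; a minimal usage sketch, with a plain dict standing in for Ambari's /commandParams lookup:

    class Direction:
      UPGRADE = "upgrade"
      DOWNGRADE = "downgrade"

    def is_downgrade(command_params):
      # upgrade_direction is absent outside of a stack upgrade, so a missing
      # key simply means "not upgrading".
      return command_params.get("upgrade_direction") == Direction.DOWNGRADE

    print(is_downgrade({"upgrade_direction": "downgrade"}))  # True
    print(is_downgrade({}))                                  # False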

+ 8 - 8
ambari-common/src/main/python/resource_management/libraries/functions/copy_tarball.py

@@ -98,7 +98,7 @@ def _get_single_version_from_hdp_select():
   return hdp_version
 
 def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=None, custom_dest_file=None, force_execute=False,
-                 use_ru_version_during_ru=True, replace_existing_files=False, host_sys_prepped=False):
+                 use_upgrading_version_during_uprade=True, replace_existing_files=False, host_sys_prepped=False):
   """
   :param name: Tarball name, e.g., tez, hive, pig, sqoop.
   :param user_group: Group to own the directory.
@@ -107,7 +107,7 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
   :param custom_source_file: Override the source file path
   :param custom_dest_file: Override the destination file path
   :param force_execute: If true, will execute the HDFS commands immediately, otherwise, will defer to the calling function.
-  :param use_ru_version_during_ru: If true, will use the version going to during RU. Otherwise, use the CURRENT (source) version.
+  :param use_upgrading_version_during_uprade: If true, will use the version going to during upgrade. Otherwise, use the CURRENT (source) version.
   :param host_sys_prepped: If true, tarballs will not be copied as the cluster deployment uses prepped VMs.
   :return: Will return True if successful, otherwise, False.
   """
@@ -135,16 +135,16 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
     return True
 
   upgrade_direction = default("/commandParams/upgrade_direction", None)
-  is_rolling_upgrade = upgrade_direction is not None
+  is_stack_upgrade = upgrade_direction is not None
   current_version = default("/hostLevelParams/current_version", None)
   Logger.info("Default version is {0}".format(current_version))
-  if is_rolling_upgrade:
-    if use_ru_version_during_ru:
+  if is_stack_upgrade:
+    if use_upgrading_version_during_uprade:
       # This is the version going to. In the case of a downgrade, it is the lower version.
       current_version = default("/commandParams/version", None)
-      Logger.info("Because this is a Rolling Upgrade, will use version {0}".format(current_version))
+      Logger.info("Because this is a Stack Upgrade, will use version {0}".format(current_version))
     else:
-      Logger.info("This is a Rolling Upgrade, but keep the version unchanged.")
+      Logger.info("This is a Stack Upgrade, but keep the version unchanged.")
   else:
     if current_version is None:
       # During normal operation, the first installation of services won't yet know about the version, so must rely
@@ -155,7 +155,7 @@ def copy_to_hdfs(name, user_group, owner, file_mode=0444, custom_source_file=Non
         current_version = hdp_version
 
   if current_version is None:
-    message_suffix = "during rolling %s" % str(upgrade_direction) if is_rolling_upgrade else ""
+    message_suffix = "during rolling %s" % str(upgrade_direction) if is_stack_upgrade else ""
     Logger.warning("Cannot copy {0} tarball because unable to determine current version {1}.".format(name, message_suffix))
     return False
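
The renamed flag controls which version the tarball copy targets while a stack upgrade is in progress. A minimal sketch of that selection logic, with Ambari's default() lookups replaced by explicit dict access and the flag shortened to use_target_version; the real function additionally falls back to hdp-select output when no version is found:

    def resolve_tarball_version(command_json, use_target_version=True):
      # upgrade_direction is only present while a stack upgrade or downgrade
      # is in progress.
      command_params = command_json.get("commandParams", {})
      upgrade_direction = command_params.get("upgrade_direction")
      is_stack_upgrade = upgrade_direction is not None
      current_version = command_json.get("hostLevelParams", {}).get("current_version")

      if is_stack_upgrade and use_target_version:
        # The version being moved to; during a downgrade this is the lower one.
        current_version = command_params.get("version")
      return current_version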
 

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/api/services/ClusterStackVersionService.java

@@ -124,7 +124,7 @@ public class ClusterStackVersionService extends BaseService {
 
   /**
    * Handles: POST /{clustername}/stack_versions requests
-   * triggering Finalize during manual Rolling Upgrade
+   * triggering Finalize during manual Stack Upgrade
    *
    * @param body        http body
    * @param headers     http headers

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/checks/CheckDescription.java

@@ -24,7 +24,7 @@ import org.apache.ambari.server.state.stack.PrereqCheckType;
 
 /**
  * Enum that wraps the various type, text and failure messages for the checks
- * done for Rolling Upgrades.
+ * done for Stack Upgrades.
  */
 @SuppressWarnings("serial")
 public enum CheckDescription {

+ 2 - 2
ambari-server/src/main/java/org/apache/ambari/server/controller/AmbariManagementControllerImpl.java

@@ -1490,8 +1490,8 @@ public class AmbariManagementControllerImpl implements AmbariManagementControlle
     if (currentVersion == null) {
       cluster.setCurrentStackVersion(desiredVersion);
     }
-    // Rolling Upgrade: unlike the workflow for creating a cluster, updating a cluster via the API will not
-    // create any ClusterVersionEntity changes because those have to go through the Rolling Upgrade process.
+    // Stack Upgrade: unlike the workflow for creating a cluster, updating a cluster via the API will not
+    // create any ClusterVersionEntity changes because those have to go through the Stack Upgrade process.
 
     boolean requiresHostListUpdate =
         request.getHostNames() != null && !request.getHostNames().isEmpty();

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/ClusterStackVersionResourceProvider.java

@@ -602,7 +602,7 @@ public class ClusterStackVersionResourceProvider extends AbstractControllerResou
 
   /**
    * The only appliance of this method is triggering Finalize during
-   * manual Rolling Upgrade
+   * manual Stack Upgrade
    */
   @Override
   public RequestStatus updateResources(Request request, Predicate predicate)

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/RepositoryVersionResourceProvider.java

@@ -403,7 +403,7 @@ public class RepositoryVersionResourceProvider extends AbstractResourceProvider
         String baseUrl = repositoryEntity.getBaseUrl();
         if (existingRepoUrls.contains(baseUrl)) {
           throw new AmbariException("Base url " + baseUrl + " is already defined for another repository version. " +
-                  "Setting up base urls that contain the same versions of components will cause rolling upgrade to fail.");
+                  "Setting up base urls that contain the same versions of components will cause stack upgrade to fail.");
         }
       }
     }
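
The check above rejects a repository version whose base URL is already registered for another one. A minimal Python sketch of that validation, with illustrative names rather than Ambari's real Java API:

    def validate_repo_base_urls(new_base_urls, existing_base_urls):
      # Reject any base url already registered for another repository version.
      existing = set(existing_base_urls)
      for base_url in new_base_urls:
        if base_url in existing:
          raise ValueError(
              "Base url %s is already defined for another repository version. "
              "Setting up base urls that contain the same versions of components "
              "will cause stack upgrade to fail." % base_url)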

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/orm/dao/HostRoleCommandDAO.java

@@ -455,7 +455,7 @@ public class HostRoleCommandDAO {
   /**
    * Finds the counts of tasks for a request and groups them by stage id.
    * This allows for very efficient loading when there are a huge number of stages
-   * and tasks to iterate (for example, during a Rolling Upgrade).
+   * and tasks to iterate (for example, during a Stack Upgrade).
    * @param requestId the request id
    * @return the map of stage-to-summary objects
    */

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/cluster/ClusterImpl.java

@@ -1477,7 +1477,7 @@ public class ClusterImpl implements Cluster {
           hostVersionDAO.merge(hostVersionEntity);
         }
       } else {
-        // Handle transitions during a Rolling Upgrade
+        // Handle transitions during a Stack Upgrade
 
         // If a host only has one Component to update, that single report can still transition the host version from
         // INSTALLED->UPGRADING->UPGRADED in one shot.

+ 1 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_client.py

@@ -52,7 +52,7 @@ class FalconClientLinux(FalconClient):
     if not params.version or compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') < 0:
       return
 
-    Logger.info("Executing Falcon Client Rolling Upgrade pre-restart")
+    Logger.info("Executing Falcon Client Stack Upgrade pre-restart")
     conf_select.select(params.stack_name, "falcon", params.version)
     hdp_select.select("falcon-client", params.version)
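
Both Falcon scripts follow the same pre-restart pattern: do nothing on stacks older than HDP 2.2, otherwise repoint config and binaries at the target version. A minimal sketch of that guard, using LooseVersion in place of format_hdp_stack_version/compare_versions; select_conf and select_binary are stand-ins for conf_select.select and hdp_select.select:

    from distutils.version import LooseVersion

    def pre_upgrade_restart(target_version, select_conf, select_binary):
      # Nothing to relink when no target version is set or the stack is
      # older than HDP 2.2.
      if not target_version or LooseVersion(target_version) < LooseVersion("2.2.0.0"):
        return
      select_conf("HDP", "falcon", target_version)
      select_binary("falcon-client", target_version)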
 

+ 1 - 1
ambari-server/src/main/resources/common-services/FALCON/0.5.0.2.1/package/scripts/falcon_server.py

@@ -80,7 +80,7 @@ class FalconServerLinux(FalconServer):
     if Script.is_hdp_stack_less_than("2.2"):
       return
 
-    Logger.info("Executing Falcon Server Rolling Upgrade pre-restart")
+    Logger.info("Executing Falcon Server Stack Upgrade pre-restart")
     conf_select.select(params.stack_name, "falcon", params.version)
     hdp_select.select("falcon-server", params.version)
     falcon_server_upgrade.pre_start_restore()

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/hooks/after-INSTALL/scripts/shared_initialization.py

@@ -39,7 +39,7 @@ def setup_hdp_install_directory():
     Execute(format('{sudo} /usr/bin/hdp-select set all `ambari-python-wrap /usr/bin/hdp-select versions | grep ^{stack_version_unformatted} | tail -1`') + ' && ' +
             as_sudo(['touch', SELECT_ALL_PERFORMED_MARKER]),
             only_if=format('ls -d /usr/hdp/{stack_version_unformatted}*'),   # If any HDP version is installed
-            not_if=format("test -f {SELECT_ALL_PERFORMED_MARKER}")           # Do that only once (otherwise we break rolling upgrade logic)
+            not_if=format("test -f {SELECT_ALL_PERFORMED_MARKER}")           # Do that only once (otherwise we break stack upgrade logic)
     )
 
 def setup_config():
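
The only_if/not_if pair above makes "hdp-select set all" run at most once per host. A simplified sketch of that run-once guard with plain subprocess calls; the marker path is illustrative, and the real hook resolves the newest matching version itself and runs under sudo:

    import glob
    import os
    import subprocess

    MARKER = "/var/lib/ambari-agent/data/hdp-select-set-all.performed"

    def select_all_once(stack_version):
      if os.path.exists(MARKER):
        return  # not_if: already performed on this host, never repeat it
      if not glob.glob("/usr/hdp/%s*" % stack_version):
        return  # only_if: skip hosts where no HDP version is installed yet
      subprocess.check_call(["hdp-select", "set", "all", stack_version])
      open(MARKER, "a").close()  # leave the marker so reruns become no-ops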