
AMBARI-8777. Upgrade Pack definition for Clients (ncole)

Nate Cole 10 years ago
parent
commit
259e56298a
26 changed files with 529 additions and 60 deletions
  1. +13 -10  ambari-common/src/main/python/resource_management/libraries/script/script.py
  2. +5 -1    ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java
  3. +13 -13  ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java
  4. +1 -1    ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java
  5. +14 -8   ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java
  6. +4 -2    ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java
  7. +5 -4    ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java
  8. +8 -0    ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py
  9. +6 -0    ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py
  10. +8 -0   ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py
  11. +3 -0   ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py
  12. +4 -1   ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params.py
  13. +8 -0   ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py
  14. +7 -0   ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapreduce2_client.py
  15. +7 -0   ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn_client.py
  16. +3 -0   ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py
  17. +8 -0   ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/tez_client.py
  18. +82 -16 ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml
  19. +2 -2   ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java
  20. +11 -2  ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py
  21. +12 -0  ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py
  22. +9 -0   ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py
  23. +9 -0   ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py
  24. +139 -0 ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json
  25. +9 -0   ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py
  26. +139 -0 ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json

+ 13 - 10
ambari-common/src/main/python/resource_management/libraries/script/script.py

@@ -275,19 +275,22 @@ class Script(object):
     except KeyError:
       pass
 
+    restart_type = ""
+    if config is not None:
+      command_params = config["commandParams"] if "commandParams" in config else None
+      if command_params is not None:
+        restart_type = command_params["restart_type"] if "restart_type" in command_params else ""
+        if restart_type:
+          restart_type = restart_type.encode('ascii', 'ignore')
+
+    rolling_restart = restart_type.lower().startswith("rolling")
+
     if componentCategory and componentCategory.strip().lower() == 'CLIENT'.lower():
+      if rolling_restart:
+        self.pre_rolling_restart(env)
+
       self.install(env)
     else:
-      restart_type = ""
-      if config is not None:
-        command_params = config["commandParams"] if "commandParams" in config else None
-        if command_params is not None:
-          restart_type = command_params["restart_type"] if "restart_type" in command_params else ""
-          if restart_type:
-            restart_type = restart_type.encode('ascii', 'ignore')
-
-      rolling_restart = restart_type.lower().startswith("rolling")
-
       # To remain backward compatible with older stacks, only pass rolling_restart if True.
       if rolling_restart:
         self.stop(env, rolling_restart=rolling_restart)
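
The reordering above pulls the restart_type detection out of the non-client branch so that client components also receive a pre_rolling_restart call before install. A minimal standalone sketch of that detection, using a plain dict in place of Ambari's command config (values are hypothetical, and the ascii encode step is omitted):

# Sketch only: approximates the restart_type handling above outside of the Script class.
def is_rolling_restart(config):
    """True when commandParams/restart_type starts with 'rolling' (case-insensitive)."""
    restart_type = ""
    if config is not None:
        command_params = config.get("commandParams") or {}
        restart_type = command_params.get("restart_type", "") or ""
    return restart_type.lower().startswith("rolling")

print(is_rolling_restart({"commandParams": {"restart_type": "rolling_upgrade"}}))  # True
print(is_rolling_restart({"commandParams": {}}))                                   # False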

+ 5 - 1
ambari-server/src/main/java/org/apache/ambari/server/controller/internal/UpgradeResourceProvider.java

@@ -362,6 +362,10 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
     UpgradeHelper helper = new UpgradeHelper();
 
     List<UpgradeGroupHolder> groups = helper.createUpgrade(cluster, mhr, pack);
+    if (groups.isEmpty()) {
+      throw new AmbariException("There are no upgrade groupings available");
+    }
+
     List<UpgradeGroupEntity> groupEntities = new ArrayList<UpgradeGroupEntity>();
 
     final String version = (String) requestMap.get(UPGRADE_VERSION);
@@ -380,7 +384,7 @@ public class UpgradeResourceProvider extends AbstractControllerResourceProvider
         itemEntity.setTasks(wrapper.getTasksJson());
         itemEntity.setHosts(wrapper.getHostsJson());
         itemEntities.add(itemEntity);
-        
+
         injectVariables(configHelper, cluster, itemEntity);
 
         // upgrade items match a stage

+ 13 - 13
ambari-server/src/main/java/org/apache/ambari/server/state/UpgradeHelper.java

@@ -17,12 +17,11 @@
  */
 package org.apache.ambari.server.state;
 
-import java.text.MessageFormat;
 import java.util.ArrayList;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.LinkedHashSet;
 
 import org.apache.ambari.server.AmbariException;
 import org.apache.ambari.server.controller.internal.RequestResourceProvider;
@@ -42,7 +41,6 @@ import org.apache.ambari.server.controller.utilities.PredicateBuilder;
 import org.apache.ambari.server.controller.utilities.PropertyHelper;
 import org.apache.ambari.server.stack.HostsType;
 import org.apache.ambari.server.stack.MasterHostResolver;
-import org.apache.ambari.server.state.cluster.ClusterImpl;
 import org.apache.ambari.server.state.stack.UpgradePack;
 import org.apache.ambari.server.state.stack.UpgradePack.ProcessingComponent;
 import org.apache.ambari.server.state.stack.upgrade.ClusterGrouping;
@@ -50,7 +48,6 @@ import org.apache.ambari.server.state.stack.upgrade.Grouping;
 import org.apache.ambari.server.state.stack.upgrade.StageWrapper;
 import org.apache.ambari.server.state.stack.upgrade.StageWrapperBuilder;
 import org.apache.ambari.server.state.stack.upgrade.TaskWrapper;
-import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,7 +82,6 @@ public class UpgradeHelper {
       UpgradeGroupHolder groupHolder = new UpgradeGroupHolder();
       groupHolder.name = group.name;
       groupHolder.title = group.title;
-      groups.add(groupHolder);
 
       StageWrapperBuilder builder = group.getBuilder();
 
@@ -104,9 +100,10 @@ public class UpgradeHelper {
           if (null == hostsType) {
             continue;
           }
-          
+
+          Service svc = cluster.getService(service.serviceName);
           ProcessingComponent pc = allTasks.get(service.serviceName).get(component);
-          
+
           // Special case for NAMENODE
           if (service.serviceName.equalsIgnoreCase("HDFS") && component.equalsIgnoreCase("NAMENODE")) {
             // !!! revisit if needed
@@ -120,22 +117,25 @@ public class UpgradeHelper {
 
               // Override the hosts with the ordered collection
               hostsType.hosts = order;
-              
+
             } else {
 //              throw new AmbariException(MessageFormat.format("Could not find active and standby namenodes using hosts: {0}", StringUtils.join(hostsType.hosts, ", ").toString()));
             }
-            
-            builder.add(hostsType, service.serviceName, pc);
-            
+
+            builder.add(hostsType, service.serviceName, svc.isClientOnlyService(), pc);
+
           } else {
-            builder.add(hostsType, service.serviceName, pc);
+            builder.add(hostsType, service.serviceName, svc.isClientOnlyService(), pc);
           }
         }
       }
 
       List<StageWrapper> proxies = builder.build();
 
-      groupHolder.items = proxies;
+      if (!proxies.isEmpty()) {
+        groupHolder.items = proxies;
+        groups.add(groupHolder);
+      }
     }
 
     if (LOG.isDebugEnabled()) {

+ 1 - 1
ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ClusterGrouping.java

@@ -82,7 +82,7 @@ public class ClusterGrouping extends Grouping {
     }
 
     @Override
-    public void add(HostsType hostsType, String service, ProcessingComponent pc) {
+    public void add(HostsType hostsType, String service, boolean clientOnly, ProcessingComponent pc) {
       // !!! no-op in this case
     }
 

+ 14 - 8
ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/ColocatedGrouping.java

@@ -49,10 +49,10 @@ public class ColocatedGrouping extends Grouping {
 
   @Override
   public StageWrapperBuilder getBuilder() {
-    return new MultiHomedHolder(batch);
+    return new MultiHomedBuilder(batch);
   }
 
-  private static class MultiHomedHolder extends StageWrapperBuilder {
+  private static class MultiHomedBuilder extends StageWrapperBuilder {
 
     private Batch batch;
 
@@ -61,12 +61,12 @@ public class ColocatedGrouping extends Grouping {
     private Map<String, List<TaskProxy>> finalBatches = new LinkedHashMap<String, List<TaskProxy>>();
 
 
-    private MultiHomedHolder(Batch batch) {
+    private MultiHomedBuilder(Batch batch) {
       this.batch = batch;
     }
 
     @Override
-    public void add(HostsType hostsType, String service, ProcessingComponent pc) {
+    public void add(HostsType hostsType, String service, boolean clientOnly, ProcessingComponent pc) {
 
       int count = Double.valueOf(Math.ceil(
           (double) batch.percent / 100 * hostsType.hosts.size())).intValue();
@@ -88,6 +88,7 @@ public class ColocatedGrouping extends Grouping {
 
         if (null != pc.preTasks && pc.preTasks.size() > 0) {
           proxy = new TaskProxy();
+          proxy.clientOnly = clientOnly;
           proxy.message = getStageText("Preparing", pc.name, Collections.singleton(host));
           proxy.tasks.addAll(TaskWrapperBuilder.getTaskList(service, pc.name, singleHostsType, pc.preTasks));
           proxy.service = service;
@@ -100,6 +101,7 @@ public class ColocatedGrouping extends Grouping {
           Task t = pc.tasks.get(0);
           if (RestartTask.class.isInstance(t)) {
             proxy = new TaskProxy();
+            proxy.clientOnly = clientOnly;
             proxy.tasks.add(new TaskWrapper(service, pc.name, Collections.singleton(host), t));
             proxy.restart = true;
             proxy.service = service;
@@ -112,6 +114,7 @@ public class ColocatedGrouping extends Grouping {
 
         if (null != pc.postTasks && pc.postTasks.size() > 0) {
           proxy = new TaskProxy();
+          proxy.clientOnly = clientOnly;
           proxy.component = pc.name;
           proxy.service = service;
           proxy.tasks.addAll(TaskWrapperBuilder.getTaskList(service, pc.name, singleHostsType, pc.postTasks));
@@ -130,13 +133,13 @@ public class ColocatedGrouping extends Grouping {
         LOG.debug("RU initial: {}", initialBatch);
         LOG.debug("RU final: {}", finalBatches);
       }
-      
+
       results.addAll(fromProxies(initialBatch));
 
       // !!! TODO when manual tasks are ready
       ManualTask task = new ManualTask();
       task.message = batch.message;
-      
+
       StageWrapper wrapper = new StageWrapper(
           StageWrapper.Type.MANUAL,
           "Validate partial upgrade",
@@ -160,7 +163,9 @@ public class ColocatedGrouping extends Grouping {
         List<StageWrapper> execwrappers = new ArrayList<StageWrapper>();
 
         for (TaskProxy t : entry.getValue()) {
-          serviceChecks.add(t.service);
+          if (!t.clientOnly) {
+            serviceChecks.add(t.service);
+          }
 
           if (!t.restart) {
             if (null == wrapper) {
@@ -174,7 +179,7 @@ public class ColocatedGrouping extends Grouping {
         if (null != wrapper) {
           results.add(wrapper);
         }
-        
+
         if (execwrappers.size() > 0) {
           results.addAll(execwrappers);
         }
@@ -209,6 +214,7 @@ public class ColocatedGrouping extends Grouping {
     private String service;
     private String component;
     private String message;
+    private boolean clientOnly = false;
     private List<TaskWrapper> tasks = new ArrayList<TaskWrapper>();
 
     @Override

+ 4 - 2
ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/Grouping.java

@@ -68,7 +68,7 @@ public class Grouping {
      * @param pc the ProcessingComponent derived from the upgrade pack.
      */
     @Override
-    public void add(HostsType hostsType, String service, ProcessingComponent pc) {
+    public void add(HostsType hostsType, String service, boolean clientOnly, ProcessingComponent pc) {
       if (null != pc.preTasks && pc.preTasks.size() > 0) {
         List<TaskWrapper> preTasks = TaskWrapperBuilder.getTaskList(service, pc.name, hostsType, pc.preTasks);
         Set<String> preTasksEffectiveHosts = TaskWrapperBuilder.getEffectiveHosts(preTasks);
@@ -104,7 +104,9 @@ public class Grouping {
         stages.add(stage);
       }
 
-      serviceChecks.add(service);
+      if (!clientOnly) {
+        serviceChecks.add(service);
+      }
     }
 
     @Override

+ 5 - 4
ambari-server/src/main/java/org/apache/ambari/server/state/stack/upgrade/StageWrapperBuilder.java

@@ -31,11 +31,12 @@ public abstract class StageWrapperBuilder {
   /**
    * Adds a processing component that will be built into stage wrappers.
    *
-   * @param hostsType the hosts, along with their type
-   * @param service the service name
-   * @param pc the ProcessingComponent derived from the upgrade pack.
+   * @param hostsType   the hosts, along with their type
+   * @param service     the service name
+   * @param clientOnly  whether the service is client only, no service checks
+   * @param pc          the ProcessingComponent derived from the upgrade pack
    */
-  public abstract void add(HostsType hostsType, String service, ProcessingComponent pc);
+  public abstract void add(HostsType hostsType, String service, boolean clientOnly, ProcessingComponent pc);
 
   /**
    * Builds the stage wrappers.

+ 8 - 0
ambari-server/src/main/resources/common-services/HBASE/0.96.0.2.0/package/scripts/hbase_client.py

@@ -25,6 +25,14 @@ from hbase import hbase
 
          
 class HbaseClient(Script):
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hbase-client {version}"))
+
   def install(self, env):
     self.install_packages(env)
     self.configure(env)
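
Every client script in this commit gains the same pre_rolling_restart shape: set params, then run hdp-select only when the requested version is an HDP 2.2+ stack. A hedged, standalone sketch of that version gate follows; the real scripts rely on resource_management's format_hdp_stack_version/compare_versions and Execute(format(...)), while this simplified comparison is only illustrative:

# Simplified stand-in for the 2.2.0.0 gate used by the pre_rolling_restart methods above.
def should_run_hdp_select(version, minimum="2.2.0.0"):
    """True when a stack version such as '2.2.1.0-2067' is at or above 'minimum'."""
    if not version:
        return False
    numeric = version.split("-")[0]  # drop any build suffix
    return [int(p) for p in numeric.split(".")] >= [int(p) for p in minimum.split(".")]

if should_run_hdp_select("2.2.1.0-2067"):              # version string taken from the test config
    print("hdp-select set hbase-client 2.2.1.0-2067")  # command Execute(format(...)) would issue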

+ 6 - 0
ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/hdfs_client.py

@@ -30,6 +30,12 @@ class HdfsClient(Script):
     env.set_params(params)
     self.config(env)
 
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-client {version}"))
+
   def start(self, env, rolling_restart=False):
     import params
 

+ 8 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive_client.py

@@ -23,6 +23,14 @@ from resource_management import *
 from hive import hive
 
 class HiveClient(Script):
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-client {version}"))
+
   def install(self, env):
     import params
     self.install_packages(env, exclude_packages=params.hive_exclude_packages)

+ 3 - 0
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/params.py

@@ -32,6 +32,9 @@ hdp_stack_version = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
 stack_is_hdp21 = hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.1') >= 0 and compare_versions(hdp_stack_version, '2.2') < 0
 
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
 # Hadoop params
 # TODO, this logic should initialize these parameters in a file inside the HDP 2.2 stack.
 if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >=0:
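
The new version parameter is read from the command's commandParams with resource_management's default() helper and stays None outside a rolling upgrade restart. A rough standalone equivalent of that lookup (it takes the config dict explicitly, unlike the library helper, which reads the script's own config):

# Sketch: approximate default("/commandParams/version", None) against a plain dict.
def lookup(path, fallback, config):
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

command = {"commandParams": {"version": "2.2.1.0-2067"}}  # hypothetical payload
print(lookup("/commandParams/version", None, command))    # -> 2.2.1.0-2067
print(lookup("/commandParams/version", None, {}))         # -> None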

+ 4 - 1
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/params.py

@@ -29,6 +29,9 @@ tmp_dir = Script.get_tmp_dir()
 hdp_stack_version = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
 
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
 #hadoop params
 if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
@@ -69,4 +72,4 @@ HdfsDirectory = functools.partial(
   keytab = hdfs_user_keytab,
   kinit_path_local = kinit_path_local,
   bin_dir = hadoop_bin_dir
-)
+)

+ 8 - 0
ambari-server/src/main/resources/common-services/PIG/0.12.0.2.0/package/scripts/pig_client.py

@@ -25,6 +25,14 @@ from pig import pig
 
 
 class PigClient(Script):
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-client {version}"))
+
   def install(self, env):
     self.install_packages(env)
     self.configure(env)

+ 7 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/mapreduce2_client.py

@@ -26,6 +26,13 @@ from yarn import yarn
 
 class MapReduce2Client(Script):
 
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-client {version}"))
+
   def install(self, env):
     self.install_packages(env)
     self.configure(env)

+ 7 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/YARN/package/scripts/yarn_client.py

@@ -26,6 +26,13 @@ from yarn import yarn
 
 class YarnClient(Script):
 
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-client {version}"))
+
   def install(self, env):
     self.install_packages(env)
     self.configure(env)

+ 3 - 0
ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/params.py

@@ -28,6 +28,9 @@ config = Script.get_config()
 hdp_stack_version = str(config['hostLevelParams']['stack_version'])
 hdp_stack_version = format_hdp_stack_version(hdp_stack_version)
 
+# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
+version = default("/commandParams/version", None)
+
 if hdp_stack_version != "" and compare_versions(hdp_stack_version, '2.2') >= 0:
   hadoop_bin_dir = "/usr/hdp/current/hadoop-client/bin"
 else:

+ 8 - 0
ambari-server/src/main/resources/stacks/HDP/2.1/services/TEZ/package/scripts/tez_client.py

@@ -24,6 +24,14 @@ from resource_management import *
 from tez import tez
 
 class TezClient(Script):
+
+  def pre_rolling_restart(self, env):
+    import params
+    env.set_params(params)
+
+    if params.version and compare_versions(format_hdp_stack_version(params.version), '2.2.0.0') >= 0:
+      Execute(format("hdp-select set hadoop-client {version}"))
+
   def install(self, env):
     self.install_packages(env)
     self.configure(env)

+ 82 - 16
ambari-server/src/main/resources/stacks/HDP/2.2/upgrades/upgrade-2.2.xml

@@ -16,18 +16,6 @@
    limitations under the License.
 -->
 
-<!-- Sample Usage of tasks.
-<task xsi:type="execute">
-  <command>echo 'Hello World'</command>
-</task>
-<task xsi:type="configure">
-  <key>prop1</key>
-  <value>value1</value>
-</task>
-<task xsi:type="manual">
-  <message>Please perform the following manual step</message>
-</task>
--->
 
 <upgrade xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
   <target>2.2.*.*</target>
@@ -78,15 +66,46 @@
         <message>Please run additional tests</message>
       </batch>
     </group>
-    
+
+    <group name="CLIENTS" title="Client Components">
+      <service name="HDFS">
+        <component>HDFS_CLIENT</component>
+      </service>
+
+      <service name="YARN">
+        <component>YARN_CLIENT</component>
+      </service>
+
+      <service name="MAPREDUCE2">
+        <component>MAPREDUCE2_CLIENT</component>
+       </service>
+
+       <service name="TEZ">
+         <component>TEZ_CLIENT</component>
+       </service>
+
+       <service name="HBASE">
+         <component>HBASE_CLIENT</component>
+       </service>
+
+       <service name="PIG">
+         <component name="PIG" />
+       </service>
+
+       <service name="HIVE">
+         <component>HIVE_CLIENT</component>
+         <component>HCAT</component>
+       </service>
+    </group>
+
     <group xsi:type="cluster" name="POST_CLUSTER" title="Finalize Upgrade">
-      <!-- 
+    <!--
       <execute-stage title="Confirm Finalize">
         <task xsi:type="manual">
           <message>Please confirm you are ready to finalize</message>
         </task>
       </execute-stage>
-      -->      
+    -->  
       <execute-stage service="HDFS" component="NAMENODE" title="Execute HDFS Finalize">
         <task xsi:type="execute" hosts="master">
           <first>su - {{hadoop-env/hdfs_user}} -c 'hdfs dfsadmin -rollingUpgrade finalize'</first>
@@ -131,7 +150,6 @@
 
     <service name="HDFS">
       <component name="NAMENODE">
-
         <pre-upgrade>
           <!-- Backup the image,
           Enter Safemode if not already in it,
@@ -198,6 +216,12 @@
           <task xsi:type="restart" />
         </upgrade>
       </component>
+      
+      <component name="HDFS_CLIENT">
+        <upgrade>
+          <task xsi:type="restart" />
+        </upgrade>
+      </component>
 
       <component name="JOURNALNODE">
         <upgrade>
@@ -213,6 +237,12 @@
           <task xsi:type="restart" />
         </upgrade>
       </component>
+      
+      <component name="MAPREDUCE2_CLIENT">
+        <upgrade>
+          <task xsi:type="restart" />
+        </upgrade>
+      </component>
     </service>
 
     <service name="YARN">
@@ -241,7 +271,13 @@
         </upgrade>
       </component>
 
+      <component name="YARN_CLIENT">
+        <upgrade>
+          <task xsi:type="restart" />
+        </upgrade>
+      </component>
     </service>
+    
     <service name="HBASE">
       <component name="HBASE_MASTER">
         <pre-upgrade>
@@ -259,6 +295,36 @@
           <task xsi:type="restart" />
         </upgrade>
       </component>
+      
+      <component name="HBASE_CLIENT">
+        <upgrade>
+          <task xsi:type="restart" />
+        </upgrade>
+      </component>
+    </service>
+    
+    <service name="TEZ">
+      <component name="TEZ_CLIENT">
+        <upgrade>
+          <task xsi:type="restart" />
+        </upgrade>
+      </component>
+    </service>
+    
+    <service name="PIG">
+      <component name="PIG">
+        <upgrade>
+          <task xsi:type="restart" />
+        </upgrade>
+      </component>
+    </service>
+    
+    <service name="HIVE">
+      <component name="HIVE_CLIENT">
+        <upgrade>
+          <task xsi:type="restart" />
+        </upgrade>
+      </component>
     </service>
   </processing>
 </upgrade>
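
The CLIENTS group added above only names the service/component pairs to schedule; the actual work comes from the per-component <upgrade> restart tasks declared later in the same file. A small, hedged sketch of pulling those pairs out of the pack with the standard library (file path assumed):

# Sketch: print the service/component pairs declared in the CLIENTS group.
import xml.etree.ElementTree as ET

tree = ET.parse("upgrade-2.2.xml")  # path assumed; adjust as needed
for group in tree.getroot().iter("group"):
    if group.get("name") != "CLIENTS":
        continue
    for service in group.findall("service"):
        for component in service.findall("component"):
            # components appear either as element text or via a name attribute (PIG uses the latter)
            name = component.get("name") or (component.text or "").strip()
            print(service.get("name"), name)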

+ 2 - 2
ambari-server/src/test/java/org/apache/ambari/server/controller/internal/UpgradeResourceProviderTest.java

@@ -166,7 +166,7 @@ public class UpgradeResourceProviderTest {
     UpgradeEntity entity = upgrades.get(0);
     assertEquals(cluster.getClusterId(), entity.getClusterId().longValue());
 
-    assertEquals(5, entity.getUpgradeGroups().size());
+    assertEquals(4, entity.getUpgradeGroups().size());
 
     UpgradeGroupEntity group = entity.getUpgradeGroups().get(1);
     assertEquals(4, group.getItems().size());
@@ -236,7 +236,7 @@ public class UpgradeResourceProviderTest {
     ResourceProvider upgradeGroupResourceProvider = new UpgradeGroupResourceProvider(amc);
     resources = upgradeGroupResourceProvider.getResources(request, predicate);
 
-    assertEquals(5, resources.size());
+    assertEquals(4, resources.size());
     res = resources.iterator().next();
     assertNotNull(res.getPropertyValue("UpgradeGroup/status"));
     assertNotNull(res.getPropertyValue("UpgradeGroup/group_id"));

+ 11 - 2
ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_client.py

@@ -189,7 +189,16 @@ class TestHBaseClient(RMFTestCase):
                               content='log4jproperties\nline2'
     )
     self.assertNoMoreResources()
-    
 
-    
 
+  def test_upgrade(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_client.py",
+                   classname = "HbaseClient",
+                   command = "restart",
+                   config_file="client-upgrade.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES)
+
+    self.assertResourceCalled("Execute", "hdp-select set hbase-client 2.2.1.0-2067")
+
+    # for now, it's enough that hdp-select is confirmed

+ 12 - 0
ambari-server/src/test/python/stacks/2.0.6/HDFS/test_hdfs_client.py

@@ -66,3 +66,15 @@ class Test(RMFTestCase):
                               action = ['delete'],
                               )
     self.assertNoMoreResources()
+
+  def test_upgrade(self):
+    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hdfs_client.py",
+                   classname = "HdfsClient",
+                   command = "restart",
+                   config_file="client-upgrade.json",
+                   hdp_stack_version = self.STACK_VERSION,
+                   target = RMFTestCase.TARGET_COMMON_SERVICES)
+
+    self.assertResourceCalled("Execute", "hdp-select set hadoop-client 2.2.1.0-2067")
+
+    # for now, it's enough that hdp-select is confirmed

+ 9 - 0
ambari-server/src/test/python/stacks/2.0.6/YARN/test_mapreduce2_client.py

@@ -309,3 +309,12 @@ class TestMapReduce2Client(RMFTestCase):
                               )
     self.assertNoMoreResources()
 
+  def test_upgrade(self):
+    self.executeScript("2.0.6/services/YARN/package/scripts/mapreduce2_client.py",
+                   classname = "MapReduce2Client",
+                   command = "restart",
+                   config_file="client-upgrade.json")
+
+    self.assertResourceCalled("Execute", "hdp-select set hadoop-client 2.2.1.0-2067")
+
+    # for now, it's enough that hdp-select is confirmed

+ 9 - 0
ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py

@@ -445,3 +445,12 @@ class TestYarnClient(RMFTestCase):
     self.assertNoMoreResources()
 
 
+  def test_upgrade(self):
+    self.executeScript("2.0.6/services/YARN/package/scripts/yarn_client.py",
+                   classname = "YarnClient",
+                   command = "restart",
+                   config_file="client-upgrade.json")
+
+    self.assertResourceCalled("Execute", "hdp-select set hadoop-client 2.2.1.0-2067")
+
+    # for now, it's enough that hdp-select is confirmed

File diff suppressed because it is too large
+ 139 - 0
ambari-server/src/test/python/stacks/2.0.6/configs/client-upgrade.json


+ 9 - 0
ambari-server/src/test/python/stacks/2.1/TEZ/test_tez_client.py

@@ -56,3 +56,12 @@ class TestTezClient(RMFTestCase):
     self.assertNoMoreResources()
 
 
+  def test_upgrade(self):
+    self.executeScript("2.1/services/TEZ/package/scripts/tez_client.py",
+                       classname = "TezClient",
+                       command = "restart",
+                       config_file="client-upgrade.json")
+
+    self.assertResourceCalled("Execute", "hdp-select set hadoop-client 2.2.1.0-2067")
+
+    # for now, it's enough that hdp-select is confirmed

File diff suppressed because it is too large
+ 139 - 0
ambari-server/src/test/python/stacks/2.1/configs/client-upgrade.json


Some files were not shown because too many files changed in this diff