@@ -92,10 +92,13 @@ import com.google.inject.persist.UnitOfWork;
 * Tests upgrade-related server side actions
 */
public class UpgradeActionTest {
+  private static final String clusterName = "c1";
+
  private static final String HDP_2_1_1_0 = "2.1.1.0-1";
  private static final String HDP_2_1_1_1 = "2.1.1.1-2";

  private static final String HDP_2_2_0_1 = "2.2.0.1-3";
+  private static final String HDP_2_2_0_2 = "2.2.0.2-4";

  private static final StackId HDP_21_STACK = new StackId("HDP-2.1.1");
  private static final StackId HDP_22_STACK = new StackId("HDP-2.2.0");
@@ -161,7 +164,6 @@ public class UpgradeActionTest {
  }

  private void makeDowngradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
-    String clusterName = "c1";
    String hostName = "h1";

    clusters.addCluster(clusterName, sourceStack);
@@ -200,8 +202,71 @@ public class UpgradeActionTest {
    hostVersionDAO.create(entity);
  }

+  private void makeTwoUpgradesWhereLastDidNotComplete(StackId sourceStack, String sourceRepo, StackId midStack, String midRepo, StackId targetStack, String targetRepo) throws Exception {
+    String hostName = "h1";
+
+    clusters.addCluster(clusterName, sourceStack);
+
+    Cluster c = clusters.getCluster(clusterName);
+
+    // add a host
+    clusters.addHost(hostName);
+
+    Host host = clusters.getHost(hostName);
+
+    Map<String, String> hostAttributes = new HashMap<String, String>();
+    hostAttributes.put("os_family", "redhat");
+    hostAttributes.put("os_release_version", "6");
+    host.setHostAttributes(hostAttributes);
+    host.persist();
+
+    // Create the starting repo version
+    m_helper.getOrCreateRepositoryVersion(sourceStack, sourceRepo);
+    c.createClusterVersion(sourceStack, sourceRepo, "admin", RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.CURRENT);
+
+    // Start upgrading the mid repo
+    m_helper.getOrCreateRepositoryVersion(midStack, midRepo);
+    c.setDesiredStackVersion(midStack);
+    c.createClusterVersion(midStack, midRepo, "admin", RepositoryVersionState.INSTALLING);
+    c.transitionClusterVersion(midStack, midRepo, RepositoryVersionState.INSTALLED);
+    c.transitionClusterVersion(midStack, midRepo, RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(midStack, midRepo, RepositoryVersionState.UPGRADED);
+    c.transitionClusterVersion(midStack, midRepo, RepositoryVersionState.CURRENT);
+
+    // Set original version as INSTALLED
+    c.transitionClusterVersion(sourceStack, sourceRepo, RepositoryVersionState.INSTALLED);
+
+    // Notice that we have not yet changed the cluster current stack to the mid stack to simulate
+    // the user skipping this step.
+
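+    // Start upgrading the target repo, but stop before finalizing: it only reaches UPGRADED, never CURRENT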
+    m_helper.getOrCreateRepositoryVersion(targetStack, targetRepo);
+    c.setDesiredStackVersion(targetStack);
+    c.createClusterVersion(targetStack, targetRepo, "admin", RepositoryVersionState.INSTALLING);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.INSTALLED);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.UPGRADING);
+    c.transitionClusterVersion(targetStack, targetRepo, RepositoryVersionState.UPGRADED);
+
+    // Create a host version for the starting repo in INSTALL_FAILED
+    HostVersionEntity entitySource = new HostVersionEntity();
+    entitySource.setHostEntity(hostDAO.findByName(hostName));
+    entitySource.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(sourceStack, sourceRepo));
+    entitySource.setState(RepositoryVersionState.INSTALL_FAILED);
+    hostVersionDAO.create(entitySource);
+
+    // Create a host version for the mid repo in CURRENT
+    c.mapHostVersions(Collections.singleton(hostName), c.getCurrentClusterVersion(),
+        RepositoryVersionState.CURRENT);
+
+    // Create a host version for the target repo in UPGRADED
+    HostVersionEntity entityTarget = new HostVersionEntity();
+    entityTarget.setHostEntity(hostDAO.findByName(hostName));
+    entityTarget.setRepositoryVersion(repoVersionDAO.findByStackAndVersion(targetStack, targetRepo));
+    entityTarget.setState(RepositoryVersionState.UPGRADED);
+    hostVersionDAO.create(entityTarget);
+  }
+
  private void makeUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
-    String clusterName = "c1";
    String hostName = "h1";

    Clusters clusters = m_injector.getInstance(Clusters.class);
@@ -272,7 +337,6 @@ public class UpgradeActionTest {
  }

  private void makeCrossStackUpgradeCluster(StackId sourceStack, String sourceRepo, StackId targetStack, String targetRepo) throws Exception {
-    String clusterName = "c1";
    String hostName = "h1";

    Clusters clusters = m_injector.getInstance(Clusters.class);
@@ -356,7 +420,7 @@ public class UpgradeActionTest {
    RepositoryVersionEntity targetRve = repoVersionDAO.findByStackNameAndVersion("HDP", targetRepo);
    Assert.assertNotNull(targetRve);

-    Cluster cluster = clusters.getCluster("c1");
+    Cluster cluster = clusters.getCluster(clusterName);

    // Install ZK and HDFS with some components
    Service zk = installService(cluster, "ZOOKEEPER");
@@ -391,7 +455,7 @@ public class UpgradeActionTest {
    String userName = "admin";
    roleParams.put(ServerAction.ACTION_USER_NAME, userName);
    executionCommand.setRoleParams(roleParams);
-    executionCommand.setClusterName("c1");
+    executionCommand.setClusterName(clusterName);

    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -425,10 +489,12 @@ public class UpgradeActionTest {
    Map<String, String> commandParams = new HashMap<String, String>();
    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "downgrade");
    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, sourceRepo);
+    commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
+    commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());

    ExecutionCommand executionCommand = new ExecutionCommand();
    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName("c1");
+    executionCommand.setClusterName(clusterName);

    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -441,7 +507,7 @@ public class UpgradeActionTest {
    assertNotNull(report);
    assertEquals(HostRoleStatus.COMPLETED.name(), report.getStatus());

-    for (HostVersionEntity entity : hostVersionDAO.findByClusterAndHost("c1", "h1")) {
+    for (HostVersionEntity entity : hostVersionDAO.findByClusterAndHost(clusterName, "h1")) {
      if (entity.getRepositoryVersion().getVersion().equals(sourceRepo)) {
        assertEquals(RepositoryVersionState.CURRENT, entity.getState());
      } else if (entity.getRepositoryVersion().getVersion().equals(targetRepo)) {
@@ -449,7 +515,7 @@ public class UpgradeActionTest {
      }
    }

-    for (ClusterVersionEntity entity : clusterVersionDAO.findByCluster("c1")) {
+    for (ClusterVersionEntity entity : clusterVersionDAO.findByCluster(clusterName)) {
      if (entity.getRepositoryVersion().getVersion().equals(sourceRepo)) {
        assertEquals(RepositoryVersionState.CURRENT, entity.getState());
      } else if (entity.getRepositoryVersion().getVersion().equals(targetRepo)) {
@@ -458,6 +524,50 @@ public class UpgradeActionTest {
      }
    }

+  /**
+   * Tests the case where a customer upgrades from HDP 2.1 to 2.2 (e.g., 2.2.0.0) but skips the finalize step
+   * that calls "Save DB State", so the cluster's current stack remains HDP 2.1. The customer then manually
+   * marks HDP 2.2 as CURRENT in the cluster_version table, begins another upgrade to 2.2.0.2, and then downgrades.
+   * During the downgrade the original stack is still 2.1 while the stack of the version marked CURRENT is 2.2;
+   * because of this mismatch the downgrade must not delete configs, and finalizing it is expected to fail,
+   * reporting that the previous upgrade was never completed.
+   * @throws Exception
+   */
+  @Test
+  public void testFinalizeDowngradeWhenDidNotFinalizePreviousUpgrade() throws Exception {
+    StackId sourceStack = HDP_21_STACK;
+    StackId midStack = HDP_22_STACK;
+    StackId targetStack = HDP_22_STACK;
+
+    String sourceRepo = HDP_2_1_1_0;
+    String midRepo = HDP_2_2_0_1;
+    String targetRepo = HDP_2_2_0_2;
+
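+    // Setup: source -> mid was marked CURRENT without ever moving the cluster's current stack,
+    // then mid -> target was left at UPGRADED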
+    makeTwoUpgradesWhereLastDidNotComplete(sourceStack, sourceRepo, midStack, midRepo, targetStack, targetRepo);
+
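+    // Attempt to finalize a downgrade back to the mid repo; the unfinished previous upgrade should make this fail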
+    Map<String, String> commandParams = new HashMap<String, String>();
+    commandParams.put(FinalizeUpgradeAction.UPGRADE_DIRECTION_KEY, "downgrade");
+    commandParams.put(FinalizeUpgradeAction.VERSION_KEY, midRepo);
+    commandParams.put(FinalizeUpgradeAction.ORIGINAL_STACK_KEY, sourceStack.getStackId());
+    commandParams.put(FinalizeUpgradeAction.TARGET_STACK_KEY, targetStack.getStackId());
+
+    ExecutionCommand executionCommand = new ExecutionCommand();
+    executionCommand.setCommandParams(commandParams);
+    executionCommand.setClusterName(clusterName);
+
+    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
+    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
+
+    FinalizeUpgradeAction action = m_injector.getInstance(FinalizeUpgradeAction.class);
+    action.setExecutionCommand(executionCommand);
+    action.setHostRoleCommand(hostRoleCommand);
+
+    CommandReport report = action.execute(null);
+    assertNotNull(report);
+    assertEquals(HostRoleStatus.FAILED.name(), report.getStatus());
+    assertTrue(report.getStdErr().contains(FinalizeUpgradeAction.PREVIOUS_UPGRADE_NOT_COMPLETED_MSG));
+  }
+
  @Test
  public void testFinalizeUpgrade() throws Exception {
    StackId sourceStack = HDP_21_STACK;
@@ -471,7 +581,7 @@ public class UpgradeActionTest {
    AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
    AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
    Host host = clusters.getHost("h1");
-    Cluster cluster = clusters.getCluster("c1");
+    Cluster cluster = clusters.getCluster(clusterName);

    RepositoryInfo repo = metaInfo.getRepository(sourceStack.getStackName(), sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
    assertEquals(HDP_211_CENTOS6_REPO_URL, repo.getBaseUrl());
@@ -484,7 +594,7 @@ public class UpgradeActionTest {
    ExecutionCommand executionCommand = new ExecutionCommand();
    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName("c1");
+    executionCommand.setClusterName(clusterName);

    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -534,7 +644,7 @@ public class UpgradeActionTest {
    AmbariMetaInfo metaInfo = m_injector.getInstance(AmbariMetaInfo.class);
    AmbariCustomCommandExecutionHelper helper = m_injector.getInstance(AmbariCustomCommandExecutionHelper.class);
    Host host = clusters.getHost("h1");
-    Cluster cluster = clusters.getCluster("c1");
+    Cluster cluster = clusters.getCluster(clusterName);

    RepositoryInfo repo = metaInfo.getRepository(sourceStack.getStackName(),
        sourceStack.getStackVersion(), "redhat6", sourceStack.getStackId());
@@ -548,7 +658,7 @@ public class UpgradeActionTest {
    ExecutionCommand executionCommand = new ExecutionCommand();
    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName("c1");
+    executionCommand.setClusterName(clusterName);

    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
    hostRoleCommand.setExecutionCommandWrapper(new ExecutionCommandWrapper(executionCommand));
@@ -584,7 +694,7 @@ public class UpgradeActionTest {
    makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);

-    Cluster cluster = clusters.getCluster("c1");
+    Cluster cluster = clusters.getCluster(clusterName);

    // setup the cluster for the upgrade across stacks
    cluster.setCurrentStackVersion(sourceStack);
@@ -598,7 +708,7 @@ public class UpgradeActionTest {
    ExecutionCommand executionCommand = new ExecutionCommand();
    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName("c1");
+    executionCommand.setClusterName(clusterName);

    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
@@ -635,7 +745,7 @@ public class UpgradeActionTest {
    String targetRepo = HDP_2_2_0_1;

    makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);
-    Cluster cluster = clusters.getCluster("c1");
+    Cluster cluster = clusters.getCluster(clusterName);

    // install HDFS with some components
    Service service = installService(cluster, "HDFS");
@@ -656,7 +766,7 @@ public class UpgradeActionTest {
    createConfigs(cluster);

    // verify we have configs in both HDP stacks
-    cluster = clusters.getCluster("c1");
+    cluster = clusters.getCluster(clusterName);
    Collection<Config> configs = cluster.getAllConfigs();
    assertEquals(8, configs.size());
@@ -668,7 +778,7 @@ public class UpgradeActionTest {
    ExecutionCommand executionCommand = new ExecutionCommand();
    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName("c1");
+    executionCommand.setClusterName(clusterName);

    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);
@@ -676,7 +786,7 @@ public class UpgradeActionTest {
    HostVersionDAO dao = m_injector.getInstance(HostVersionDAO.class);

-    List<HostVersionEntity> hosts = dao.findByClusterStackAndVersion("c1", targetStack, targetRepo);
+    List<HostVersionEntity> hosts = dao.findByClusterStackAndVersion(clusterName, targetStack, targetRepo);
    assertFalse(hosts.isEmpty());
    for (HostVersionEntity hve : hosts) {
      assertFalse(hve.getState() == RepositoryVersionState.INSTALLED);
@@ -699,11 +809,11 @@ public class UpgradeActionTest {
    assertEquals(sourceStack, desiredStackId);

    // verify we have configs in only 1 stack
-    cluster = clusters.getCluster("c1");
+    cluster = clusters.getCluster(clusterName);
    configs = cluster.getAllConfigs();
    assertEquals(4, configs.size());

-    hosts = dao.findByClusterStackAndVersion("c1", targetStack, targetRepo);
+    hosts = dao.findByClusterStackAndVersion(clusterName, targetStack, targetRepo);
    assertFalse(hosts.isEmpty());
    for (HostVersionEntity hve : hosts) {
      assertTrue(hve.getState() == RepositoryVersionState.INSTALLED);
@@ -726,7 +836,7 @@ public class UpgradeActionTest {
    makeCrossStackUpgradeCluster(sourceStack, sourceRepo, targetStack, targetRepo);

-    Cluster cluster = clusters.getCluster("c1");
+    Cluster cluster = clusters.getCluster(clusterName);

    Service service = installService(cluster, "HDFS");
    addServiceComponent(cluster, service, "NAMENODE");
@@ -749,14 +859,14 @@ public class UpgradeActionTest {
    // inject an unhappy path where the cluster repo version is still UPGRADING
    // even though all of the hosts are UPGRADED
    ClusterVersionEntity upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(
-        "c1", HDP_22_STACK, targetRepo);
+        clusterName, HDP_22_STACK, targetRepo);

    upgradingClusterVersion.setState(RepositoryVersionState.UPGRADING);
    upgradingClusterVersion = clusterVersionDAO.merge(upgradingClusterVersion);

    // verify the conditions for the test are met properly
-    upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion("c1", HDP_22_STACK, targetRepo);
-    List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion("c1", HDP_22_STACK, targetRepo);
+    upgradingClusterVersion = clusterVersionDAO.findByClusterAndStackAndVersion(clusterName, HDP_22_STACK, targetRepo);
+    List<HostVersionEntity> hostVersions = hostVersionDAO.findByClusterStackAndVersion(clusterName, HDP_22_STACK, targetRepo);

    assertEquals(RepositoryVersionState.UPGRADING, upgradingClusterVersion.getState());
    assertTrue(hostVersions.size() > 0);
@@ -774,7 +884,7 @@ public class UpgradeActionTest {
    ExecutionCommand executionCommand = new ExecutionCommand();
    executionCommand.setCommandParams(commandParams);
-    executionCommand.setClusterName("c1");
+    executionCommand.setClusterName(clusterName);

    HostRoleCommand hostRoleCommand = hostRoleCommandFactory.create(null, null, null, null);