@@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -64,8 +65,10 @@ import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMWithAMS;
 import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
@@ -83,12 +86,15 @@ import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 
 public class TestSchedulerUtils {
 
   private static final Log LOG = LogFactory.getLog(TestSchedulerUtils.class);
 
+  private RMContext rmContext = getMockRMContext();
+
   @Test (timeout = 30000)
   public void testNormalizeRequest() {
     ResourceCalculator resourceCalculator = new DefaultResourceCalculator();
@@ -206,6 +212,9 @@ public class TestSchedulerUtils {
       // set queue accessible node labesl to [x, y]
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
+      rmContext.getNodeLabelManager().addToCluserNodeLabels(
+          ImmutableSet.of(NodeLabel.newInstance("x"),
+              NodeLabel.newInstance("y")));
       Resource resource = Resources.createResource(
           0,
           YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
@@ -213,22 +222,44 @@ public class TestSchedulerUtils {
           mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
 
       resReq.setNodeLabelExpression("y");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
 
       resReq.setNodeLabelExpression("");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
 
       resReq.setNodeLabelExpression(" ");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       e.printStackTrace();
       fail("Should be valid when request labels is a subset of queue labels");
+    } finally {
+      rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
+          Arrays.asList("x", "y"));
+    }
+
+    // same as above, but cluster node labels don't contains label being
+    // requested. should fail
+    try {
+      // set queue accessible node labesl to [x, y]
+      queueAccessibleNodeLabels.clear();
+      queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
+      Resource resource = Resources.createResource(
+          0,
+          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+      ResourceRequest resReq = BuilderUtils.newResourceRequest(
+          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+      resReq.setNodeLabelExpression("x");
+      SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
+          scheduler, rmContext);
+
+      fail("Should fail");
+    } catch (InvalidResourceRequestException e) {
     }
 
     // queue has labels, failed cases (when ask a label not included by queue)
@@ -236,6 +267,9 @@ public class TestSchedulerUtils {
       // set queue accessible node labesl to [x, y]
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
+      rmContext.getNodeLabelManager().addToCluserNodeLabels(
+          ImmutableSet.of(NodeLabel.newInstance("x"),
+              NodeLabel.newInstance("y")));
 
       Resource resource = Resources.createResource(
           0,
@@ -244,9 +278,12 @@ public class TestSchedulerUtils {
           mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("z");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
+    } finally {
+      rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
+          Arrays.asList("x", "y"));
     }
 
     // we don't allow specify more than two node labels in a single expression
@@ -255,6 +292,9 @@ public class TestSchedulerUtils {
       // set queue accessible node labesl to [x, y]
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
+      rmContext.getNodeLabelManager().addToCluserNodeLabels(
+          ImmutableSet.of(NodeLabel.newInstance("x"),
+              NodeLabel.newInstance("y")));
 
       Resource resource = Resources.createResource(
           0,
@@ -263,9 +303,12 @@ public class TestSchedulerUtils {
           mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x && y");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
+    } finally {
+      rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
+          Arrays.asList("x", "y"));
     }
 
     // queue doesn't have label, succeed (when request no label)
@@ -280,15 +323,15 @@ public class TestSchedulerUtils {
       ResourceRequest resReq = BuilderUtils.newResourceRequest(
           mock(Priority.class), ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
 
       resReq.setNodeLabelExpression("");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
 
       resReq.setNodeLabelExpression(" ");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       e.printStackTrace();
       fail("Should be valid when request labels is empty");
@@ -299,6 +342,9 @@ public class TestSchedulerUtils {
       // set queue accessible node labels to empty
       queueAccessibleNodeLabels.clear();
 
+      rmContext.getNodeLabelManager().addToCluserNodeLabels(
+          ImmutableSet.of(NodeLabel.newInstance("x")));
+
       Resource resource = Resources.createResource(
           0,
           YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
@@ -306,9 +352,12 @@ public class TestSchedulerUtils {
           mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
+    } finally {
+      rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
+          Arrays.asList("x"));
     }
 
     // queue is "*", always succeeded
@@ -317,6 +366,10 @@ public class TestSchedulerUtils {
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.add(RMNodeLabelsManager.ANY);
 
+      rmContext.getNodeLabelManager().addToCluserNodeLabels(
+          ImmutableSet.of(NodeLabel.newInstance("x"),
+              NodeLabel.newInstance("y"), NodeLabel.newInstance("z")));
+
       Resource resource = Resources.createResource(
           0,
           YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
@@ -324,18 +377,39 @@ public class TestSchedulerUtils {
           mock(Priority.class), ResourceRequest.ANY, resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
 
       resReq.setNodeLabelExpression("y");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
 
       resReq.setNodeLabelExpression("z");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       e.printStackTrace();
       fail("Should be valid when queue can access any labels");
+    } finally {
+      rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
+          Arrays.asList("x", "y", "z"));
+    }
+
+    // same as above, but cluster node labels don't contains label, should fail
+    try {
+      // set queue accessible node labels to empty
+      queueAccessibleNodeLabels.clear();
+      queueAccessibleNodeLabels.add(RMNodeLabelsManager.ANY);
+
+      Resource resource = Resources.createResource(
+          0,
+          YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES);
+      ResourceRequest resReq = BuilderUtils.newResourceRequest(
+          mock(Priority.class), ResourceRequest.ANY, resource, 1);
+      resReq.setNodeLabelExpression("x");
+      SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
+          scheduler, rmContext);
+      fail("Should fail");
+    } catch (InvalidResourceRequestException e) {
     }
 
     // we don't allow resource name other than ANY and specify label
@@ -343,6 +417,9 @@ public class TestSchedulerUtils {
       // set queue accessible node labesl to [x, y]
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays.asList("x", "y"));
+      rmContext.getNodeLabelManager().addToCluserNodeLabels(
+          ImmutableSet.of(NodeLabel.newInstance("x"),
+              NodeLabel.newInstance("y")));
 
       Resource resource = Resources.createResource(
           0,
@@ -351,9 +428,12 @@ public class TestSchedulerUtils {
           mock(Priority.class), "rack", resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
+    } finally {
+      rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
+          Arrays.asList("x", "y"));
     }
 
     // we don't allow resource name other than ANY and specify label even if
@@ -363,6 +443,8 @@ public class TestSchedulerUtils {
       queueAccessibleNodeLabels.clear();
       queueAccessibleNodeLabels.addAll(Arrays
           .asList(CommonNodeLabelsManager.ANY));
+      rmContext.getNodeLabelManager().addToCluserNodeLabels(
+          ImmutableSet.of(NodeLabel.newInstance("x")));
 
       Resource resource = Resources.createResource(
           0,
@@ -371,9 +453,12 @@ public class TestSchedulerUtils {
           mock(Priority.class), "rack", resource, 1);
       resReq.setNodeLabelExpression("x");
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, "queue",
-          scheduler);
+          scheduler, rmContext);
       fail("Should fail");
     } catch (InvalidResourceRequestException e) {
+    } finally {
+      rmContext.getNodeLabelManager().removeFromClusterNodeLabels(
+          Arrays.asList("x"));
     }
   }
 
@@ -395,7 +480,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Zero memory should be accepted");
     }
@@ -409,7 +494,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Zero vcores should be accepted");
     }
@@ -424,7 +509,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Max memory should be accepted");
     }
@@ -439,7 +524,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
     } catch (InvalidResourceRequestException e) {
       fail("Max vcores should not be accepted");
     }
@@ -453,7 +538,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
       fail("Negative memory should not be accepted");
     } catch (InvalidResourceRequestException e) {
       // expected
@@ -468,7 +553,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
       fail("Negative vcores should not be accepted");
     } catch (InvalidResourceRequestException e) {
       // expected
@@ -484,7 +569,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
       fail("More than max memory should not be accepted");
     } catch (InvalidResourceRequestException e) {
       // expected
@@ -501,7 +586,7 @@ public class TestSchedulerUtils {
           BuilderUtils.newResourceRequest(mock(Priority.class),
               ResourceRequest.ANY, resource, 1);
       SchedulerUtils.normalizeAndvalidateRequest(resReq, maxResource, null,
-          mockScheduler);
+          mockScheduler, rmContext);
       fail("More than max vcores should not be accepted");
     } catch (InvalidResourceRequestException e) {
       // expected
@@ -632,4 +717,12 @@ public class TestSchedulerUtils {
     Assert.assertNull(applications.get(appId));
     return app;
   }
+
+  private static RMContext getMockRMContext() {
+    RMContext rmContext = mock(RMContext.class);
+    RMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
+    nlm.init(new Configuration(false));
+    when(rmContext.getNodeLabelManager()).thenReturn(nlm);
+    return rmContext;
+  }
 }