@@ -1506,8 +1506,8 @@ public class TestLeafQueue {
   @Test
   public void testLocalityScheduling() throws Exception {

-    // Manipulate queue 'a'
-    LeafQueue a = stubLeafQueue((LeafQueue)queues.get(A));
+    // Manipulate queue 'b'
+    LeafQueue a = stubLeafQueue((LeafQueue)queues.get(B));

     // User
     String user_0 = "user_0";
@@ -1614,34 +1614,87 @@ public class TestLeafQueue {
         TestUtils.createResourceRequest(host_1, 1*GB, 1,
             true, priority, recordFactory));
     app_0_requests_0.add(
-        TestUtils.createResourceRequest(rack_1, 1*GB, 1,
+        TestUtils.createResourceRequest(rack_1, 1*GB, 3,
             true, priority, recordFactory));
     app_0_requests_0.add(
-        TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, // one extra
+        TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 4, // one extra
             true, priority, recordFactory));
     app_0.updateResourceRequests(app_0_requests_0);
-    assertEquals(2, app_0.getTotalRequiredResources(priority));
+    assertEquals(4, app_0.getTotalRequiredResources(priority));

     String host_3 = "127.0.0.4"; // on rack_1
     FiCaSchedulerNode node_3 = TestUtils.getMockNode(host_3, rack_1, 0, 8*GB);

     // Rack-delay
+    doReturn(true).when(a).getRackLocalityFullReset();
     doReturn(1).when(a).getNodeLocalityDelay();

     // Shouldn't assign RACK_LOCAL yet
     assignment = a.assignContainers(clusterResource, node_3,
         new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     assertEquals(1, app_0.getSchedulingOpportunities(priority));
-    assertEquals(2, app_0.getTotalRequiredResources(priority));
+    assertEquals(4, app_0.getTotalRequiredResources(priority));

     // Should assign RACK_LOCAL now
     assignment = a.assignContainers(clusterResource, node_3,
         new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
     verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
     assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset
+    assertEquals(3, app_0.getTotalRequiredResources(priority));
+
+    // Shouldn't assign RACK_LOCAL because schedulingOpportunities should have gotten reset.
+    assignment = a.assignContainers(clusterResource, node_3,
+        new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+    assertEquals(1, app_0.getSchedulingOpportunities(priority));
+    assertEquals(3, app_0.getTotalRequiredResources(priority));
+
+    // Next time we schedule RACK_LOCAL, don't reset
+    doReturn(false).when(a).getRackLocalityFullReset();
+
+    // Should assign RACK_LOCAL now
+    assignment = a.assignContainers(clusterResource, node_3,
+        new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+    verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
+    assertEquals(2, app_0.getSchedulingOpportunities(priority)); // should NOT reset
+    assertEquals(2, app_0.getTotalRequiredResources(priority));
+
+    // Another RACK_LOCAL since schedulingOpportunities not reset
+    assignment = a.assignContainers(clusterResource, node_3,
+        new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+    verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
+    assertEquals(3, app_0.getSchedulingOpportunities(priority)); // should NOT reset
     assertEquals(1, app_0.getTotalRequiredResources(priority));
+
+    // Add a request larger than cluster size to verify
+    // OFF_SWITCH delay is capped by cluster size
+    app_0.resetSchedulingOpportunities(priority);
+    app_0_requests_0.clear();
+    app_0_requests_0.add(
+        TestUtils.createResourceRequest(host_0, 1*GB, 100,
+            true, priority, recordFactory));
+    app_0_requests_0.add(
+        TestUtils.createResourceRequest(rack_0, 1*GB, 100,
+            true, priority, recordFactory));
+    app_0_requests_0.add(
+        TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 100,
+            true, priority, recordFactory));
+    app_0.updateResourceRequests(app_0_requests_0);
+
+    // Start with off switch. 3 nodes in cluster so shouldn't allocate first 3
+    for (int i = 0; i < numNodes; i++) {
+      assignment =
+          a.assignContainers(clusterResource, node_2, new ResourceLimits(
+              clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+      verifyNoContainerAllocated(assignment);
+      assertEquals(i+1, app_0.getSchedulingOpportunities(priority));
+    }
+    // delay should be capped at numNodes so next one should allocate
+    assignment = a.assignContainers(clusterResource, node_2,
+        new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
+    verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
+    assertEquals(numNodes+1, app_0.getSchedulingOpportunities(priority));
   }
-  
+
   @Test
   public void testApplicationPriorityScheduling() throws Exception {
     // Manipulate queue 'a'