@@ -370,11 +370,11 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       Resource clusterResource, PendingAsk nodeLocalAsk,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     if (canAssign(schedulerKey, node, NodeType.NODE_LOCAL, reservedContainer)) {
       return assignContainer(clusterResource, node, schedulerKey,
           nodeLocalAsk, NodeType.NODE_LOCAL, reservedContainer,
-          schedulingMode, currentResoureLimits);
+          schedulingMode, currentResourceLimits);
     }
 
     // Skip node-local request, go to rack-local request
@@ -385,11 +385,11 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       Resource clusterResource, PendingAsk rackLocalAsk,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     if (canAssign(schedulerKey, node, NodeType.RACK_LOCAL, reservedContainer)) {
       return assignContainer(clusterResource, node, schedulerKey,
           rackLocalAsk, NodeType.RACK_LOCAL, reservedContainer,
-          schedulingMode, currentResoureLimits);
+          schedulingMode, currentResourceLimits);
     }
 
     // Skip rack-local request, go to off-switch request
@@ -400,11 +400,11 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       Resource clusterResource, PendingAsk offSwitchAsk,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     if (canAssign(schedulerKey, node, NodeType.OFF_SWITCH, reservedContainer)) {
       return assignContainer(clusterResource, node, schedulerKey,
           offSwitchAsk, NodeType.OFF_SWITCH, reservedContainer,
-          schedulingMode, currentResoureLimits);
+          schedulingMode, currentResourceLimits);
     }
 
     application.updateAppSkipNodeDiagnostics(
@@ -419,7 +419,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
   private ContainerAllocation assignContainersOnNode(Resource clusterResource,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     ContainerAllocation allocation;
     NodeType requestLocalityType = null;
 
@@ -431,7 +431,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       allocation =
           assignNodeLocalContainers(clusterResource, nodeLocalAsk,
               node, schedulerKey, reservedContainer, schedulingMode,
-              currentResoureLimits);
+              currentResourceLimits);
       if (Resources.greaterThan(rc, clusterResource,
           allocation.getResourceToBeAllocated(), Resources.none())) {
         allocation.requestLocalityType = requestLocalityType;
@@ -458,7 +458,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       allocation =
           assignRackLocalContainers(clusterResource, rackLocalAsk,
               node, schedulerKey, reservedContainer, schedulingMode,
-              currentResoureLimits);
+              currentResourceLimits);
       if (Resources.greaterThan(rc, clusterResource,
           allocation.getResourceToBeAllocated(), Resources.none())) {
         allocation.requestLocalityType = requestLocalityType;
@@ -485,7 +485,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       allocation =
           assignOffSwitchContainers(clusterResource, offSwitchAsk,
               node, schedulerKey, reservedContainer, schedulingMode,
-              currentResoureLimits);
+              currentResourceLimits);
 
       // When a returned allocation is LOCALITY_SKIPPED, since we're in
       // off-switch request now, we will skip this app w.r.t priorities
@@ -507,7 +507,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
   private ContainerAllocation assignContainer(Resource clusterResource,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       PendingAsk pendingAsk, NodeType type, RMContainer rmContainer,
-      SchedulingMode schedulingMode, ResourceLimits currentResoureLimits) {
+      SchedulingMode schedulingMode, ResourceLimits currentResourceLimits) {
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("assignContainers: node=" + node.getNodeName()
@@ -547,8 +547,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       // max(required - headroom, amountNeedUnreserve)
       Resource resourceNeedToUnReserve =
           Resources.max(rc, clusterResource,
-              Resources.subtract(capability, currentResoureLimits.getHeadroom()),
-              currentResoureLimits.getAmountNeededUnreserve());
+              Resources.subtract(capability, currentResourceLimits.getHeadroom()),
+              currentResourceLimits.getAmountNeededUnreserve());
 
       boolean needToUnreserve =
           rc.isAnyMajorResourceAboveZero(resourceNeedToUnReserve);
@@ -559,7 +559,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
 
     // Check if we need to kill some containers to allocate this one
     List<RMContainer> toKillContainers = null;
-    if (availableContainers == 0 && currentResoureLimits.isAllowPreemption()) {
+    if (availableContainers == 0 && currentResourceLimits.isAllowPreemption()) {
       Resource availableAndKillable = Resources.clone(available);
       for (RMContainer killableContainer : node
           .getKillableContainers().values()) {
@@ -590,7 +590,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
         if (!needToUnreserve) {
           // If we shouldn't allocate/reserve new container then we should
           // unreserve one the same size we are asking for since the
-          // currentResoureLimits.getAmountNeededUnreserve could be zero. If
+          // currentResourceLimits.getAmountNeededUnreserve could be zero. If
          // the limit was hit then use the amount we need to unreserve to be
          // under the limit.
          resourceNeedToUnReserve = capability;