Просмотр исходного кода

YARN-11362: Fix several typos in YARN codebase of misspelled resource (#6474) Contributed by EremenkoValentin.

Reviewed-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Shilun Fan <slfan1989@apache.org>
Eremenko Valentin 1 год назад
Родитель
Commit
141627778d
14 измененных файлов с 56 добавлено и 58 удалено
  1. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResourceRequestException.java
  2. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
  3. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineEntityV2Converter.java
  4. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
  5. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java
  6. 2 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
  7. 15 15
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java
  8. 2 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
  9. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java
  10. 7 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java
  11. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java
  12. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
  13. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
  14. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResoureRequestException.java → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/SchedulerInvalidResourceRequestException.java

@@ -29,18 +29,18 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-public class SchedulerInvalidResoureRequestException extends YarnRuntimeException {
+public class SchedulerInvalidResourceRequestException extends YarnRuntimeException {
   private static final long serialVersionUID = 10081123982L;
 
-  public SchedulerInvalidResoureRequestException(String message) {
+  public SchedulerInvalidResourceRequestException(String message) {
     super(message);
   }
 
-  public SchedulerInvalidResoureRequestException(Throwable cause) {
+  public SchedulerInvalidResourceRequestException(Throwable cause) {
     super(cause);
   }
 
-  public SchedulerInvalidResoureRequestException(String message,
+  public SchedulerInvalidResourceRequestException(String message,
       Throwable cause) {
     super(message, cause);
   }

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java

@@ -133,17 +133,17 @@ public class TestYarnCLI {
       ApplicationCLI cli = createAndGetAppCLI();
       ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
       Map<String, Long> resourceSecondsMap = new HashMap<>();
-      Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
+      Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
       resourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(), 123456L);
       resourceSecondsMap.put(ResourceInformation.VCORES.getName(), 4567L);
-      preemptedResoureSecondsMap
+      preemptedResourceSecondsMap
           .put(ResourceInformation.MEMORY_MB.getName(), 1111L);
-      preemptedResoureSecondsMap
+      preemptedResourceSecondsMap
           .put(ResourceInformation.VCORES.getName(), 2222L);
       ApplicationResourceUsageReport usageReport = i == 0 ? null :
           ApplicationResourceUsageReport
               .newInstance(2, 0, null, null, null, resourceSecondsMap, 0, 0,
-                  preemptedResoureSecondsMap);
+                  preemptedResourceSecondsMap);
       ApplicationReport newApplicationReport = ApplicationReport.newInstance(
           applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
           "user", "queue", "appname", "host", 124, null,

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/timeline/TimelineEntityV2Converter.java

@@ -395,19 +395,19 @@ public final class TimelineEntityV2Converter {
         }
       }
       Map<String, Long> resourceSecondsMap = new HashMap<>();
-      Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
+      Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
       resourceSecondsMap
           .put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
       resourceSecondsMap
           .put(ResourceInformation.VCORES.getName(), vcoreSeconds);
-      preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
+      preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
           preemptedMemorySeconds);
-      preemptedResoureSecondsMap
+      preemptedResourceSecondsMap
           .put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds);
 
       appResources = ApplicationResourceUsageReport
           .newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0,
-              preemptedResoureSecondsMap);
+              preemptedResourceSecondsMap);
     }
 
     NavigableSet<TimelineEvent> events = entity.getEvents();

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java

@@ -345,19 +345,19 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         long preemptedVcoreSeconds = parseLong(entityInfo,
             ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS);
         Map<String, Long> resourceSecondsMap = new HashMap<>();
-        Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
+        Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
         resourceSecondsMap
             .put(ResourceInformation.MEMORY_MB.getName(), memorySeconds);
         resourceSecondsMap
             .put(ResourceInformation.VCORES.getName(), vcoreSeconds);
-        preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
+        preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
             preemptedMemorySeconds);
-        preemptedResoureSecondsMap
+        preemptedResourceSecondsMap
             .put(ResourceInformation.VCORES.getName(), preemptedVcoreSeconds);
 
         appResources = ApplicationResourceUsageReport
             .newInstance(0, 0, null, null, null, resourceSecondsMap, 0, 0,
-                preemptedResoureSecondsMap);
+                preemptedResourceSecondsMap);
       }
 
       if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java

@@ -55,7 +55,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
-import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -301,7 +301,7 @@ final class DefaultAMSProcessor implements ApplicationMasterServiceProcessor {
         allocation = getScheduler().allocate(appAttemptId, ask,
             request.getSchedulingRequests(), release,
             blacklistAdditions, blacklistRemovals, containerUpdateRequests);
-      } catch (SchedulerInvalidResoureRequestException e) {
+      } catch (SchedulerInvalidResourceRequestException e) {
         LOG.warn("Exceptions caught when scheduler handling requests");
         throw new YarnException(e);
       }

+ 2 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java

@@ -43,8 +43,7 @@ import org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException
         .InvalidResourceType;
-import org.apache.hadoop.yarn.exceptions
-        .SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.AccessType;
@@ -430,7 +429,7 @@ public class SchedulerUtils {
   public static MaxResourceValidationResult
       validateResourceRequestsAgainstQueueMaxResource(
       ResourceRequest resReq, Resource availableResource)
-      throws SchedulerInvalidResoureRequestException {
+      throws SchedulerInvalidResourceRequestException {
     final Resource reqResource = resReq.getCapability();
     Map<String, ResourceInformation> resourcesWithZeroAmount =
         getZeroResources(availableResource);

+ 15 - 15
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java

@@ -370,11 +370,11 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       Resource clusterResource, PendingAsk nodeLocalAsk,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     if (canAssign(schedulerKey, node, NodeType.NODE_LOCAL, reservedContainer)) {
       return assignContainer(clusterResource, node, schedulerKey,
           nodeLocalAsk, NodeType.NODE_LOCAL, reservedContainer,
-          schedulingMode, currentResoureLimits);
+          schedulingMode, currentResourceLimits);
     }
 
     // Skip node-local request, go to rack-local request
@@ -385,11 +385,11 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       Resource clusterResource, PendingAsk rackLocalAsk,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     if (canAssign(schedulerKey, node, NodeType.RACK_LOCAL, reservedContainer)) {
       return assignContainer(clusterResource, node, schedulerKey,
           rackLocalAsk, NodeType.RACK_LOCAL, reservedContainer,
-          schedulingMode, currentResoureLimits);
+          schedulingMode, currentResourceLimits);
     }
 
     // Skip rack-local request, go to off-switch request
@@ -400,11 +400,11 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       Resource clusterResource, PendingAsk offSwitchAsk,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     if (canAssign(schedulerKey, node, NodeType.OFF_SWITCH, reservedContainer)) {
       return assignContainer(clusterResource, node, schedulerKey,
           offSwitchAsk, NodeType.OFF_SWITCH, reservedContainer,
-          schedulingMode, currentResoureLimits);
+          schedulingMode, currentResourceLimits);
     }
 
     application.updateAppSkipNodeDiagnostics(
@@ -419,7 +419,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
   private ContainerAllocation assignContainersOnNode(Resource clusterResource,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       RMContainer reservedContainer, SchedulingMode schedulingMode,
-      ResourceLimits currentResoureLimits) {
+      ResourceLimits currentResourceLimits) {
     ContainerAllocation allocation;
     NodeType requestLocalityType = null;
 
@@ -431,7 +431,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       allocation =
           assignNodeLocalContainers(clusterResource, nodeLocalAsk,
               node, schedulerKey, reservedContainer, schedulingMode,
-              currentResoureLimits);
+              currentResourceLimits);
       if (Resources.greaterThan(rc, clusterResource,
           allocation.getResourceToBeAllocated(), Resources.none())) {
         allocation.requestLocalityType = requestLocalityType;
@@ -458,7 +458,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       allocation =
           assignRackLocalContainers(clusterResource, rackLocalAsk,
               node, schedulerKey, reservedContainer, schedulingMode,
-              currentResoureLimits);
+              currentResourceLimits);
       if (Resources.greaterThan(rc, clusterResource,
           allocation.getResourceToBeAllocated(), Resources.none())) {
         allocation.requestLocalityType = requestLocalityType;
@@ -485,7 +485,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
       allocation =
           assignOffSwitchContainers(clusterResource, offSwitchAsk,
               node, schedulerKey, reservedContainer, schedulingMode,
-              currentResoureLimits);
+              currentResourceLimits);
 
       // When a returned allocation is LOCALITY_SKIPPED, since we're in
       // off-switch request now, we will skip this app w.r.t priorities 
@@ -507,7 +507,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
   private ContainerAllocation assignContainer(Resource clusterResource,
       FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
       PendingAsk pendingAsk, NodeType type, RMContainer rmContainer,
-      SchedulingMode schedulingMode, ResourceLimits currentResoureLimits) {
+      SchedulingMode schedulingMode, ResourceLimits currentResourceLimits) {
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("assignContainers: node=" + node.getNodeName()
@@ -547,8 +547,8 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
     // max(required - headroom, amountNeedUnreserve)
     Resource resourceNeedToUnReserve =
         Resources.max(rc, clusterResource,
-            Resources.subtract(capability, currentResoureLimits.getHeadroom()),
-            currentResoureLimits.getAmountNeededUnreserve());
+            Resources.subtract(capability, currentResourceLimits.getHeadroom()),
+            currentResourceLimits.getAmountNeededUnreserve());
 
     boolean needToUnreserve =
         rc.isAnyMajorResourceAboveZero(resourceNeedToUnReserve);
@@ -559,7 +559,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
 
     // Check if we need to kill some containers to allocate this one
     List<RMContainer> toKillContainers = null;
-    if (availableContainers == 0 && currentResoureLimits.isAllowPreemption()) {
+    if (availableContainers == 0 && currentResourceLimits.isAllowPreemption()) {
       Resource availableAndKillable = Resources.clone(available);
       for (RMContainer killableContainer : node
           .getKillableContainers().values()) {
@@ -590,7 +590,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator {
           if (!needToUnreserve) {
             // If we shouldn't allocate/reserve new container then we should
             // unreserve one the same size we are asking for since the
-            // currentResoureLimits.getAmountNeededUnreserve could be zero. If
+            // currentResourceLimits.getAmountNeededUnreserve could be zero. If
             // the limit was hit then use the amount we need to unreserve to be
             // under the limit.
             resourceNeedToUnReserve = capability;

+ 2 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java

@@ -41,8 +41,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.exceptions
-        .SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
@@ -912,7 +911,7 @@ public class FairScheduler extends
     // scheduler would clear them right away and AM
     // would not get this information.
     if (!invalidAsks.isEmpty()) {
-      throw new SchedulerInvalidResoureRequestException(String.format(
+      throw new SchedulerInvalidResourceRequestException(String.format(
               "Resource request is invalid for application %s because queue %s "
                       + "has 0 amount of resource for a resource type! "
                       + "Validation result: %s",

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/LocalityAppPlacementAllocator.java

@@ -23,7 +23,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
@@ -172,8 +172,8 @@ public class LocalityAppPlacementAllocator <N extends SchedulerNode>
       SchedulerRequestKey schedulerRequestKey,
       SchedulingRequest schedulingRequest,
       boolean recoverPreemptedRequestForAContainer)
-      throws SchedulerInvalidResoureRequestException {
-    throw new SchedulerInvalidResoureRequestException(this.getClass().getName()
+      throws SchedulerInvalidResourceRequestException {
+    throw new SchedulerInvalidResourceRequestException(this.getClass().getName()
         + " not be able to handle SchedulingRequest, there exists a "
         + "ResourceRequest with the same scheduler key=" + schedulerRequestKey
         + ", please send SchedulingRequest with a different allocationId and "

+ 7 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SingleConstraintAppPlacementAllocator.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.SchedulingRequestPBImpl;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
-import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
@@ -81,7 +81,7 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
       Collection<ResourceRequest> requests,
       boolean recoverPreemptedRequestForAContainer) {
     if (requests != null && !requests.isEmpty()) {
-      throw new SchedulerInvalidResoureRequestException(
+      throw new SchedulerInvalidResourceRequestException(
           this.getClass().getName()
               + " not be able to handle ResourceRequest, there exists a "
               + "SchedulingRequest with the same scheduler key="
@@ -98,7 +98,7 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
       SchedulingRequest newSchedulingRequest, boolean recoverContainer) {
     // When it is a recover container, there must exists an schedulingRequest.
     if (recoverContainer && schedulingRequest == null) {
-      throw new SchedulerInvalidResoureRequestException("Trying to recover a "
+      throw new SchedulerInvalidResourceRequestException("Trying to recover a "
           + "container request=" + newSchedulingRequest.toString() + ", however"
           + "there's no existing scheduling request, this should not happen.");
     }
@@ -127,7 +127,7 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
       if (!schedulingRequest.equals(newSchedulingRequest)) {
         // Rollback #numAllocations
         sizing.setNumAllocations(newNumAllocations);
-        throw new SchedulerInvalidResoureRequestException(
+        throw new SchedulerInvalidResourceRequestException(
             "Invalid updated SchedulingRequest added to scheduler, "
                 + " we only allows changing numAllocations for the updated "
                 + "SchedulingRequest. Old=" + schedulingRequest.toString()
@@ -148,7 +148,7 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
 
       // Basic sanity check
       if (newNumAllocations < 0) {
-        throw new SchedulerInvalidResoureRequestException(
+        throw new SchedulerInvalidResourceRequestException(
             "numAllocation in ResourceSizing field must be >= 0, "
                 + "updating schedulingRequest failed.");
       }
@@ -197,12 +197,12 @@ public class SingleConstraintAppPlacementAllocator<N extends SchedulerNode>
     sb.append("AppId=").append(appSchedulingInfo.getApplicationId()).append(
         " Key=").append(this.schedulerRequestKey).append(". Exception message:")
         .append(message);
-    throw new SchedulerInvalidResoureRequestException(sb.toString());
+    throw new SchedulerInvalidResourceRequestException(sb.toString());
   }
 
   private void validateAndSetSchedulingRequest(SchedulingRequest
       newSchedulingRequest)
-      throws SchedulerInvalidResoureRequestException {
+      throws SchedulerInvalidResourceRequestException {
     // Check sizing exists
     if (newSchedulingRequest.getResourceSizing() == null
         || newSchedulingRequest.getResourceSizing().getResources() == null) {

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestZKRMStateStore.java

@@ -814,19 +814,19 @@ public class TestZKRMStateStore extends RMStateStoreTestBase {
       ApplicationAttemptId attemptId, Container container, long startTime,
       int amExitStatus) {
     Map<String, Long> resourceSecondsMap = new HashMap<>();
-    Map<String, Long> preemptedResoureSecondsMap = new HashMap<>();
+    Map<String, Long> preemptedResourceSecondsMap = new HashMap<>();
     resourceSecondsMap
         .put(ResourceInformation.MEMORY_MB.getName(), 0L);
     resourceSecondsMap
         .put(ResourceInformation.VCORES.getName(), 0L);
-    preemptedResoureSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
+    preemptedResourceSecondsMap.put(ResourceInformation.MEMORY_MB.getName(),
         0L);
-    preemptedResoureSecondsMap
+    preemptedResourceSecondsMap
         .put(ResourceInformation.VCORES.getName(), 0L);
     return ApplicationAttemptStateData.newInstance(attemptId,
         container, null, startTime, RMAppAttemptState.FINISHED,
         "myTrackingUrl", "attemptDiagnostics", FinalApplicationStatus.SUCCEEDED,
-        amExitStatus, 0, resourceSecondsMap, preemptedResoureSecondsMap, 0);
+        amExitStatus, 0, resourceSecondsMap, preemptedResourceSecondsMap, 0);
   }
 
   private ApplicationAttemptId storeAttempt(RMStateStore store,

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java

@@ -1486,7 +1486,7 @@ public class TestCapacityScheduler {
 
     waitContainerAllocated(am1, 1 * GB, 1, 2, rm1, nm1);
 
-    // Maximum resoure of b1 is 100 * 0.895 * 0.792 = 71 GB
+    // Maximum resource of b1 is 100 * 0.895 * 0.792 = 71 GB
     // 2 GBs used by am, so it's 71 - 2 = 69G.
     Assert.assertEquals(69 * GB,
         am1.doHeartbeat().getAvailableResources().getMemorySize());

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java

@@ -46,7 +46,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
@@ -5513,7 +5513,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
           + resource + " and requested resource capabilities are: "
           + requests.stream().map(ResourceRequest::getCapability)
               .collect(Collectors.toList()));
-    } catch (SchedulerInvalidResoureRequestException e) {
+    } catch (SchedulerInvalidResourceRequestException e) {
       assertTrue(
           "The thrown exception is not the expected one. Exception message: "
               + e.getMessage(),

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/TestSingleConstraintAppPlacementAllocator.java

@@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
 import org.apache.hadoop.yarn.api.records.*;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTags;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
-import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResourceRequestException;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
@@ -104,7 +104,7 @@ public class TestSingleConstraintAppPlacementAllocator {
         allocator.initialize(appSchedulingInfo, schedulerRequestKey, rmContext);
       }
       allocator.updatePendingAsk(schedulerRequestKey, schedulingRequest, false);
-    } catch (SchedulerInvalidResoureRequestException e) {
+    } catch (SchedulerInvalidResourceRequestException e) {
       // Expected
       return;
     }