
MAPREDUCE-2886. Fix Javadoc warnings in MapReduce. (mahadev)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1163050 13f79535-47bb-0310-9956-ffa450edef68
Mahadev Konar, 13 years ago
parent commit fb6ecb9b27
21 files changed, 58 insertions, 51 deletions
  1.  +2  -0   hadoop-mapreduce-project/CHANGES.txt
  2.  +5  -5   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
  3.  +13 -13  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
  4.  +1  -1   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
  5.  +7  -7   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
  6.  +0  -1   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
  7.  +1  -1   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java
  8.  +0  -2   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java
  9.  +1  -2   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
  10. +2  -1   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java
  11. +0  -1   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java
  12. +3  -3   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
  13. +1  -2   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
  14. +2  -2   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
  15. +9  -2   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
  16. +5  -2   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
  17. +2  -2   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
  18. +1  -1   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
  19. +1  -1   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
  20. +1  -1   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
  21. +1  -1   hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java

+ 2 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -1170,6 +1170,8 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-2737. Update the progress of jobs on client side. (Siddharth Seth
     and Mahadev Konar via mahadev)
 
+    MAPREDUCE-2886. Fix Javadoc warnings in MapReduce. (mahadev)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 5 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java

@@ -44,7 +44,7 @@ public interface TaskRuntimeEstimator {
    * already elapsed.  If the projected total execution time for this task
    * ever exceeds its reasonable execution time, we may speculate it.
    *
-   * @param id the {@link TaskID} of the task we are asking about
+   * @param id the {@link TaskId} of the task we are asking about
    * @return the task's maximum reasonable runtime, or MAX_VALUE if
    *         we don't have enough information to rule out any runtime,
    *         however long.
@@ -57,7 +57,7 @@ public interface TaskRuntimeEstimator {
    * Estimate a task attempt's total runtime.  Includes the time already
    * elapsed.
    *
-   * @param id the {@link TaskAttemptID} of the attempt we are asking about
+   * @param id the {@link TaskAttemptId} of the attempt we are asking about
    * @return our best estimate of the attempt's runtime, or {@code -1} if
    *         we don't have enough information yet to produce an estimate.
    *
@@ -69,7 +69,7 @@ public interface TaskRuntimeEstimator {
    * Estimates how long a new attempt on this task will take if we start
    *  one now
    *
-   * @param id the {@link TaskID} of the task we are asking about
+   * @param id the {@link TaskId} of the task we are asking about
    * @return our best estimate of a new attempt's runtime, or {@code -1} if
    *         we don't have enough information yet to produce an estimate.
    *
@@ -79,9 +79,9 @@ public interface TaskRuntimeEstimator {
   /**
    *
    * Computes the width of the error band of our estimate of the task
-   *  runtime as returned by {@link estimatedRuntime}
+   *  runtime as returned by {@link #estimatedRuntime(TaskAttemptId)}
    *
-   * @param id the {@link TaskAttemptID} of the attempt we are asking about
+   * @param id the {@link TaskAttemptId} of the attempt we are asking about
    * @return our best estimate of the attempt's runtime, or {@code -1} if
    *         we don't have enough information yet to produce an estimate.
    *
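The four estimator methods above are easiest to see together. Below is a minimal, hedged sketch of how a speculator might consume them, assuming only the signatures documented in this file; the helper class and method name are hypothetical, not code from this commit.

    import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
    import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
    import org.apache.hadoop.mapreduce.v2.app.speculate.TaskRuntimeEstimator;

    public class SpeculationSketch {
      // Hypothetical helper: true when an attempt's projected runtime
      // exceeds the task's reasonable execution time.
      static boolean looksSpeculatable(TaskRuntimeEstimator estimator,
          TaskId taskId, TaskAttemptId attemptId) {
        long estimate = estimator.estimatedRuntime(attemptId);
        if (estimate < 0) {
          return false;  // -1 means not enough information yet, per the Javadoc
        }
        return estimate > estimator.thresholdRuntime(taskId);
      }
    }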

+ 13 - 13
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java

@@ -127,7 +127,7 @@ public class JobHistoryUtils {
   /**
    * Checks whether the provided path string is a valid job history file.
    * @param pathString the path to be checked.
-   * @return
+   * @return true if the path is a valid job history file name, else false
    */
   public static boolean isValidJobHistoryFileName(String pathString) {
     return pathString.endsWith(JOB_HISTORY_FILE_EXTENSION);
@@ -148,7 +148,7 @@ public class JobHistoryUtils {
 
   /**
    * Gets a PathFilter which would match configuration files.
-   * @return
+   * @return the path filter {@link PathFilter} for matching conf files.
    */
   public static PathFilter getConfFileFilter() {
     return CONF_FILTER;
@@ -156,7 +156,7 @@ public class JobHistoryUtils {
   
   /**
    * Gets a PathFilter which would match job history file names.
-   * @return
+   * @return the path filter {@link PathFilter} matching job history files.
    */
   public static PathFilter getHistoryFileFilter() {
     return JOB_HISTORY_FILE_FILTER;
@@ -194,8 +194,8 @@ public class JobHistoryUtils {
   
   /**
    * Gets the configured directory prefix for Done history files.
-   * @param conf
-   * @return
+   * @param conf the configuration object
+   * @return the done history directory
    */
   public static String getConfiguredHistoryServerDoneDirPrefix(
       Configuration conf) {
@@ -209,8 +209,8 @@ public class JobHistoryUtils {
 
   /**
    * Gets the user directory for intermediate done history files.
-   * @param conf
-   * @return
+   * @param conf the configuration object
+   * @return the intermediate done directory for jobhistory files.
    */
   public static String getHistoryIntermediateDoneDirForUser(Configuration conf) throws IOException {
     return getConfiguredHistoryIntermediateDoneDirPrefix(conf) + File.separator
@@ -262,7 +262,7 @@ public class JobHistoryUtils {
    * @param logDir the log directory prefix.
    * @param jobId the jobId.
    * @param attempt attempt number for this job.
-   * @return
+   * @return the conf file path for jobs in progress.
    */
   public static Path getStagingConfFile(Path logDir, JobId jobId, int attempt) {
     Path jobFilePath = null;
@@ -277,7 +277,7 @@ public class JobHistoryUtils {
    * Gets the serial number part of the path based on the jobId and serialNumber format.
    * @param id
    * @param serialNumberFormat
-   * @return
+   * @return the serial number part of the path based on the jobId and serial number format.
    */
   public static String serialNumberDirectoryComponent(JobId id, String serialNumberFormat) {
     return String.format(serialNumberFormat,
@@ -287,7 +287,7 @@ public class JobHistoryUtils {
   
   /**Extracts the timestamp component from the path.
    * @param path
-   * @return
+   * @return the timestamp component from the path
    */
   public static String getTimestampPartFromPath(String path) {
     Matcher matcher = TIMESTAMP_DIR_PATTERN.matcher(path);
@@ -305,7 +305,7 @@ public class JobHistoryUtils {
    * @param id
    * @param timestampComponent
    * @param serialNumberFormat
-   * @return
+   * @return the history sub directory based on the jobid, timestamp and serial number format
    */
   public static String historyLogSubdirectory(JobId id, String timestampComponent, String serialNumberFormat) {
 //    String result = LOG_VERSION_STRING;
@@ -324,7 +324,7 @@ public class JobHistoryUtils {
    * Gets the timestamp component based on millisecond time.
    * @param millisecondTime
    * @param debugMode
-   * @return
+   * @return the timestamp component based on millisecond time
    */
   public static String timestampDirectoryComponent(long millisecondTime, boolean debugMode) {
     Calendar timestamp = Calendar.getInstance();
@@ -350,7 +350,7 @@ public class JobHistoryUtils {
   /**
    * Computes a serial number used as part of directory naming for the given jobId.
    * @param id the jobId.
-   * @return
+   * @return the serial number used as part of directory naming for the given jobid
    */
   public static int jobSerialNumber(JobId id) {
     return id.getId();
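As a quick illustration of the filter and prefix helpers documented above, here is a hedged sketch that lists completed history files under the configured done directory; the class name is illustrative and error handling is elided.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;

    public class ListDoneHistoryFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path doneDir = new Path(
            JobHistoryUtils.getConfiguredHistoryServerDoneDirPrefix(conf));
        FileSystem fs = doneDir.getFileSystem(conf);
        // getHistoryFileFilter() matches valid job history file names only.
        for (FileStatus stat :
            fs.listStatus(doneDir, JobHistoryUtils.getHistoryFileFilter())) {
          System.out.println(stat.getPath());
        }
      }
    }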

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java

@@ -372,7 +372,7 @@ public class Counters
    * @param id the id of the counter within the group (0 to N-1)
    * @param name the internal name of the counter
    * @return the counter for that name
-   * @deprecated use {@link findCounter(String, String)} instead
+   * @deprecated use {@link #findCounter(String, String)} instead
    */
   @Deprecated
   public Counter findCounter(String group, int id, String name) {
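The replacement that the corrected @deprecated tag points at, findCounter(String, String), looks like this in use; the group and counter names here are illustrative, and the wrapper class is hypothetical.

    import org.apache.hadoop.mapred.Counters;

    public class CounterLookupSketch {
      static long value(Counters counters) {
        // Preferred lookup by group name and internal counter name;
        // the id-based overload above is deprecated.
        return counters.findCounter("MyGroup", "MY_COUNTER").getValue();
      }
    }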

+ 7 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java

@@ -49,7 +49,7 @@ import org.apache.hadoop.util.ToolRunner;
 
 /**
  * <code>JobClient</code> is the primary interface for the user-job to interact
- * with the {@link JobTracker}.
+ * with the cluster.
  * 
  * <code>JobClient</code> provides facilities to submit jobs, track their 
  * progress, access component-tasks' reports/logs, get the Map-Reduce cluster
@@ -72,7 +72,7 @@ import org.apache.hadoop.util.ToolRunner;
  *   on the distributed file-system. 
  *   </li>
  *   <li>
- *   Submitting the job to the <code>JobTracker</code> and optionally monitoring
+ *   Submitting the job to the cluster and optionally monitoring
 *   its status.
  *   </li>
  * </ol></p>
@@ -152,7 +152,7 @@ public class JobClient extends CLI {
     /**
      * We store a JobProfile and a timestamp for when we last
      * acquired the job profile.  If the job is null, then we cannot
-     * perform any of the tasks.  The job might be null if the JobTracker
+     * perform any of the tasks.  The job might be null if the cluster
      * has completely forgotten about the job.  (eg, 24 hours after the
      * job completes.)
      */
@@ -348,7 +348,7 @@ public class JobClient extends CLI {
     }
     
     /**
-     * Fetch task completion events from jobtracker for this job. 
+     * Fetch task completion events from cluster for this job. 
      */
     public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
         int startFrom) throws IOException {
@@ -429,7 +429,7 @@ public class JobClient extends CLI {
     
   /**
    * Build a job client with the given {@link JobConf}, and connect to the 
-   * default {@link JobTracker}.
+   * default cluster.
    * 
    * @param conf the job configuration.
    * @throws IOException
@@ -440,7 +440,7 @@ public class JobClient extends CLI {
 
   /**
    * Build a job client with the given {@link Configuration}, 
-   * and connect to the default {@link JobTracker}.
+ *   and connect to the default cluster.
    * 
    * @param conf the configuration.
    * @throws IOException
@@ -450,7 +450,7 @@ public class JobClient extends CLI {
   }
 
   /**
-   * Connect to the default {@link JobTracker}.
+   * Connect to the default cluster.
    * @param conf the job configuration.
    * @throws IOException
    */
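In use, the constructor documented above is the whole connection story. A minimal, hedged submission sketch follows; the JobConf is assumed to be fully configured elsewhere, and the wrapper class is hypothetical.

    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.RunningJob;

    public class SubmitSketch {
      static RunningJob submit(JobConf conf) throws Exception {
        JobClient client = new JobClient(conf); // connects to the default cluster
        return client.submitJob(conf);          // poll RunningJob for progress
      }
    }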

+ 0 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java

@@ -476,7 +476,6 @@ public class JobConf extends Configuration {
 
   /**
    * Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.
-   * @see org.apache.hadoop.mapreduce.util.MRAsyncDiskService#cleanupAllVolumes()
    */
   @Deprecated
   public void deleteLocalFiles() throws IOException {

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.mapred;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** The states of a {@link TaskInProgress} as seen by the JobTracker.
+/** The states of a Task.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable

+ 0 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java

@@ -43,8 +43,6 @@ import org.apache.hadoop.io.Text;
  * 
  * @see TaskID
  * @see TaskAttemptID
- * @see org.apache.hadoop.mapred.JobTracker#getNewJobId()
- * @see org.apache.hadoop.mapred.JobTracker#getStartTime()
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable

+ 1 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java

@@ -22,8 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 /**
  * Place holder for cluster level configuration keys.
  * 
- * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The 
- * keys should have "mapreduce.cluster." as the prefix. 
+ * The keys should have "mapreduce.cluster." as the prefix. 
  *
  */
 @InterfaceAudience.Private

+ 2 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -114,7 +115,7 @@ public interface ContainerManager {
    *
    * @param request request to get <code>ContainerStatus</code> of a container
    *                with the specified <code>ContainerId</code>
-   * @return
+   * @return the <code>ContainerStatus</code> of the container
    * @throws YarnRemoteException
    */
   @Public

+ 0 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java

@@ -113,7 +113,6 @@ public interface AMResponse {
   
   /**
    * Get available headroom for resources in the cluster for the application.
-   * @param limit available headroom for resources in the cluster for the application
    */
   @Public
   @Stable

+ 3 - 3
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java

@@ -50,7 +50,7 @@ public class ConverterUtils {
    * 
    * @param url
    *          url to convert
-   * @return
+   * @return the path parsed from the {@link URL}
    * @throws URISyntaxException
    */
   public static Path getPathFromYarnURL(URL url) throws URISyntaxException {
@@ -63,8 +63,8 @@ public class ConverterUtils {
   
   /**
    * change from CharSequence to string for map key and value
-   * @param env
-   * @return
+   * @param env the CharSequence map to convert
+   * @return the converted String-to-String map
    */
   public static Map<String, String> convertToString(
       Map<CharSequence, CharSequence> env) {
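The convertToString helper documented above is straightforward; here is a self-contained sketch, with illustrative key and value.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.yarn.util.ConverterUtils;

    public class EnvConversionSketch {
      public static void main(String[] args) {
        Map<CharSequence, CharSequence> env =
            new HashMap<CharSequence, CharSequence>();
        env.put("JAVA_HOME", "/usr/lib/jvm/default");
        // Copies every CharSequence key/value into a String,String map.
        Map<String, String> out = ConverterUtils.convertToString(env);
        System.out.println(out.get("JAVA_HOME"));
      }
    }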

+ 1 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java

@@ -221,8 +221,7 @@ public class ProcfsBasedProcessTree {
   }
 
   /** Verify that the given process id is same as its process group id.
-   * @param pidStr Process id of the to-be-verified-process
-   * @param procfsDir  Procfs root dir
+   * @return true if the process id matches its process group id, else false.
    */
   public boolean checkPidPgrpidForMatch() {
     return checkPidPgrpidForMatch(pid, PROCFS);
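A hedged usage sketch for the corrected @return above; the single-pid constructor is an assumption based on this vintage of the class, and the wrapper class is hypothetical.

    import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;

    public class PgrpCheckSketch {
      public static void main(String[] args) {
        ProcfsBasedProcessTree tree = new ProcfsBasedProcessTree(args[0]);
        // true only if the process id equals its process group id
        System.out.println(tree.checkPidPgrpidForMatch());
      }
    }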

+ 2 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java

@@ -62,7 +62,7 @@ public class RackResolver {
    * right resolver implementation.
    * @param conf
    * @param hostName
-   * @return
+   * @return the {@link Node} obtained by resolving the hostname
    */
   public static Node resolve(Configuration conf, String hostName) {
     init(conf);
@@ -74,7 +74,7 @@ public class RackResolver {
    * network topology. This method doesn't initialize the class.
    * Call {@link #init(Configuration)} explicitly.
    * @param hostName
-   * @return
+   * @return the {@link Node} obtained by resolving the hostname
    */
   public static Node resolve(String hostName) {
     if (!initCalled) {
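For contrast between the two overloads above, here is a hedged sketch of the self-initializing form; the hostname is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.Node;
    import org.apache.hadoop.yarn.util.RackResolver;

    public class RackLookupSketch {
      public static void main(String[] args) {
        // resolve(conf, host) initializes the resolver on first use;
        // resolve(host) requires init(Configuration) to have been called.
        Node node = RackResolver.resolve(new Configuration(), "host1.example.com");
        System.out.println(node.getNetworkLocation()); // e.g. /default-rack
      }
    }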

+ 9 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java

@@ -77,11 +77,18 @@ public abstract class ContainerExecutor implements Configurable {
       List<Path> localDirs)
     throws IOException, InterruptedException;
 
+
   /**
    * Launch the container on the node. This is a blocking call and returns only
    * when the container exits.
-   * 
-   * @param launchCtxt
+   * @param container the container to be launched
+   * @param nmPrivateContainerScriptPath the path for launch script
+   * @param nmPrivateTokensPath the path for tokens for the container
+   * @param user the user of the container
+   * @param appId the appId of the container
+   * @param containerWorkDir the work dir for the container
+   * @return the return status of the launch
+   * @throws IOException
    */
   public abstract int launchContainer(Container container,
       Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath,
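A hedged caller sketch matching the newly documented signature; the trailing parameters are assumed from the Javadoc above, every argument is assumed to be prepared by the node manager's launch machinery, and the wrapper class is hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

    public class LaunchSketch {
      static void launch(ContainerExecutor exec, Container container,
          Path script, Path tokens, String user, String appId, Path workDir)
          throws IOException, InterruptedException {
        // Blocking call: returns only when the container exits.
        int status = exec.launchContainer(container, script, tokens,
            user, appId, workDir);
        if (status != 0) {
          System.err.println("container exited with status " + status);
        }
      }
    }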

+ 5 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java

@@ -60,11 +60,14 @@ public class DeletionService extends AbstractService {
     this.exec = exec;
     this.debugDelay = 0;
   }
-
+  
   /**
    * Delete the path(s) as this user.
    * @param user The user to delete as, or the JVM user if null
-   * @param p Paths to delete
+   * @param subDir the sub directory name
+   * @param baseDirs the base directories which contain the subDir
    */
   public void delete(String user, Path subDir, Path... baseDirs) {
     // TODO if parent owned by NM, rename within parent inline
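The corrected @param tags describe a varargs call shape; a hedged sketch follows, with illustrative user and paths and a hypothetical wrapper class.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.server.nodemanager.DeletionService;

    public class CleanupSketch {
      static void cleanup(DeletionService del) {
        // subDir is joined onto each baseDir; deletion runs as user "alice".
        del.delete("alice", new Path("appcache/application_0001"),
            new Path("/local/1/usercache/alice"),
            new Path("/local/2/usercache/alice"));
      }
    }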

+ 2 - 2
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java

@@ -460,7 +460,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
   
   /**
    * return the scheduler.
-   * @return
+   * @return the scheduler for the Resource Manager.
    */
   @Private
   public ResourceScheduler getResourceScheduler() {
@@ -469,7 +469,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
 
   /**
    * return the resource tracking component.
-   * @return
+   * @return the resource tracking component.
    */
   @Private
   public ResourceTrackerService getResourceTrackerService() {

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 
 /**
  * This interface is the one implemented by the schedulers. It mainly extends 
- * {@link ResourceListener} and {@link YarnScheduler}. 
+ * {@link YarnScheduler}. 
  *
  */
 @LimitedPrivate("yarn")

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java

@@ -360,7 +360,7 @@ public class SchedulerApp {
    * given <code>priority</code>?
    * @param node node to be checked
    * @param priority priority of reserved container
-   * @return
+   * @return true if reserved, false if not
    */
   public synchronized boolean isReserved(SchedulerNode node, Priority priority) {
     Map<NodeId, RMContainer> reservedContainers = 
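A one-line consumer of the corrected @return, for completeness; the helper class and method are hypothetical.

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApp;
    import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;

    public class ReservationCheckSketch {
      static boolean hasReservation(SchedulerApp app, SchedulerNode node,
          Priority priority) {
        // true if the app reserved resources on this node at this priority
        return app.isReserved(node, priority);
      }
    }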

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java

@@ -90,7 +90,7 @@ public class SchedulerNode {
    * given application.
    * 
    * @param applicationId application
-   * @param containers allocated containers
+   * @param rmContainer allocated container
    */
   public synchronized void allocateContainer(ApplicationId applicationId, 
       RMContainer rmContainer) {

+ 1 - 1
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java

@@ -155,7 +155,7 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
   * Assign containers to applications in the queue or its children (if any).
    * @param clusterResource the resource of the cluster.
    * @param node node on which resources are available
-   * @return
+   * @return the resource that is being assigned.
    */
   public Resource assignContainers(Resource clusterResource, SchedulerNode node);