
YARN-1453. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Akira AJISAKA, Andrew Purtell, and Allen Wittenauer.

(cherry picked from commit 3da9a97cfbcc3a1c50aaf85b1a129d4d269cd5fd)
Tsuyoshi Ozawa, 10 years ago
parent
commit
2b2f7f2b90
64 files changed, 517 insertions and 585 deletions
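For background, JDK 8's javadoc tool enables doclint checks that reject HTML earlier JDKs tolerated, which is what this patch cleans up across the YARN sources. The sketch below is illustrative only (the interface and method names are hypothetical, not taken from the patch); it contrasts a doc comment using two of the patterns removed by this commit, a self-closing <p/> and an unclosed <em> tag, with the balanced, {@code}-style markup the patch moves toward.

public interface RpcPortJavadocExample {

  // Old style: the self-closing <p/> and the unclosed <em> tag are the kinds
  // of markup that the JDK 8 javadoc tool (doclint) reports as errors.
  /**
   * Get the <em>RPC port<em> on which the ApplicationMaster is responding.
   * <p/>
   * The port is set when the ApplicationMaster registers.
   */
  int getRpcPortOldStyle();

  // Cleaned-up style, matching the direction of this patch: balanced tags,
  // a bare <p> to open a new paragraph, and {@code ...} for identifiers.
  /**
   * Get the <em>RPC port</em> on which the {@code ApplicationMaster} is
   * responding.
   * <p>
   * The port is set when the {@code ApplicationMaster} registers.
   */
  int getRpcPortFixedStyle();
}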
  1. 3 0
      hadoop-yarn-project/CHANGES.txt
  2. 12 32
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java
  3. 0 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java
  4. 12 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java
  5. 35 33
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java
  6. 12 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java
  7. 2 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java
  8. 0 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
  9. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java
  10. 0 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusesRequest.java
  11. 0 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusesResponse.java
  12. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java
  13. 5 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java
  14. 4 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java
  15. 16 17
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java
  16. 5 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java
  17. 5 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java
  18. 9 14
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java
  19. 23 24
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
  20. 26 24
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
  21. 23 26
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java
  22. 17 18
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java
  23. 12 17
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java
  24. 10 11
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java
  25. 16 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java
  26. 15 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java
  27. 23 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java
  28. 12 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java
  29. 16 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PreemptionMessage.java
  30. 5 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java
  31. 12 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java
  32. 7 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java
  33. 6 11
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationRequest.java
  34. 19 19
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationRequestInterpreter.java
  35. 25 26
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java
  36. 3 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
  37. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceRequest.java
  38. 9 15
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AHSClient.java
  39. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java
  40. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java
  41. 27 31
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java
  42. 11 12
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java
  43. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
  44. 1 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java
  45. 0 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java
  46. 3 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java
  47. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
  48. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java
  49. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/RegistryOperationsClient.java
  50. 1 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java
  51. 5 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java
  52. 4 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java
  53. 4 9
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
  54. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java
  55. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java
  56. 11 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeHealthStatus.java
  57. 5 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
  58. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java
  59. 7 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
  60. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
  61. 1 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java
  62. 10 9
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java
  63. 0 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
  64. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java

+ 3 - 0
hadoop-yarn-project/CHANGES.txt

@@ -709,6 +709,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3171. Sort by Application id, AppAttempt and ContainerID doesn't work
     in ATS / RM web ui. (Naganarasimha G R via xgong)
 
+    YARN-1453. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in 
+    doc comments. (Akira AJISAKA, Andrew Purtell, and Allen Wittenauer via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

+ 12 - 32
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationBaseProtocol.java

@@ -65,41 +65,31 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 public interface ApplicationBaseProtocol {
 
   /**
-   * <p>
    * The interface used by clients to get a report of an Application from the
    * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
-   * </p>
-   *
    * <p>
    * The client, via {@link GetApplicationReportRequest} provides the
    * {@link ApplicationId} of the application.
-   * </p>
-   *
    * <p>
    * In secure mode,the <code>ResourceManager</code> or
    * <code>ApplicationHistoryServer</code> verifies access to the application,
    * queue etc. before accepting the request.
-   * </p>
-   *
    * <p>
    * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
    * responds with a {@link GetApplicationReportResponse} which includes the
    * {@link ApplicationReport} for the application.
-   * </p>
-   *
    * <p>
    * If the user does not have <code>VIEW_APP</code> access then the following
    * fields in the report will be set to stubbed values:
    * <ul>
-   * <li>host - set to "N/A"</li>
-   * <li>RPC port - set to -1</li>
-   * <li>client token - set to "N/A"</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL - set to "N/A"</li>
-   * <li>original tracking URL - set to "N/A"</li>
-   * <li>resource usage report - all values are -1</li>
+   *   <li>host - set to "N/A"</li>
+   *   <li>RPC port - set to -1</li>
+   *   <li>client token - set to "N/A"</li>
+   *   <li>diagnostics - set to "N/A"</li>
+   *   <li>tracking URL - set to "N/A"</li>
+   *   <li>original tracking URL - set to "N/A"</li>
+   *   <li>resource usage report - all values are -1</li>
    * </ul>
-   * </p>
    *
    * @param request
    *          request for an application report
@@ -148,40 +138,30 @@ public interface ApplicationBaseProtocol {
           IOException;
 
   /**
-   * <p>
    * The interface used by clients to get a report of an Application Attempt
    * from the <code>ResourceManager</code> or
    * <code>ApplicationHistoryServer</code>
-   * </p>
-   *
    * <p>
    * The client, via {@link GetApplicationAttemptReportRequest} provides the
    * {@link ApplicationAttemptId} of the application attempt.
-   * </p>
-   *
    * <p>
    * In secure mode,the <code>ResourceManager</code> or
    * <code>ApplicationHistoryServer</code> verifies access to the method before
    * accepting the request.
-   * </p>
-   *
    * <p>
    * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
    * responds with a {@link GetApplicationAttemptReportResponse} which includes
    * the {@link ApplicationAttemptReport} for the application attempt.
-   * </p>
-   *
    * <p>
    * If the user does not have <code>VIEW_APP</code> access then the following
    * fields in the report will be set to stubbed values:
    * <ul>
-   * <li>host</li>
-   * <li>RPC port</li>
-   * <li>client token</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL</li>
+   *   <li>host</li>
+   *   <li>RPC port</li>
+   *   <li>client token</li>
+   *   <li>diagnostics - set to "N/A"</li>
+   *   <li>tracking URL</li>
    * </ul>
-   * </p>
    *
    * @param request
    *          request for an application attempt report

+ 0 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationClientProtocol.java

@@ -135,9 +135,6 @@ public interface ApplicationClientProtocol extends ApplicationBaseProtocol {
    * @return (empty) response on accepting the submission
    * @throws YarnException
    * @throws IOException
-   * @throws InvalidResourceRequestException
-   *           The exception is thrown when a {@link ResourceRequest} is out of
-   *           the range of the configured lower and upper resource boundaries.
    * @see #getNewApplication(GetNewApplicationRequest)
    */
   @Public

+ 12 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java

@@ -35,19 +35,18 @@ import org.apache.hadoop.yarn.util.Records;
  * <code>ResourceManager</code> to obtain resources in the cluster.</p> 
  *
  * <p>The request includes:
- *   <ul>
- *     <li>A response id to track duplicate responses.</li>
- *     <li>Progress information.</li>
- *     <li>
- *       A list of {@link ResourceRequest} to inform the 
- *       <code>ResourceManager</code> about the application's 
- *       resource requirements.
- *     </li>
- *     <li>
- *       A list of unused {@link Container} which are being returned. 
- *     </li>
- *   </ul>
- * </p>
+ * <ul>
+ *   <li>A response id to track duplicate responses.</li>
+ *   <li>Progress information.</li>
+ *   <li>
+ *     A list of {@link ResourceRequest} to inform the
+ *     <code>ResourceManager</code> about the application's
+ *     resource requirements.
+ *   </li>
+ *   <li>
+ *     A list of unused {@link Container} which are being returned.
+ *   </li>
+ * </ul>
  * 
  * @see ApplicationMasterProtocol#allocate(AllocateRequest)
  */

+ 35 - 33
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateResponse.java

@@ -39,27 +39,27 @@ import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>The response sent by the <code>ResourceManager</code> the  
- * <code>ApplicationMaster</code> during resource negotiation.</p>
- *
- * <p>The response, includes:
- *   <ul>
- *     <li>Response ID to track duplicate responses.</li>
- *     <li>
- *       An AMCommand sent by ResourceManager to let the <code>ApplicationMaster</code>
- *       take some actions (resync, shutdown etc.).
- *     <li>A list of newly allocated {@link Container}.</li>
- *     <li>A list of completed {@link Container}s' statuses.</li>
- *     <li>
- *       The available headroom for resources in the cluster for the
- *       application. 
- *     </li>
- *     <li>A list of nodes whose status has been updated.</li>
- *     <li>The number of available nodes in a cluster.</li>
- *     <li>A description of resources requested back by the cluster</li>
- *     <li>AMRMToken, if AMRMToken has been rolled over</li>
- *   </ul>
- * </p>
+ * The response sent by the <code>ResourceManager</code> the
+ * <code>ApplicationMaster</code> during resource negotiation.
+ * <p>
+ * The response, includes:
+ * <ul>
+ *   <li>Response ID to track duplicate responses.</li>
+ *   <li>
+ *     An AMCommand sent by ResourceManager to let the
+ *     {@code ApplicationMaster} take some actions (resync, shutdown etc.).
+ *   </li>
+ *   <li>A list of newly allocated {@link Container}.</li>
+ *   <li>A list of completed {@link Container}s' statuses.</li>
+ *   <li>
+ *     The available headroom for resources in the cluster for the
+ *     application.
+ *   </li>
+ *   <li>A list of nodes whose status has been updated.</li>
+ *   <li>The number of available nodes in a cluster.</li>
+ *   <li>A description of resources requested back by the cluster</li>
+ *   <li>AMRMToken, if AMRMToken has been rolled over</li>
+ * </ul>
  * 
  * @see ApplicationMasterProtocol#allocate(AllocateRequest)
  */
@@ -220,16 +220,16 @@ public abstract class AllocateResponse {
   public abstract void setNumClusterNodes(int numNodes);
 
   /**
-   * <p>Get the description of containers owned by the AM, but requested back by
+   * Get the description of containers owned by the AM, but requested back by
    * the cluster. Note that the RM may have an inconsistent view of the
    * resources owned by the AM. These messages are advisory, and the AM may
-   * elect to ignore them.<p>
-   *
-   * <p>The message is a snapshot of the resources the RM wants back from the AM.
+   * elect to ignore them.
+   * <p>
+   * The message is a snapshot of the resources the RM wants back from the AM.
    * While demand persists, the RM will repeat its request; applications should
-   * not interpret each message as a request for <em>additional<em>
+   * not interpret each message as a request for <em>additional</em>
    * resources on top of previous messages. Resources requested consistently
-   * over some duration may be forcibly killed by the RM.<p>
+   * over some duration may be forcibly killed by the RM.
    *
    * @return A specification of the resources to reclaim from this AM.
    */
@@ -242,15 +242,17 @@ public abstract class AllocateResponse {
   public abstract void setPreemptionMessage(PreemptionMessage request);
 
   /**
-   * <p>Get the list of NMTokens required for communicating with NM. New NMTokens
-   * issued only if<p>
-   * <p>1) AM is receiving first container on underlying NodeManager.<br>
+   * Get the list of NMTokens required for communicating with NM. New NMTokens
+   * issued only if
+   * <p>
+   * 1) AM is receiving first container on underlying NodeManager.<br>
    * OR<br>
    * 2) NMToken master key rolled over in ResourceManager and AM is getting new
-   * container on the same underlying NodeManager.<p>
-   * <p>AM will receive one NMToken per NM irrespective of the number of containers
+   * container on the same underlying NodeManager.
+   * <p>
+   * AM will receive one NMToken per NM irrespective of the number of containers
    * issued on same NM. AM is expected to store these tokens until issued a
-   * new token for the same NM.<p>
+   * new token for the same NM.
    */
   @Public
   @Stable

+ 12 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java

@@ -25,19 +25,18 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>The finalization request sent by the <code>ApplicationMaster</code> to
- * inform the <code>ResourceManager</code> about its completion.</p>
- *
- * <p>The final request includes details such:
- *   <ul>
- *     <li>Final state of the <code>ApplicationMaster</code></li>
- *     <li>
- *       Diagnostic information in case of failure of the
- *       <code>ApplicationMaster</code>
- *     </li>
- *     <li>Tracking URL</li>
- *   </ul>
- * </p>
+ * The finalization request sent by the {@code ApplicationMaster} to
+ * inform the {@code ResourceManager} about its completion.
+ * <p>
+ * The final request includes details such:
+ * <ul>
+ *   <li>Final state of the {@code ApplicationMaster}</li>
+ *   <li>
+ *     Diagnostic information in case of failure of the
+ *     {@code ApplicationMaster}
+ *   </li>
+ *   <li>Tracking URL</li>
+ * </ul>
  *
  * @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)
  */

+ 2 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterResponse.java

@@ -26,22 +26,19 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>
  * The response sent by the <code>ResourceManager</code> to a
  * <code>ApplicationMaster</code> on it's completion.
- * </p>
- * 
  * <p>
  * The response, includes:
  * <ul>
  * <li>A flag which indicates that the application has successfully unregistered
  * with the RM and the application can safely stop.</li>
  * </ul>
- * </p>
+ * <p>
  * Note: The flag indicates whether the application has successfully
  * unregistered and is safe to stop. The application may stop after the flag is
  * true. If the application stops before the flag is true then the RM may retry
- * the application .
+ * the application.
  * 
  * @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)
  */

+ 0 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.yarn.util.Records;
  * <p>The request from clients to get a report of Applications
  * in the cluster from the <code>ResourceManager</code>.</p>
  *
- *
  * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
  */
 @Public

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetClusterMetricsResponse.java

@@ -27,8 +27,8 @@ import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>The response sent by the <code>ResourceManager</code> to a client
- * requesting cluster metrics.<p>
+ * The response sent by the <code>ResourceManager</code> to a client
+ * requesting cluster metrics.
  * 
  * @see YarnClusterMetrics
  * @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)

+ 0 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusesRequest.java

@@ -28,11 +28,9 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>
  * The request sent by the <code>ApplicationMaster</code> to the
  * <code>NodeManager</code> to get {@link ContainerStatus} of requested
  * containers.
- * </p>
  * 
  * @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)
  */

+ 0 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetContainerStatusesResponse.java

@@ -32,11 +32,9 @@ import org.apache.hadoop.yarn.api.records.SerializedException;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>
  * The response sent by the <code>NodeManager</code> to the
  * <code>ApplicationMaster</code> when asked to obtain the
  * <code>ContainerStatus</code> of requested containers.
- * </p>
  * 
  * @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)
  */

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoRequest.java

@@ -63,7 +63,7 @@ public abstract class GetQueueInfoRequest {
   public abstract void setQueueName(String queueName);
 
   /**
-   * Is information about <em>active applications<e/m> required?
+   * Is information about <em>active applications</em> required?
    * @return <code>true</code> if applications' information is to be included,
    *         else <code>false</code>
    */

+ 5 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetQueueInfoResponse.java

@@ -27,12 +27,11 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>The response sent by the <code>ResourceManager</code> to a client
- * requesting information about queues in the system.</p>
- *
- * <p>The response includes a {@link QueueInfo} which has details such as
- * queue name, used/total capacities, running applications, child queues etc
- * .</p>
+ * The response sent by the {@code ResourceManager} to a client
+ * requesting information about queues in the system.
+ * <p>
+ * The response includes a {@link QueueInfo} which has details such as
+ * queue name, used/total capacities, running applications, child queues etc.
  * 
  * @see QueueInfo
  * @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)

+ 4 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/KillApplicationResponse.java

@@ -26,21 +26,20 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>
  * The response sent by the <code>ResourceManager</code> to the client aborting
  * a submitted application.
- * </p>
  * <p>
  * The response, includes:
  * <ul>
- * <li>A flag which indicates that the process of killing the application is
- * completed or not.</li>
+ *   <li>
+ *     A flag which indicates that the process of killing the application is
+ *     completed or not.
+ *   </li>
  * </ul>
  * Note: user is recommended to wait until this flag becomes true, otherwise if
  * the <code>ResourceManager</code> crashes before the process of killing the
  * application is completed, the <code>ResourceManager</code> may retry this
  * application on recovery.
- * </p>
  * 
  * @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)
  */

+ 16 - 17
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java

@@ -24,16 +24,15 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>The request sent by the <code>ApplicationMaster</code> to 
- * <code>ResourceManager</code> on registration.</p>
- * 
- * <p>The registration includes details such as:
- *   <ul>
- *     <li>Hostname on which the AM is running.</li>
- *     <li>RPC Port</li>
- *     <li>Tracking URL</li>
- *   </ul>
- * </p>
+ * The request sent by the {@code ApplicationMaster} to {@code ResourceManager}
+ * on registration.
+ * <p>
+ * The registration includes details such as:
+ * <ul>
+ *   <li>Hostname on which the AM is running.</li>
+ *   <li>RPC Port</li>
+ *   <li>Tracking URL</li>
+ * </ul>
  * 
  * @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)
  */
@@ -83,20 +82,20 @@ public abstract class RegisterApplicationMasterRequest {
   public abstract void setHost(String host);
 
   /**
-   * Get the <em>RPC port</em> on which the <code>ApplicationMaster</code> 
-   * is responding. 
-   * @return the <em>RPC port<em> on which the <code>ApplicationMaster</code> is 
-   *         responding
+   * Get the <em>RPC port</em> on which the {@code ApplicationMaster} is
+   * responding.
+   * @return the <em>RPC port</em> on which the {@code ApplicationMaster}
+   *         is responding
    */
   @Public
   @Stable
   public abstract int getRpcPort();
   
   /**
-   * Set the <em>RPC port<em> on which the <code>ApplicationMaster</code> is 
+   * Set the <em>RPC port</em> on which the {@code ApplicationMaster} is
    * responding.
-   * @param port <em>RPC port<em> on which the <code>ApplicationMaster</code> is 
-   *             responding
+   * @param port <em>RPC port</em> on which the {@code ApplicationMaster}
+   *             is responding
    */
   @Public
   @Stable

+ 5 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterResponse.java

@@ -36,16 +36,15 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>The response sent by the <code>ResourceManager</code> to a new 
- * <code>ApplicationMaster</code> on registration.</p>
- * 
- * <p>The response contains critical details such as:
+ * The response sent by the {@code ResourceManager} to a new
+ * {@code ApplicationMaster} on registration.
+ * <p>
+ * The response contains critical details such as:
  * <ul>
  *   <li>Maximum capability for allocated resources in the cluster.</li>
- *   <li><code>ApplicationACL</code>s for the application.</li>
+ *   <li>{@code ApplicationACL}s for the application.</li>
  *   <li>ClientToAMToken master key.</li>
  * </ul>
- * </p>
  * 
  * @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)
  */

+ 5 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/StartContainerRequest.java

@@ -74,10 +74,11 @@ public abstract class StartContainerRequest {
   public abstract void setContainerLaunchContext(ContainerLaunchContext context);
 
   /**
-   * <p>Get the container token to be used for authorization during starting
-   * container.</p>
-   * <p>Note: {@link NMToken} will be used for authenticating communication with </code>
-   * NodeManager</code>.</p>
+   * Get the container token to be used for authorization during starting
+   * container.
+   * <p>
+   * Note: {@link NMToken} will be used for authenticating communication with
+   * {@code NodeManager}.
    * @return the container token to be used for authorization during starting
    * container.
    * @see NMToken

+ 9 - 14
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java

@@ -24,24 +24,19 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>
- * <code>ApplicationAttemptReport</code> is a report of an application attempt.
- * </p>
- * 
+ * {@code ApplicationAttemptReport} is a report of an application attempt.
  * <p>
  * It includes details such as:
  * <ul>
- * <li>{@link ApplicationAttemptId} of the application.</li>
- * <li>Host on which the <code>ApplicationMaster</code> of this attempt is
- * running.</li>
- * <li>RPC port of the <code>ApplicationMaster</code> of this attempt.</li>
- * <li>Tracking URL.</li>
- * <li>Diagnostic information in case of errors.</li>
- * <li>{@link YarnApplicationAttemptState} of the application attempt.</li>
- * <li>{@link ContainerId} of the master Container.</li>
+ *   <li>{@link ApplicationAttemptId} of the application.</li>
+ *   <li>Host on which the <code>ApplicationMaster</code> of this attempt is
+ *   running.</li>
+ *   <li>RPC port of the <code>ApplicationMaster</code> of this attempt.</li>
+ *   <li>Tracking URL.</li>
+ *   <li>Diagnostic information in case of errors.</li>
+ *   <li>{@link YarnApplicationAttemptState} of the application attempt.</li>
+ *   <li>{@link ContainerId} of the master Container.</li>
  * </ul>
- * </p>
- * 
  */
 @Public
 @Unstable

+ 23 - 24
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java

@@ -28,23 +28,22 @@ import org.apache.hadoop.yarn.util.Records;
 import java.util.Set;
 
 /**
- * <p><code>ApplicationReport</code> is a report of an application.</p>
- *
- * <p>It includes details such as:
- *   <ul>
- *     <li>{@link ApplicationId} of the application.</li>
- *     <li>Applications user.</li>
- *     <li>Application queue.</li>
- *     <li>Application name.</li>
- *     <li>Host on which the <code>ApplicationMaster</code> is running.</li>
- *     <li>RPC port of the <code>ApplicationMaster</code>.</li>
- *     <li>Tracking URL.</li>
- *     <li>{@link YarnApplicationState} of the application.</li>
- *     <li>Diagnostic information in case of errors.</li>
- *     <li>Start time of the application.</li>
- *     <li>Client {@link Token} of the application (if security is enabled).</li>
- *   </ul>
- * </p>
+ * {@code ApplicationReport} is a report of an application.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link ApplicationId} of the application.</li>
+ *   <li>Applications user.</li>
+ *   <li>Application queue.</li>
+ *   <li>Application name.</li>
+ *   <li>Host on which the <code>ApplicationMaster</code> is running.</li>
+ *   <li>RPC port of the <code>ApplicationMaster</code>.</li>
+ *   <li>Tracking URL.</li>
+ *   <li>{@link YarnApplicationState} of the application.</li>
+ *   <li>Diagnostic information in case of errors.</li>
+ *   <li>Start time of the application.</li>
+ *   <li>Client {@link Token} of the application (if security is enabled).</li>
+ * </ul>
  *
  * @see ApplicationClientProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest)
  */
@@ -341,20 +340,20 @@
 
   /**
    * Get the AMRM token of the application.
-   * <p/>
+   * <p>
    * The AMRM token is required for AM to RM scheduling operations. For 
    * managed Application Masters Yarn takes care of injecting it. For unmanaged
    * Applications Masters, the token must be obtained via this method and set
    * in the {@link org.apache.hadoop.security.UserGroupInformation} of the
    * current user.
-   * <p/>
+   * <p>
    * The AMRM token will be returned only if all the following conditions are
    * met:
-   * <li>
-   *   <ul>the requester is the owner of the ApplicationMaster</ul>
-   *   <ul>the application master is an unmanaged ApplicationMaster</ul>
-   *   <ul>the application master is in ACCEPTED state</ul>
-   * </li>
+   * <ul>
+   *   <li>the requester is the owner of the ApplicationMaster</li>
+   *   <li>the application master is an unmanaged ApplicationMaster</li>
+   *   <li>the application master is in ACCEPTED state</li>
+   * </ul>
    * Else this method returns NULL.
    * 
    * @return the AM to RM token if available.

+ 26 - 24
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java

@@ -33,32 +33,34 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>ApplicationSubmissionContext</code> represents all of the
- * information needed by the <code>ResourceManager</code> to launch 
- * the <code>ApplicationMaster</code> for an application.</p>
- * 
- * <p>It includes details such as:
- *   <ul>
- *     <li>{@link ApplicationId} of the application.</li>
- *     <li>Application user.</li>
- *     <li>Application name.</li>
- *     <li>{@link Priority} of the application.</li>
- *     <li>
- *       {@link ContainerLaunchContext} of the container in which the 
- *       <code>ApplicationMaster</code> is executed.
- *     </li>
- *     <li>maxAppAttempts. The maximum number of application attempts.
+ * {@code ApplicationSubmissionContext} represents all of the
+ * information needed by the {@code ResourceManager} to launch
+ * the {@code ApplicationMaster} for an application.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link ApplicationId} of the application.</li>
+ *   <li>Application user.</li>
+ *   <li>Application name.</li>
+ *   <li>{@link Priority} of the application.</li>
+ *   <li>
+ *     {@link ContainerLaunchContext} of the container in which the
+ *     <code>ApplicationMaster</code> is executed.
+ *   </li>
+ *   <li>
+ *     maxAppAttempts. The maximum number of application attempts.
  *     It should be no larger than the global number of max attempts in the
- *     Yarn configuration.</li>
- *     <li>attemptFailuresValidityInterval. The default value is -1.
- *     when attemptFailuresValidityInterval in milliseconds is set to > 0,
- *     the failure number will no take failures which happen out of the
- *     validityInterval into failure count. If failure count reaches to
- *     maxAppAttempts, the application will be failed.
- *     </li>
+ *     Yarn configuration.
+ *   </li>
+ *   <li>
+ *     attemptFailuresValidityInterval. The default value is -1.
+ *     when attemptFailuresValidityInterval in milliseconds is set to
+ *     {@literal >} 0, the failure number will no take failures which happen
+ *     out of the validityInterval into failure count. If failure count
+ *     reaches to maxAppAttempts, the application will be failed.
+ *   </li>
  *   <li>Optional, application-specific {@link LogAggregationContext}</li>
- *   </ul>
- * </p>
+ * </ul>
  * 
  * @see ContainerLaunchContext
  * @see ApplicationClientProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest)

+ 23 - 26
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java

@@ -27,34 +27,31 @@ import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>Container</code> represents an allocated resource in the cluster.
- * </p>
- * 
- * <p>The <code>ResourceManager</code> is the sole authority to allocate any
- * <code>Container</code> to applications. The allocated <code>Container</code>
+ * {@code Container} represents an allocated resource in the cluster.
+ * <p>
+ * The {@code ResourceManager} is the sole authority to allocate any
+ * {@code Container} to applications. The allocated {@code Container}
  * is always on a single node and has a unique {@link ContainerId}. It has
- * a specific amount of {@link Resource} allocated.</p>
- * 
- * <p>It includes details such as:
- *   <ul>
- *     <li>{@link ContainerId} for the container, which is globally unique.</li>
- *     <li>
- *       {@link NodeId} of the node on which it is allocated.
- *     </li>
- *     <li>HTTP uri of the node.</li>
- *     <li>{@link Resource} allocated to the container.</li>
- *     <li>{@link Priority} at which the container was allocated.</li>
- *     <li>
- *       Container {@link Token} of the container, used to securely verify
- *       authenticity of the allocation. 
- *     </li>
- *   </ul>
- * </p>
+ * a specific amount of {@link Resource} allocated.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link ContainerId} for the container, which is globally unique.</li>
+ *   <li>
+ *     {@link NodeId} of the node on which it is allocated.
+ *   </li>
+ *   <li>HTTP uri of the node.</li>
+ *   <li>{@link Resource} allocated to the container.</li>
+ *   <li>{@link Priority} at which the container was allocated.</li>
+ *   <li>
+ *     Container {@link Token} of the container, used to securely verify
+ *     authenticity of the allocation.
+ *   </li>
+ * </ul>
  * 
- * <p>Typically, an <code>ApplicationMaster</code> receives the 
- * <code>Container</code> from the <code>ResourceManager</code> during
- * resource-negotiation and then talks to the <code>NodeManager</code> to 
- * start/stop containers.</p>
+ * Typically, an {@code ApplicationMaster} receives the {@code Container}
+ * from the {@code ResourceManager} during resource-negotiation and then
+ * talks to the {@code NodeManager} to start/stop containers.
  * 
  * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
  * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)

+ 17 - 18
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java

@@ -30,24 +30,23 @@ import org.apache.hadoop.yarn.server.api.AuxiliaryService;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>ContainerLaunchContext</code> represents all of the information
- * needed by the <code>NodeManager</code> to launch a container.</p>
- * 
- * <p>It includes details such as:
- *   <ul>
- *     <li>{@link ContainerId} of the container.</li>
- *     <li>{@link Resource} allocated to the container.</li>
- *     <li>User to whom the container is allocated.</li>
- *     <li>Security tokens (if security is enabled).</li>
- *     <li>
- *       {@link LocalResource} necessary for running the container such
- *       as binaries, jar, shared-objects, side-files etc. 
- *     </li>
- *     <li>Optional, application-specific binary service data.</li>
- *     <li>Environment variables for the launched process.</li>
- *     <li>Command to launch the container.</li>
- *   </ul>
- * </p>
+ * {@code ContainerLaunchContext} represents all of the information
+ * needed by the {@code NodeManager} to launch a container.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link ContainerId} of the container.</li>
+ *   <li>{@link Resource} allocated to the container.</li>
+ *   <li>User to whom the container is allocated.</li>
+ *   <li>Security tokens (if security is enabled).</li>
+ *   <li>
+ *     {@link LocalResource} necessary for running the container such
+ *     as binaries, jar, shared-objects, side-files etc.
+ *   </li>
+ *   <li>Optional, application-specific binary service data.</li>
+ *   <li>Environment variables for the launched process.</li>
+ *   <li>Command to launch the container.</li>
+ * </ul>
  * 
  * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
  */

+ 12 - 17
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerReport.java

@@ -24,27 +24,22 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>
- * <code>ContainerReport</code> is a report of an container.
- * </p>
- * 
+ * {@code ContainerReport} is a report of an container.
  * <p>
  * It includes details such as:
  * <ul>
- * <li>{@link ContainerId} of the container.</li>
- * <li>Allocated Resources to the container.</li>
- * <li>Assigned Node id.</li>
- * <li>Assigned Priority.</li>
- * <li>Creation Time.</li>
- * <li>Finish Time.</li>
- * <li>Container Exit Status.</li>
- * <li>{@link ContainerState} of the container.</li>
- * <li>Diagnostic information in case of errors.</li>
- * <li>Log URL.</li>
- * <li>nodeHttpAddress</li>
+ *   <li>{@link ContainerId} of the container.</li>
+ *   <li>Allocated Resources to the container.</li>
+ *   <li>Assigned Node id.</li>
+ *   <li>Assigned Priority.</li>
+ *   <li>Creation Time.</li>
+ *   <li>Finish Time.</li>
+ *   <li>Container Exit Status.</li>
+ *   <li>{@link ContainerState} of the container.</li>
+ *   <li>Diagnostic information in case of errors.</li>
+ *   <li>Log URL.</li>
+ *   <li>nodeHttpAddress</li>
  * </ul>
- * </p>
- * 
  */
 
 @Public

+ 10 - 11
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java

@@ -25,17 +25,16 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>ContainerStatus</code> represents the current status of a 
- * <code>Container</code>.</p>
- * 
- * <p>It provides details such as:
- *   <ul>
- *     <li><code>ContainerId</code> of the container.</li>
- *     <li><code>ContainerState</code> of the container.</li>
- *     <li><em>Exit status</em> of a completed container.</li>
- *     <li><em>Diagnostic</em> message for a failed container.</li>
- *   </ul>
- * </p>
+ * {@code ContainerStatus} represents the current status of a
+ * {@code Container}.
+ * <p>
+ * It provides details such as:
+ * <ul>
+ *   <li>{@code ContainerId} of the container.</li>
+ *   <li>{@code ContainerState} of the container.</li>
+ *   <li><em>Exit status</em> of a completed container.</li>
+ *   <li><em>Diagnostic</em> message for a failed container.</li>
+ * </ul>
  */
 @Public
 @Stable

+ 16 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceType.java

@@ -23,22 +23,22 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 
 /**
- * <p><code>LocalResourceType</code> specifies the <em>type</em>
- * of a resource localized by the <code>NodeManager</code>.</p> 
- *
- * <p>The <em>type</em> can be one of:
- *   <ul>
- *     <li>
- *       {@link #FILE} - Regular file i.e. uninterpreted bytes. 
- *     </li>
- *     <li>
- *       {@link #ARCHIVE} - Archive, which is automatically unarchived by the 
- *       <code>NodeManager</code>.
- *     </li>
- *     <li>
- *       {@link #PATTERN} - A hybrid between {@link #ARCHIVE} and {@link #FILE}.
- *   </ul>
- * </p>
+ * {@code LocalResourceType} specifies the <em>type</em>
+ * of a resource localized by the {@code NodeManager}.
+ * <p>
+ * The <em>type</em> can be one of:
+ * <ul>
+ *   <li>
+ *     {@link #FILE} - Regular file i.e. uninterpreted bytes.
+ *   </li>
+ *   <li>
+ *     {@link #ARCHIVE} - Archive, which is automatically unarchived by the
+ *     <code>NodeManager</code>.
+ *   </li>
+ *   <li>
+ *     {@link #PATTERN} - A hybrid between {@link #ARCHIVE} and {@link #FILE}.
+ *   </li>
+ * </ul>
  *
  * @see LocalResource
  * @see ContainerLaunchContext

+ 15 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LocalResourceVisibility.java

@@ -23,22 +23,21 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
 
 
 /**
 /**
- * <p><code>LocalResourceVisibility</code> specifies the <em>visibility</em>
- * of a resource localized by the <code>NodeManager</code>.</p>
- * 
- * <p>The <em>visibility</em> can be one of:
- *   <ul>
- *     <li>{@link #PUBLIC} - Shared by all users on the node.</li>
- *     <li>
- *       {@link #PRIVATE} - Shared among all applications of the 
- *       <em>same user</em> on the node.
- *     </li>
- *     <li>
- *       {@link #APPLICATION} - Shared only among containers of the 
- *       <em>same application</em> on the node.
- *     </li>
- *   </ul>
- * </p>
+ * {@code LocalResourceVisibility} specifies the <em>visibility</em>
+ * of a resource localized by the {@code NodeManager}.
+ * <p>
+ * The <em>visibility</em> can be one of:
+ * <ul>
+ *   <li>{@link #PUBLIC} - Shared by all users on the node.</li>
+ *   <li>
+ *     {@link #PRIVATE} - Shared among all applications of the
+ *     <em>same user</em> on the node.
+ *   </li>
+ *   <li>
+ *     {@link #APPLICATION} - Shared only among containers of the
+ *     <em>same application</em> on the node.
+ *   </li>
+ * </ul>
  * 
  * @see LocalResource
  * @see ContainerLaunchContext

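Not part of the commit: a sketch showing where LocalResourceType and LocalResourceVisibility are used when localizing a file for a container. The path and resource name are made-up examples and the calls assume the 2.6-era records API.

import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.util.ConverterUtils;

public class LocalResourceSketch {
  // Localize one jar as a plain FILE, visible only to containers of this application.
  static Map<String, LocalResource> appJar(FileSystem fs, Path jar) throws Exception {
    FileStatus stat = fs.getFileStatus(jar);
    LocalResource res = LocalResource.newInstance(
        ConverterUtils.getYarnUrlFromPath(jar),
        LocalResourceType.FILE,
        LocalResourceVisibility.APPLICATION,
        stat.getLen(), stat.getModificationTime());
    return Collections.singletonMap("app.jar", res);
  }
}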
+ 23 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/LogAggregationContext.java

@@ -24,30 +24,37 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>LogAggregationContext</code> represents all of the
- * information needed by the <code>NodeManager</code> to handle
- * the logs for an application.</p>
- *
- * <p>It includes details such as:
- *   <ul>
- *     <li>includePattern. It uses Java Regex to filter the log files
+ * {@code LogAggregationContext} represents all of the
+ * information needed by the {@code NodeManager} to handle
+ * the logs for an application.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>
+ *     includePattern. It uses Java Regex to filter the log files
  *     which match the defined include pattern and those log files
- *     will be uploaded when the application finishes. </li>
- *     <li>excludePattern. It uses Java Regex to filter the log files
+ *     will be uploaded when the application finishes.
+ *   </li>
+ *   <li>
+ *     excludePattern. It uses Java Regex to filter the log files
  *     which match the defined exclude pattern and those log files
  *     will not be uploaded when application finishes. If the log file
  *     name matches both the include and the exclude pattern, this file
- *     will be excluded eventually</li>
- *     <li>rolledLogsIncludePattern. It uses Java Regex to filter the log files
+ *     will be excluded eventually.
+ *   </li>
+ *   <li>
+ *     rolledLogsIncludePattern. It uses Java Regex to filter the log files
  *     which match the defined include pattern and those log files
- *     will be aggregated in a rolling fashion.</li>
- *     <li>rolledLogsExcludePattern. It uses Java Regex to filter the log files
+ *     will be aggregated in a rolling fashion.
+ *   </li>
+ *   <li>
+ *     rolledLogsExcludePattern. It uses Java Regex to filter the log files
  *     which match the defined exclude pattern and those log files
  *     will not be aggregated in a rolling fashion. If the log file
  *     name matches both the include and the exclude pattern, this file
- *     will be excluded eventually</li>
- *   </ul>
- * </p>
+ *     will be excluded eventually.
+ *   </li>
+ * </ul>
  *
  * @see ApplicationSubmissionContext
  */

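Not part of the commit: a sketch of building the record documented above and attaching it to an application submission; the regex patterns are made-up examples.

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;

public class LogAggregationSketch {
  // Upload only stdout/stderr when the application finishes, skip temporary files.
  static void configure(ApplicationSubmissionContext appContext) {
    LogAggregationContext logContext =
        LogAggregationContext.newInstance("std(out|err)", "tmp.*");
    appContext.setLogAggregationContext(logContext);
  }
}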
+ 12 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeReport.java

@@ -28,19 +28,18 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>NodeReport</code> is a summary of runtime information of a 
- * node in the cluster.</p>
- * 
- * <p>It includes details such as:
- *   <ul>
- *     <li>{@link NodeId} of the node.</li>
- *     <li>HTTP Tracking URL of the node.</li>
- *     <li>Rack name for the node.</li>
- *     <li>Used {@link Resource} on the node.</li>
- *     <li>Total available {@link Resource} of the node.</li>
- *     <li>Number of running containers on the node.</li>
- *   </ul>
- * </p>
+ * {@code NodeReport} is a summary of runtime information of a node
+ * in the cluster.
+ * <p>
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link NodeId} of the node.</li>
+ *   <li>HTTP Tracking URL of the node.</li>
+ *   <li>Rack name for the node.</li>
+ *   <li>Used {@link Resource} on the node.</li>
+ *   <li>Total available {@link Resource} of the node.</li>
+ *   <li>Number of running containers on the node.</li>
+ * </ul>
  *
  * @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)
  */

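Not part of the commit: a sketch that prints the NodeReport fields listed above for every running node, assuming the standard YarnClient API is on the classpath.

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class NodeReportSketch {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    for (NodeReport node : client.getNodeReports(NodeState.RUNNING)) {
      System.out.println(node.getNodeId() + " rack=" + node.getRackName()
          + " http=" + node.getHttpAddress()
          + " used=" + node.getUsed() + " of " + node.getCapability()
          + " containers=" + node.getNumContainers());
    }
    client.stop();
  }
}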
+ 16 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/PreemptionMessage.java

@@ -24,36 +24,36 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>A {@link PreemptionMessage} is part of the RM-AM protocol, and it is used by
+ * A {@link PreemptionMessage} is part of the RM-AM protocol, and it is used by
  * the RM to specify resources that the RM wants to reclaim from this
- * <code>ApplicationMaster</code> (AM). The AM receives a {@link
+ * {@code ApplicationMaster} (AM). The AM receives a {@link
  * StrictPreemptionContract} message encoding which containers the platform may
  * forcibly kill, granting it an opportunity to checkpoint state or adjust its
  * execution plan. The message may also include a {@link PreemptionContract}
  * granting the AM more latitude in selecting which resources to return to the
- * cluster.<p>
- *
- * <p>The AM should decode both parts of the message. The {@link
+ * cluster.
+ * <p>
+ * The AM should decode both parts of the message. The {@link
  * StrictPreemptionContract} specifies particular allocations that the RM
  * requires back. The AM can checkpoint containers' state, adjust its execution
  * plan to move the computation, or take no action and hope that conditions that
- * caused the RM to ask for the container will change.<p>
- *
- * <p>In contrast, the {@link PreemptionContract} also includes a description of
+ * caused the RM to ask for the container will change.
+ * <p>
+ * In contrast, the {@link PreemptionContract} also includes a description of
  * resources with a set of containers. If the AM releases containers matching
  * that profile, then the containers enumerated in {@link
- * PreemptionContract#getContainers()} may not be killed.<p>
- *
- * <p>Each preemption message reflects the RM's current understanding of the
- * cluster state, so a request to return <emph>N</emph> containers may not
+ * PreemptionContract#getContainers()} may not be killed.
+ * <p>
+ * Each preemption message reflects the RM's current understanding of the
+ * cluster state, so a request to return <em>N</em> containers may not
  * reflect containers the AM is releasing, recently exited containers the RM has
  * yet to learn about, or new containers allocated before the message was
  * generated. Conversely, an RM may request a different profile of containers in
- * subsequent requests.<p>
- *
- * <p>The policy enforced by the RM is part of the scheduler. Generally, only
+ * subsequent requests.
+ * <p>
+ * The policy enforced by the RM is part of the scheduler. Generally, only
  * containers that have been requested consistently should be killed, but the
- * details are not specified.<p>
+ * details are not specified.
  */
 @Public
 @Evolving

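Not part of the commit: a sketch of how an AM might inspect the strict part of a PreemptionMessage taken from an allocate response; handling of the negotiable PreemptionContract is left out.

import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.PreemptionContainer;
import org.apache.hadoop.yarn.api.records.PreemptionMessage;

public class PreemptionSketch {
  static void handle(AllocateResponse response) {
    PreemptionMessage msg = response.getPreemptionMessage();
    if (msg == null || msg.getStrictContract() == null) {
      return; // nothing is being reclaimed right now
    }
    for (PreemptionContainer c : msg.getStrictContract().getContainers()) {
      // The platform may forcibly kill these; checkpoint or release them proactively.
      System.out.println("About to lose container " + c.getId());
    }
  }
}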
+ 5 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueACL.java

@@ -23,18 +23,15 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 
 /**
- * <p>
- * <code>QueueACL</code> enumerates the various ACLs for queues.
- * </p>
- * 
+ * {@code QueueACL} enumerates the various ACLs for queues.
  * <p>
  * The ACL is one of:
  * <ul>
- * <li>{@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the
- * queue.</li>
- * <li>{@link #ADMINISTER_QUEUE} - ACL to administer the queue.</li>
+ *   <li>
+ *     {@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the queue.
+ *   </li>
+ *   <li>{@link #ADMINISTER_QUEUE} - ACL to administer the queue.</li>
  * </ul>
- * </p>
  * 
  * @see QueueInfo
  * @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)

+ 12 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueInfo.java

@@ -29,19 +29,18 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>QueueInfo is a report of the runtime information of the queue.</p>
- * 
- * <p>It includes information such as:
- *   <ul>
- *     <li>Queue name.</li>
- *     <li>Capacity of the queue.</li>
- *     <li>Maximum capacity of the queue.</li>
- *     <li>Current capacity of the queue.</li>
- *     <li>Child queues.</li>
- *     <li>Running applications.</li>
- *     <li>{@link QueueState} of the queue.</li>
- *   </ul>
- * </p>
+ * QueueInfo is a report of the runtime information of the queue.
+ * <p>
+ * It includes information such as:
+ * <ul>
+ *   <li>Queue name.</li>
+ *   <li>Capacity of the queue.</li>
+ *   <li>Maximum capacity of the queue.</li>
+ *   <li>Current capacity of the queue.</li>
+ *   <li>Child queues.</li>
+ *   <li>Running applications.</li>
+ *   <li>{@link QueueState} of the queue.</li>
+ * </ul>
  *
  * @see QueueState
  * @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)

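Not part of the commit: a sketch reading the queue fields described above via YarnClient; "default" is just an example queue name.

import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class QueueInfoSketch {
  public static void main(String[] args) throws Exception {
    YarnClient client = YarnClient.createYarnClient();
    client.init(new YarnConfiguration());
    client.start();
    QueueInfo queue = client.getQueueInfo("default");
    System.out.println(queue.getQueueName()
        + " state=" + queue.getQueueState()
        + " capacity=" + queue.getCapacity()
        + " max=" + queue.getMaximumCapacity()
        + " current=" + queue.getCurrentCapacity()
        + " children=" + queue.getChildQueues().size()
        + " apps=" + queue.getApplications().size());
    client.stop();
  }
}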
+ 7 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueState.java

@@ -23,14 +23,13 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 
 /**
- * <p>State of a Queue.</p>
- * 
- * <p>A queue is in one of:
- *   <ul>
- *     <li>{@link #RUNNING} - normal state.</li> 
- *     <li>{@link #STOPPED} - not accepting new application submissions.
- *   </ul>
- * </p>
+ * State of a Queue.
+ * <p>
+ * A queue is in one of:
+ * <ul>
+ *   <li>{@link #RUNNING} - normal state.</li>
+ *   <li>{@link #STOPPED} - not accepting new application submissions.</li>
+ * </ul>
  * 
  * @see QueueInfo
  * @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)

+ 6 - 11
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationRequest.java

@@ -25,23 +25,18 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p>
  * {@link ReservationRequest} represents the request made by an application to
  * the {@code ResourceManager} to reserve {@link Resource}s.
- * </p>
- * 
  * <p>
  * It includes:
  * <ul>
- * <li>{@link Resource} required for each request.</li>
- * <li>
- * Number of containers, of above specifications, which are required by the
- * application.</li>
- * <li>
- * Concurrency that indicates the gang size of the request.</li>
+ *   <li>{@link Resource} required for each request.</li>
+ *   <li>
+ *     Number of containers, of above specifications, which are required by the
+ *     application.
+ *   </li>
+ *   <li>Concurrency that indicates the gang size of the request.</li>
  * </ul>
- * </p>
- * 
  */
 @Public
 @Unstable

+ 19 - 19
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ReservationRequestInterpreter.java

@@ -33,14 +33,13 @@ public enum ReservationRequestInterpreter {
    * Requires that exactly ONE among the {@link ReservationRequest} submitted as
    * of a {@link ReservationDefinition} is satisfied to satisfy the overall
    * {@link ReservationDefinition}.
-   * 
+   * <p>
    * WHEN TO USE THIS: This is useful when the user have multiple equivalent
    * ways to run an application, and wants to expose to the ReservationAgent
-   * such flexibility. For example an application could use one <32GB,16core>
-   * container for 10min, or 16 <2GB,1core> containers for 15min, the
-   * ReservationAgent will decide which one of the two it is best for the system
-   * to place.
-   * 
+   * such flexibility. For example an application could use one
+   * {@literal <32GB,16core>} container for 10min, or 16 {@literal <2GB,1core>}
+   * containers for 15min, the ReservationAgent will decide which one of the
+   * two it is best for the system to place.
    */
   R_ANY,
 
@@ -49,16 +48,16 @@ public enum ReservationRequestInterpreter {
    * {@link ReservationDefinition} are satisfied for the overall
    * {@link ReservationDefinition} to be satisfied. No constraints are imposed
    * on the temporal ordering of the allocation used to satisfy the
-   * ResourceRequeusts.
-   * 
+   * ResourceRequests.
+   * <p>
    * WHEN TO USE THIS: This is useful to capture a scenario in which the user
    * cares for multiple ReservationDefinition to be all accepted, or none. For
-   * example, a user might want a reservation R1: with 10 x <8GB,4core> for
-   * 10min, and a reservation R2: with 2 <1GB,1core> for 1h, and only if both
-   * are satisfied the workflow run in this reservation succeeds. The key
-   * differentiator from ALL and ORDER, ORDER_NO_GAP, is that ALL imposes no
-   * restrictions on the relative allocations used to place R1 and R2 above.
-   * 
+   * example, a user might want a reservation R1: with 10 x
+   * {@literal <8GB,4core>} for 10min, and a reservation R2:
+   * with 2 {@literal <1GB,1core>} for 1h, and only if both are satisfied
+   * the workflow run in this reservation succeeds. The key differentiator
+   * from ALL and ORDER, ORDER_NO_GAP, is that ALL imposes no restrictions
+   * on the relative allocations used to place R1 and R2 above.
    */
   R_ALL,
 
@@ -73,15 +72,16 @@ public enum ReservationRequestInterpreter {
    * constraints are imposed on temporal gaps between subsequent allocations
    * (the last instant of the previous allocation can be an arbitrary long
    * period of time before the first instant of the subsequent allocation).
-   * 
+   * <p>
    * WHEN TO USE THIS: Like ALL this requires all ReservationDefinitions to be
    * placed, but it also imposes a time ordering on the allocations used. This
    * is important if the ReservationDefinition(s) are used to describe a
    * workflow with inherent inter-stage dependencies. For example, a first job
-   * runs in a ReservaitonDefinition R1 (10 x <1GB,1core> for 20min), and its
-   * output is consumed by a second job described by a ReservationDefinition R2
-   * (5 x <1GB,1core>) for 50min). R2 allocation cannot overlap R1, as R2 models
-   * a job depending on the output of the job modeled by R1.
+   * runs in a ReservaitonDefinition R1 (10 x {@literal <1GB,1core>}
+   * for 20min), and its output is consumed by a second job described by
+   * a ReservationDefinition R2 (5 x {@literal <1GB,1core>}) for 50min).
+   * R2 allocation cannot overlap R1, as R2 models a job depending on
+   * the output of the job modeled by R1.
    */
   R_ORDER,
 

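Not part of the commit: a sketch combining ReservationRequest and ReservationRequestInterpreter.R_ANY for the two equivalent allocations used as an example above. The newInstance signatures are assumed from the 2.6-era records API and all numbers are made up.

import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.ReservationDefinition;
import org.apache.hadoop.yarn.api.records.ReservationRequest;
import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter;
import org.apache.hadoop.yarn.api.records.ReservationRequests;
import org.apache.hadoop.yarn.api.records.Resource;

public class ReservationSketch {
  // Either one <32GB,16core> container for 10min or 16 <2GB,1core> containers for 15min.
  static ReservationDefinition build(long startMs) {
    ReservationRequest big =
        ReservationRequest.newInstance(Resource.newInstance(32 * 1024, 16), 1, 1, 10 * 60 * 1000);
    ReservationRequest small =
        ReservationRequest.newInstance(Resource.newInstance(2 * 1024, 1), 16, 16, 15 * 60 * 1000);
    ReservationRequests any = ReservationRequests.newInstance(
        Arrays.asList(big, small), ReservationRequestInterpreter.R_ANY);
    return ReservationDefinition.newInstance(startMs, startMs + 60 * 60 * 1000, any, "demo");
  }
}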
+ 25 - 26
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java

@@ -27,31 +27,30 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>ResourceRequest</code> represents the request made by an
- * application to the <code>ResourceManager</code> to obtain various 
- * <code>Container</code> allocations.</p>
- * 
- * <p>It includes:
- *   <ul>
- *     <li>{@link Priority} of the request.</li>
- *     <li>
- *       The <em>name</em> of the machine or rack on which the allocation is 
- *       desired. A special value of <em>*</em> signifies that 
- *       <em>any</em> host/rack is acceptable to the application.
- *     </li>
- *     <li>{@link Resource} required for each request.</li>
- *     <li>
- *       Number of containers, of above specifications, which are required 
- *       by the application.
- *     </li>
- *     <li>
- *       A boolean <em>relaxLocality</em> flag, defaulting to <code>true</code>,
- *       which tells the <code>ResourceManager</code> if the application wants
- *       locality to be loose (i.e. allows fall-through to rack or <em>any</em>)
- *       or strict (i.e. specify hard constraint on resource allocation).
- *     </li>
- *   </ul>
- * </p>
+ * {@code ResourceRequest} represents the request made
+ * by an application to the {@code ResourceManager}
+ * to obtain various {@code Container} allocations.
+ * <p>
+ * It includes:
+ * <ul>
+ *   <li>{@link Priority} of the request.</li>
+ *   <li>
+ *     The <em>name</em> of the machine or rack on which the allocation is
+ *     desired. A special value of <em>*</em> signifies that
+ *     <em>any</em> host/rack is acceptable to the application.
+ *   </li>
+ *   <li>{@link Resource} required for each request.</li>
+ *   <li>
+ *     Number of containers, of above specifications, which are required
+ *     by the application.
+ *   </li>
+ *   <li>
+ *     A boolean <em>relaxLocality</em> flag, defaulting to {@code true},
+ *     which tells the {@code ResourceManager} if the application wants
+ *     locality to be loose (i.e. allows fall-through to rack or <em>any</em>)
+ *     or strict (i.e. specify hard constraint on resource allocation).
+ *   </li>
+ * </ul>
  * 
  * @see Resource
  * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
@@ -267,7 +266,7 @@ public abstract class ResourceRequest implements Comparable<ResourceRequest> {
   /**
    * Set node label expression of this resource request. Now only support
    * specifying a single node label. In the future we will support more complex
-   * node label expression specification like AND(&&), OR(||), etc.
+   * node label expression specification like {@code AND(&&), OR(||)}, etc.
    * 
    * Any please note that node label expression now can only take effect when
    * the resource request has resourceName = ANY

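Not part of the commit: a sketch of the record described above, asking for containers anywhere in the cluster with relaxed locality; sizes and priority are arbitrary.

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class ResourceRequestSketch {
  static ResourceRequest anyHost() {
    return ResourceRequest.newInstance(
        Priority.newInstance(1),        // priority of this request
        ResourceRequest.ANY,            // "*": any host or rack is acceptable
        Resource.newInstance(1024, 1),  // 1 GB, 1 vcore per container
        4,                              // number of containers
        true);                          // relaxLocality
  }
}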
+ 3 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -733,8 +733,9 @@ public class YarnConfiguration extends Configuration {
   
   /**
    * How long to wait between aggregated log retention checks. If set to
-   * a value <= 0 then the value is computed as one-tenth of the log retention
-   * setting. Be careful set this too small and you will spam the name node.
+   * a value {@literal <=} 0 then the value is computed as one-tenth of the
+   * log retention setting. Be careful set this too small and you will spam
+   * the name node.
    */
   public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS =
       YARN_PREFIX + "log-aggregation.retain-check-interval-seconds";

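Not part of the commit: a one-liner making the retention check interval explicit instead of relying on the computed one-tenth default; 3600 seconds is an arbitrary example.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LogRetentionCheckSketch {
  static YarnConfiguration conf() {
    YarnConfiguration conf = new YarnConfiguration();
    conf.setLong(YarnConfiguration.LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS, 3600L);
    return conf;
  }
}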
+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/UpdateNodeResourceRequest.java

@@ -54,7 +54,7 @@ public abstract class UpdateNodeResourceRequest {
   
   /**
    * Get the map from <code>NodeId</code> to <code>ResourceOption</code>.
-   * @return the map of <NodeId, ResourceOption>
+   * @return the map of {@code <NodeId, ResourceOption>}
    */
   @Public
   @Evolving
@@ -62,7 +62,7 @@ public abstract class UpdateNodeResourceRequest {
   
   /**
    * Set the map from <code>NodeId</code> to <code>ResourceOption</code>.
-   * @param nodeResourceMap the map of <NodeId, ResourceOption>
+   * @param nodeResourceMap the map of {@code <NodeId, ResourceOption>}
    */
   @Public
   @Evolving

+ 9 - 15
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AHSClient.java

@@ -56,28 +56,22 @@ public abstract class AHSClient extends AbstractService {
   }
 
   /**
-   * <p>
    * Get a report of the given Application.
-   * </p>
-   * 
    * <p>
    * In secure mode, <code>YARN</code> verifies access to the application, queue
    * etc. before accepting the request.
-   * </p>
-   * 
    * <p>
    * If the user does not have <code>VIEW_APP</code> access then the following
    * fields in the report will be set to stubbed values:
    * <ul>
-   * <li>host - set to "N/A"</li>
-   * <li>RPC port - set to -1</li>
-   * <li>client token - set to "N/A"</li>
-   * <li>diagnostics - set to "N/A"</li>
-   * <li>tracking URL - set to "N/A"</li>
-   * <li>original tracking URL - set to "N/A"</li>
-   * <li>resource usage report - all values are -1</li>
+   *   <li>host - set to "N/A"</li>
+   *   <li>RPC port - set to -1</li>
+   *   <li>client token - set to "N/A"</li>
+   *   <li>diagnostics - set to "N/A"</li>
+   *   <li>tracking URL - set to "N/A"</li>
+   *   <li>original tracking URL - set to "N/A"</li>
+   *   <li>resource usage report - all values are -1</li>
    * </ul>
-   * </p>
    * 
    * @param appId
    *          {@link ApplicationId} of the application that needs a report
@@ -121,7 +115,7 @@ public abstract class AHSClient extends AbstractService {
    *          a report
    * @return application attempt report
    * @throws YarnException
-   * @throws {@link ApplicationAttemptNotFoundException} if application attempt
+   * @throws ApplicationAttemptNotFoundException if application attempt
    *         not found
    * @throws IOException
    */
@@ -157,7 +151,7 @@ public abstract class AHSClient extends AbstractService {
    *          {@link ContainerId} of the container that needs a report
    * @return container report
    * @throws YarnException
-   * @throws {@link ContainerNotFoundException} if container not found
+   * @throws ContainerNotFoundException if container not found
    * @throws IOException
    */
   public abstract ContainerReport getContainerReport(ContainerId containerId)

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java

@@ -349,7 +349,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
    * Set the NM token cache for the <code>AMRMClient</code>. This cache must
    * be shared with the {@link NMClient} used to manage containers for the
    * <code>AMRMClient</code>
-   * <p/>
+   * <p>
    * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
    * singleton instance will be used.
    *
@@ -363,7 +363,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
    * Get the NM token cache of the <code>AMRMClient</code>. This cache must be
    * shared with the {@link NMClient} used to manage containers for the
    * <code>AMRMClient</code>.
-   * <p/>
+   * <p>
    * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
    * singleton instance will be used.
    *

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMClient.java

@@ -125,7 +125,7 @@ public abstract class NMClient extends AbstractService {
    * Set the NM Token cache of the <code>NMClient</code>. This cache must be
    * shared with the {@link AMRMClient} that requested the containers managed
    * by this <code>NMClient</code>
-   * <p/>
+   * <p>
    * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
    * singleton instance will be used.
    *
@@ -139,7 +139,7 @@ public abstract class NMClient extends AbstractService {
    * Get the NM token cache of the <code>NMClient</code>. This cache must be
    * shared with the {@link AMRMClient} that requested the containers managed
    * by this <code>NMClient</code>
-   * <p/>
+   * <p>
    * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
    * singleton instance will be used.
    *

+ 27 - 31
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/NMTokenCache.java

@@ -34,26 +34,26 @@ import com.google.common.annotations.VisibleForTesting;
 /**
  * NMTokenCache manages NMTokens required for an Application Master
  * communicating with individual NodeManagers.
- * <p/>
+ * <p>
  * By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use
  * {@link #getSingleton()} instance of the cache.
  * <ul>
- * <li>Using the singleton instance of the cache is appropriate when running a
- * single ApplicationMaster in the same JVM.</li>
- * <li>When using the singleton, users don't need to do anything special,
- * {@link AMRMClient} and {@link NMClient} are already set up to use the default
- * singleton {@link NMTokenCache}</li>
+ *   <li>
+ *     Using the singleton instance of the cache is appropriate when running a
+ *     single ApplicationMaster in the same JVM.
+ *   </li>
+ *   <li>
+ *     When using the singleton, users don't need to do anything special,
+ *     {@link AMRMClient} and {@link NMClient} are already set up to use the
+ *     default singleton {@link NMTokenCache}
+ *     </li>
  * </ul>
- * <p/>
  * If running multiple Application Masters in the same JVM, a different cache
  * instance should be used for each Application Master.
- * <p/>
  * <ul>
- * <li>
- * If using the {@link AMRMClient} and the {@link NMClient}, setting up and using
- * an instance cache is as follows:
- * <p/>
- * 
+ *   <li>
+ *     If using the {@link AMRMClient} and the {@link NMClient}, setting up
+ *     and using an instance cache is as follows:
  * <pre>
  *   NMTokenCache nmTokenCache = new NMTokenCache();
  *   AMRMClient rmClient = AMRMClient.createAMRMClient();
@@ -61,12 +61,10 @@ import com.google.common.annotations.VisibleForTesting;
  *   nmClient.setNMTokenCache(nmTokenCache);
  *   ...
  * </pre>
- * </li>
- * <li>
- * If using the {@link AMRMClientAsync} and the {@link NMClientAsync}, setting up
- * and using an instance cache is as follows:
- * <p/>
- * 
+ *   </li>
+ *   <li>
+ *     If using the {@link AMRMClientAsync} and the {@link NMClientAsync},
+ *     setting up and using an instance cache is as follows:
  * <pre>
  *   NMTokenCache nmTokenCache = new NMTokenCache();
  *   AMRMClient rmClient = AMRMClient.createAMRMClient();
@@ -76,13 +74,11 @@ import com.google.common.annotations.VisibleForTesting;
  *   NMClientAsync nmClientAsync = new NMClientAsync("nmClient", nmClient, [NM_CALLBACK]);
  *   ...
  * </pre>
- * </li>
- * <li>
- * If using {@link ApplicationMasterProtocol} and
- * {@link ContainerManagementProtocol} directly, setting up and using an
- * instance cache is as follows:
- * <p/>
- * 
+ *   </li>
+ *   <li>
+ *     If using {@link ApplicationMasterProtocol} and
+ *     {@link ContainerManagementProtocol} directly, setting up and using an
+ *     instance cache is as follows:
  * <pre>
  *   NMTokenCache nmTokenCache = new NMTokenCache();
  *   ...
@@ -100,12 +96,12 @@ import com.google.common.annotations.VisibleForTesting;
  *   nmPro.startContainer(container, containerContext);
  *   ...
  * </pre>
- * </li>
+ *   </li>
  * </ul>
- * It is also possible to mix the usage of a client (<code>AMRMClient</code> or
- * <code>NMClient</code>, or the async versions of them) with a protocol proxy (
- * <code>ContainerManagementProtocolProxy</code> or
- * <code>ApplicationMasterProtocol</code>).
+ * It is also possible to mix the usage of a client ({@code AMRMClient} or
+ * {@code NMClient}, or the async versions of them) with a protocol proxy
+ * ({@code ContainerManagementProtocolProxy} or
+ * {@code ApplicationMasterProtocol}).
  */
 @Public
 @Evolving

+ 11 - 12
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java

@@ -32,14 +32,12 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -58,8 +56,10 @@ import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
+import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
+import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 
@@ -171,7 +171,6 @@ public abstract class YarnClient extends AbstractService {
    * <li>original tracking URL - set to "N/A"</li>
    * <li>resource usage report - all values are -1</li>
    * </ul>
-   * </p>
    * 
    * @param appId
    *          {@link ApplicationId} of the application that needs a report
@@ -184,20 +183,20 @@ public abstract class YarnClient extends AbstractService {
 
   /**
    * Get the AMRM token of the application.
-   * <p/>
+   * <p>
    * The AMRM token is required for AM to RM scheduling operations. For 
    * managed Application Masters Yarn takes care of injecting it. For unmanaged
    * Applications Masters, the token must be obtained via this method and set
    * in the {@link org.apache.hadoop.security.UserGroupInformation} of the
    * current user.
-   * <p/>
+   * <p>
    * The AMRM token will be returned only if all the following conditions are
    * met:
-   * <li>
-   *   <ul>the requester is the owner of the ApplicationMaster</ul>
-   *   <ul>the application master is an unmanaged ApplicationMaster</ul>
-   *   <ul>the application master is in ACCEPTED state</ul>
-   * </li>
+   * <ul>
+   *   <li>the requester is the owner of the ApplicationMaster</li>
+   *   <li>the application master is an unmanaged ApplicationMaster</li>
+   *   <li>the application master is in ACCEPTED state</li>
+   * </ul>
    * Else this method returns NULL.
    *
    * @param appId {@link ApplicationId} of the application to get the AMRM token
@@ -415,7 +414,7 @@ public abstract class YarnClient extends AbstractService {
    *          a report
    * @return application attempt report
    * @throws YarnException
-   * @throws {@link ApplicationAttemptNotFoundException} if application attempt
+   * @throws ApplicationAttemptNotFoundException if application attempt
    *         not found
    * @throws IOException
    */
@@ -450,7 +449,7 @@ public abstract class YarnClient extends AbstractService {
    *          {@link ContainerId} of the container that needs a report
    * @return container report
    * @throws YarnException
-   * @throws {@link ContainerNotFoundException} if container not found.
+   * @throws ContainerNotFoundException if container not found.
    * @throws IOException
    */
   public abstract ContainerReport getContainerReport(ContainerId containerId)

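Not part of the commit: a sketch for an unmanaged AM fetching the AMRM token under the three conditions listed above and attaching it to the current user's credentials.

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

public class AmrmTokenSketch {
  static void attach(YarnClient client, ApplicationId appId) throws Exception {
    Token<AMRMTokenIdentifier> token = client.getAMRMToken(appId);
    if (token != null) { // null unless owner + unmanaged AM + ACCEPTED state
      UserGroupInformation.getCurrentUser().addToken(token);
    }
  }
}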
+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java

@@ -344,7 +344,7 @@ public class CommonNodeLabelsManager extends AbstractService {
   /**
    * add more labels to nodes
    * 
-   * @param addedLabelsToNode node -> labels map
+   * @param addedLabelsToNode node {@literal ->} labels map
    */
   public void addLabelsToNode(Map<NodeId, Set<String>> addedLabelsToNode)
       throws IOException {
@@ -614,7 +614,7 @@ public class CommonNodeLabelsManager extends AbstractService {
    * remove labels from nodes, labels being removed most be contained by these
    * nodes
    * 
-   * @param removeLabelsFromNode node -> labels map
+   * @param removeLabelsFromNode node {@literal ->} labels map
    */
   public void
       removeLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode)
@@ -668,7 +668,7 @@ public class CommonNodeLabelsManager extends AbstractService {
   /**
    * replace labels to nodes
    * 
-   * @param replaceLabelsToNode node -> labels map
+   * @param replaceLabelsToNode node {@literal ->} labels map
    */
   public void replaceLabelsOnNode(Map<NodeId, Set<String>> replaceLabelsToNode)
       throws IOException {

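Not part of the commit: a sketch of the node-to-labels map these methods accept; the host name, port and "gpu" label are made up.

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.NodeId;

public class NodeLabelMapSketch {
  static Map<NodeId, Set<String>> gpuNode() {
    return Collections.singletonMap(
        NodeId.newInstance("worker-17.example.com", 45454),
        Collections.singleton("gpu"));
  }
}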
+ 1 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/NodeLabelsStore.java

@@ -35,7 +35,7 @@ public abstract class NodeLabelsStore implements Closeable {
   }
   
   /**
-   * Store node -> label
+   * Store node {@literal ->} label
    */
   public abstract void updateNodeToLabelsMappings(
       Map<NodeId, Set<String>> nodeToLabels) throws IOException;
@@ -54,7 +54,6 @@ public abstract class NodeLabelsStore implements Closeable {
   
   /**
    * Recover labels and node to labels mappings from store
-   * @param conf
    */
   public abstract void recover() throws IOException;
   

+ 0 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/security/ApplicationACLsManager.java

@@ -93,7 +93,6 @@ public class ApplicationACLsManager {
    * @param applicationAccessType
    * @param applicationOwner
    * @param applicationId
-   * @throws AccessControlException
    */
   public boolean checkAccess(UserGroupInformation callerUGI,
       ApplicationAccessType applicationAccessType, String applicationOwner,

+ 3 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/StringHelper.java

@@ -88,7 +88,7 @@ public final class StringHelper {
   }
 
   /**
-   * Join on slash & colon (e.g., path args in routing spec)
+   * Join on slash and colon (e.g., path args in routing spec)
    * @param args to join
    * @return args joined with /:
    */
@@ -116,7 +116,7 @@ public final class StringHelper {
   }
 
   /**
-   * Split on space & trim results.
+   * Split on space and trim results.
    * @param s the string to split
    * @return an iterable of strings
    */
@@ -125,7 +125,7 @@ public final class StringHelper {
   }
 
   /**
-   * Split on _ & trim results
+   * Split on _ and trim results
    * @param s the string to split
    * @return an iterable of strings
    */

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java

@@ -52,13 +52,13 @@ import com.google.inject.servlet.GuiceFilter;
 /**
  * Helpers to create an embedded webapp.
  *
- * <h4>Quick start:</h4>
+ * <b>Quick start:</b>
  * <pre>
  *   WebApp wa = WebApps.$for(myApp).start();</pre>
  * Starts a webapp with default routes binds to 0.0.0.0 (all network interfaces)
  * on an ephemeral port, which can be obtained with:<pre>
  *   int port = wa.port();</pre>
- * <h4>With more options:</h4>
+ * <b>With more options:</b>
  * <pre>
  *   WebApp wa = WebApps.$for(myApp).at(address, port).
  *                        with(configuration).

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java

@@ -116,10 +116,10 @@ public class RegistryUtils {
   }
 
   /**
-   * Create a path to a service under a user & service class
+   * Create a path to a service under a user and service class
    * @param user username or ""
    * @param serviceClass service name
-   * @param serviceName service name unique for that user & service class
+   * @param serviceName service name unique for that user and service class
    * @return a full path
    */
   public static String servicePath(String user,
@@ -135,7 +135,7 @@ public class RegistryUtils {
    * Create a path for listing components under a service
    * @param user username or ""
    * @param serviceClass service name
-   * @param serviceName service name unique for that user & service class
+   * @param serviceName service name unique for that user and service class
    * @return a full path
    */
   public static String componentListPath(String user,
@@ -149,7 +149,7 @@ public class RegistryUtils {
    * Create the path to a service record for a component
    * @param user username or ""
    * @param serviceClass service name
-   * @param serviceName service name unique for that user & service class
+   * @param serviceName service name unique for that user and service class
    * @param componentName unique name/ID of the component
    * @return a full path
    */

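Not part of the commit: a sketch of the path helpers above for a hypothetical service; the user, service class and instance names are made up.

import org.apache.hadoop.registry.client.binding.RegistryUtils;

public class RegistryPathSketch {
  public static void main(String[] args) {
    String service = RegistryUtils.servicePath("alice", "org-apache-hbase", "cluster-1");
    String components = RegistryUtils.componentListPath("alice", "org-apache-hbase", "cluster-1");
    System.out.println(service);     // path to the service record
    System.out.println(components);  // path under which its components are listed
  }
}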
+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/RegistryOperationsClient.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
  *
  * For SASL, the client must be operating in the context of an authed user.
  *
- * For id:pass the client must have the relevant id & password, SASL is
+ * For id:pass the client must have the relevant id and password, SASL is
  * not used even if the client has credentials.
  *
  * For anonymous, nothing is used.

+ 1 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java

@@ -71,14 +71,13 @@ public interface ZookeeperConfigOptions {
    * The SASL client username: {@value}.
    * <p>
    * Set this to the <i>short</i> name of the client, e.g, "user",
-   * not <code>user/host</code>, or <code>user/host@REALM</code>
+   * not {@code user/host}, or {@code user/host@REALM}
    */
   String PROP_ZK_SASL_CLIENT_USERNAME = "zookeeper.sasl.client.username";
 
   /**
    * The SASL Server context, referring to a context in the JVM's
    * JAAS context file: {@value}
-   * <p>
    */
   String PROP_ZK_SERVER_SASL_CONTEXT =
       ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY;

+ 5 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java

@@ -47,16 +47,16 @@ import java.net.UnknownHostException;
 /**
  * This is a small, localhost Zookeeper service instance that is contained
  * in a YARN service...it's been derived from Apache Twill.
- *
+ * <p>
  * It implements {@link RegistryBindingSource} and provides binding information,
- * <i>once started</i>. Until <code>start()</code> is called, the hostname &
+ * <i>once started</i>. Until {@link #start()} is called, the hostname and
  * port may be undefined. Accordingly, the service raises an exception in this
  * condition.
- *
+ * <p>
  * If you wish to chain together a registry service with this one under
- * the same <code>CompositeService</code>, this service must be added
+ * the same {@code CompositeService}, this service must be added
  * as a child first.
- *
+ * <p>
  * It also sets the configuration parameter
  * {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM}
  * to its connection string. Any code with access to the service configuration

+ 4 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java

@@ -19,9 +19,10 @@
 /**
  * Basic services for the YARN registry
  * <ul>
- *   <li>The {@link org.apache.hadoop.registry.server.services.RegistryAdminService}</ol>
- *   extends the shared Yarn Registry client with registry setup and
- *   (potentially asynchronous) administrative actions.
+ *   <li>
+ *     The {@link org.apache.hadoop.registry.server.services.RegistryAdminService}
+ *     extends the shared Yarn Registry client with registry setup and
+ *     (potentially asynchronous) administrative actions.
  *   </li>
  *   <li>
  *     The {@link org.apache.hadoop.registry.server.services.MicroZookeeperService}
@@ -33,8 +34,6 @@
  *     extends the standard YARN composite service by making its add and remove
  *     methods public. It is a utility service used in parts of the codebase
  *   </li>
- *
  * </ul>
- *
  */
 package org.apache.hadoop.registry.server.services;

+ 4 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java

@@ -45,17 +45,15 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import com.google.common.annotations.VisibleForTesting;
 
 /**
- * <p>
  * Initializes {@link TimelineAuthenticationFilter} which provides support for
  * Kerberos HTTP SPNEGO authentication.
- * <p/>
  * <p>
  * It enables Kerberos HTTP SPNEGO plus delegation token authentication for the
  * timeline server.
- * <p/>
- * Refer to the <code>core-default.xml</code> file, after the comment 'HTTP
+ * <p>
+ * Refer to the {@code core-default.xml} file, after the comment 'HTTP
  * Authentication' for details on the configuration options. All related
- * configuration properties have 'hadoop.http.authentication.' as prefix.
+ * configuration properties have {@code hadoop.http.authentication.} as prefix.
  */
 public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
 
@@ -71,14 +69,11 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
   Map<String, String> filterConfig;
 
   /**
-   * <p>
    * Initializes {@link TimelineAuthenticationFilter}
-   * <p/>
    * <p>
    * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
    * configuration properties prefixed with
-   * "yarn.timeline-service.authentication."
-   * </p>
+   * {@code yarn.timeline-service.authentication.}
   * 
   * @param container
   *          The filter container
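
A minimal sketch of the prefix-propagation pattern this Javadoc describes, using a plain java.util.Map in place of Hadoop's Configuration and FilterContainer; the class and method names here are illustrative only, not the Hadoop API:

import java.util.HashMap;
import java.util.Map;

public class PrefixedFilterConfigSketch {
  // Hypothetical prefix, mirroring the property prefix named in the Javadoc above.
  static final String PREFIX = "yarn.timeline-service.authentication.";

  // Copy every property carrying the prefix into the filter config,
  // re-keyed without the prefix (e.g. "...authentication.type" -> "type").
  static Map<String, String> collectFilterConfig(Map<String, String> conf) {
    Map<String, String> filterConfig = new HashMap<>();
    for (Map.Entry<String, String> e : conf.entrySet()) {
      if (e.getKey().startsWith(PREFIX)) {
        filterConfig.put(e.getKey().substring(PREFIX.length()), e.getValue());
      }
    }
    return filterConfig;
  }
}

The real initializer additionally wires in the Kerberos/SPNEGO and delegation-token settings; this sketch only shows the prefix handling.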

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java

@@ -40,7 +40,7 @@ public class ZKClient {
    * the zookeeper client library to 
    * talk to zookeeper 
    * @param string the host
-   * @throws throws IOException
+   * @throws IOException
    */
   public ZKClient(String string) throws IOException {
     zkClient = new ZooKeeper(string, 30000, new ZKWatcher());

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java

@@ -52,7 +52,8 @@ public abstract class RegisterNodeManagerRequest {
    * We introduce this here because currently YARN RM doesn't persist nodes info
    * for application running. When RM restart happened, we cannot determinate if
    * a node should do application cleanup (like log-aggregation, status update,
-   * etc.) or not. <p/>
+   * etc.) or not.
+   * <p>
    * When we have this running application list in node manager register
    * request, we can recover nodes info for running applications. And then we
    * can take actions accordingly

+ 11 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeHealthStatus.java

@@ -26,19 +26,17 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * <p><code>NodeHealthStatus</code> is a summary of the health status of the
- * node.</p>
- *
- * <p>It includes information such as:
- *   <ul>
- *     <li>
- *       An indicator of whether the node is healthy, as determined by the 
- *       health-check script.
- *     </li>
- *     <li>The previous time at which the health status was reported.</li>
- *     <li>A diagnostic report on the health status.</li>
- *   </ul>
- * </p>
+ * {@code NodeHealthStatus} is a summary of the health status of the node.
+ * <p>
+ * It includes information such as:
+ * <ul>
+ *   <li>
+ *     An indicator of whether the node is healthy, as determined by the
+ *     health-check script.
+ *   </li>
+ *   <li>The previous time at which the health status was reported.</li>
+ *   <li>A diagnostic report on the health status.</li>
+ * </ul>
  * 
  * @see NodeReport
  * @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)

+ 5 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java

@@ -102,13 +102,15 @@ public abstract class ContainerExecutor implements Configurable {
  
   /**
    * Prepare the environment for containers in this application to execute.
+   * <pre>
    * For $x in local.dirs
    *   create $x/$user/$appId
-   * Copy $nmLocal/appTokens -> $N/$user/$appId
+   * Copy $nmLocal/appTokens {@literal ->} $N/$user/$appId
    * For $rsrc in private resources
-   *   Copy $rsrc -> $N/$user/filecache/[idef]
+   *   Copy $rsrc {@literal ->} $N/$user/filecache/[idef]
    * For $rsrc in job resources
-   *   Copy $rsrc -> $N/$user/$appId/filecache/idef
+   *   Copy $rsrc {@literal ->} $N/$user/$appId/filecache/idef
+   * </pre>
    * @param user user name of application owner
    * @param appId id of the application
    * @param nmPrivateContainerTokens path to localized credentials, rsrc by NM
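
The <pre> block above is pseudocode for the per-application directory layout. As a hedged illustration of the same layout in plain Java NIO (the class, parameters, and paths are made up for this example and are not ContainerExecutor's actual API):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;

public class AppDirLayoutSketch {
  // "For $x in local.dirs: create $x/$user/$appId"; the copy steps for
  // app tokens and localized resources would follow the same pattern.
  static void prepareAppDirs(List<String> localDirs, String user, String appId)
      throws IOException {
    for (String dir : localDirs) {
      Path appDir = Paths.get(dir, user, appId);
      Files.createDirectories(appDir);
      // Resources would then land under $x/$user/filecache (private)
      // and $x/$user/$appId/filecache (per-application).
    }
  }
}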

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/NodeManagerHardwareUtils.java

@@ -65,11 +65,11 @@ public class NodeManagerHardwareUtils {
   }
 
   /**
-   * Gets the percentage of physical CPU that is configured for YARN containers
-   * This is percent > 0 and <= 100  based on
-   * YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
+   * Gets the percentage of physical CPU that is configured for YARN containers.
+   * This is percent {@literal >} 0 and {@literal <=} 100 based on
+   * {@link YarnConfiguration#NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT}
    * @param conf Configuration object
-   * @return percent > 0 and <= 100
+   * @return percent {@literal >} 0 and {@literal <=} 100
    */
   public static int getNodeCpuPercentage(Configuration conf) {
     int nodeCpuPercentage =
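
Per the corrected Javadoc, the returned percentage must lie in the range (0, 100]. A hedged sketch of how such a configured value could be clamped and validated; the real NodeManagerHardwareUtils reads the value from YarnConfiguration and its exact error handling may differ:

public class CpuPercentageSketch {
  static int nodeCpuPercentage(int configuredValue) {
    int percent = Math.min(100, configuredValue);  // never report more than 100%
    if (percent <= 0) {
      throw new IllegalArgumentException(
          "CPU percentage must be > 0 and <= 100, got " + configuredValue);
    }
    return percent;
  }

  public static void main(String[] args) {
    System.out.println(nodeCpuPercentage(80));   // 80
    System.out.println(nodeCpuPercentage(250));  // clamped to 100
  }
}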

+ 7 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java

@@ -216,11 +216,14 @@ public interface RMAppAttempt extends EventHandler<RMAppAttemptEvent> {
   /**
    * Return the flag which indicates whether the attempt failure should be
    * counted to attempt retry count.
-   * <ul>
+   * <p>
    * There failure types should not be counted to attempt retry count:
-   * <li>preempted by the scheduler.</li>
-   * <li>hardware failures, such as NM failing, lost NM and NM disk errors.</li>
-   * <li>killed by RM because of RM restart or failover.</li>
+   * <ul>
+   *   <li>preempted by the scheduler.</li>
+   *   <li>
+   *     hardware failures, such as NM failing, lost NM and NM disk errors.
+   *   </li>
+   *   <li>killed by RM because of RM restart or failover.</li>
    * </ul>
    */
   boolean shouldCountTowardsMaxAttemptRetry();
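
To make the flag's intent concrete, a hedged sketch of how a caller might count only qualifying failures against an application's retry budget; the Attempt interface and maxAttempts parameter are hypothetical stand-ins, not the ResourceManager's actual bookkeeping:

import java.util.List;

public class RetryCountSketch {
  interface Attempt {
    boolean shouldCountTowardsMaxAttemptRetry();
  }

  static boolean retriesExhausted(List<Attempt> attempts, int maxAttempts) {
    int counted = 0;
    for (Attempt a : attempts) {
      // Preemption, NM/hardware failures and RM restarts return false here,
      // so they do not consume the retry budget.
      if (a.shouldCountTowardsMaxAttemptRetry()) {
        counted++;
      }
    }
    return counted >= maxAttempts;
  }
}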

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java

@@ -114,7 +114,7 @@ public abstract class SchedulerNode {
 
   /**
    * Get the name of the node for scheduling matching decisions.
-   * <p/>
+   * <p>
    * Typically this is the 'hostname' reported by the node, but it could be
    * configured to be 'hostname:port' reported by the node via the
    * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant.

+ 1 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java

@@ -194,8 +194,7 @@ public class SchedulerUtils {
    * Utility method to validate a resource request, by insuring that the
    * requested memory/vcore is non-negative and not greater than max
    * 
-   * @throws <code>InvalidResourceRequestException</code> when there is invalid
-   *         request
+   * @throws InvalidResourceRequestException when there is invalid request
    */
   public static void validateResourceRequest(ResourceRequest resReq,
       Resource maximumResource, String queueName, YarnScheduler scheduler)
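
A hedged sketch of the validation contract stated above, with primitive parameters standing in for ResourceRequest/Resource and IllegalArgumentException standing in for InvalidResourceRequestException:

public class ResourceValidationSketch {
  static void validate(long memoryMb, int vcores, long maxMemoryMb, int maxVcores) {
    // Requested memory and vcores must be non-negative and within the scheduler maximum.
    if (memoryMb < 0 || memoryMb > maxMemoryMb) {
      throw new IllegalArgumentException(
          "Invalid memory request " + memoryMb + ", maximum is " + maxMemoryMb);
    }
    if (vcores < 0 || vcores > maxVcores) {
      throw new IllegalArgumentException(
          "Invalid vcore request " + vcores + ", maximum is " + maxVcores);
    }
  }
}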

+ 10 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java

@@ -71,7 +71,7 @@ public class ComputeFairShares {
    * fair shares. The min and max shares and of the Schedulables are assumed to
    * be set beforehand. We compute the fairest possible allocation of shares to
    * the Schedulables that respects their min and max shares.
-   * 
+   * <p>
    * To understand what this method does, we must first define what weighted
    * fair sharing means in the presence of min and max shares. If there
    * were no minimum or maximum shares, then weighted fair sharing would be
@@ -79,30 +79,31 @@ public class ComputeFairShares {
    * Schedulable and all slots were assigned. Minimum and maximum shares add a
    * further twist - Some Schedulables may have a min share higher than their
    * assigned share or a max share lower than their assigned share.
-   * 
+   * <p>
    * To deal with these possibilities, we define an assignment of slots as being
    * fair if there exists a ratio R such that: Schedulables S where S.minShare
-   * > R * S.weight are given share S.minShare - Schedulables S where S.maxShare
-   * < R * S.weight are given S.maxShare - All other Schedulables S are
-   * assigned share R * S.weight - The sum of all the shares is totalSlots.
-   * 
+   * {@literal >} R * S.weight are given share S.minShare - Schedulables S
+   * where S.maxShare {@literal <} R * S.weight are given S.maxShare -
+   * All other Schedulables S are assigned share R * S.weight -
+   * The sum of all the shares is totalSlots.
+   * <p>
    * We call R the weight-to-slots ratio because it converts a Schedulable's
    * weight to the number of slots it is assigned.
-   * 
+   * <p>
    * We compute a fair allocation by finding a suitable weight-to-slot ratio R.
    * To do this, we use binary search. Given a ratio R, we compute the number of
    * slots that would be used in total with this ratio (the sum of the shares
    * computed using the conditions above). If this number of slots is less than
    * totalSlots, then R is too small and more slots could be assigned. If the
    * number of slots is more than totalSlots, then R is too large.
-   * 
+   * <p>
    * We begin the binary search with a lower bound on R of 0 (which means that
    * all Schedulables are only given their minShare) and an upper bound computed
    * to be large enough that too many slots are given (by doubling R until we
    * use more than totalResources resources). The helper method
    * resourceUsedWithWeightToResourceRatio computes the total resources used with a
    * given value of R.
-   * 
+   * <p>
    * The running time of this algorithm is linear in the number of Schedulables,
    * because resourceUsedWithWeightToResourceRatio is linear-time and the number of
    * iterations of binary search is a constant (dependent on desired precision).
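
The Javadoc above describes a binary search for the weight-to-slots ratio R. The following self-contained sketch mirrors that search on a simplified Schedulable with scalar weight, min share and max share; the real ComputeFairShares works on Resource objects and handles further edge cases:

import java.util.Arrays;
import java.util.List;

public class FairShareSketch {
  static class Sched {
    final double weight, minShare, maxShare;
    Sched(double weight, double minShare, double maxShare) {
      this.weight = weight; this.minShare = minShare; this.maxShare = maxShare;
    }
    // Share assigned at ratio R, clamped to [minShare, maxShare].
    double shareAt(double r) {
      return Math.min(maxShare, Math.max(minShare, r * weight));
    }
  }

  // Total slots used at a given ratio (the role played by
  // resourceUsedWithWeightToResourceRatio in the description above).
  static double usedAt(List<Sched> scheds, double r) {
    double total = 0;
    for (Sched s : scheds) {
      total += s.shareAt(r);
    }
    return total;
  }

  // Binary-search for a ratio R whose clamped shares sum to roughly totalSlots.
  static double findRatio(List<Sched> scheds, double totalSlots) {
    double lo = 0.0, hi = 1.0;
    // Grow the upper bound until it over-allocates (give up after 64 doublings
    // in case the max shares can never reach totalSlots).
    for (int i = 0; i < 64 && usedAt(scheds, hi) < totalSlots; i++) {
      hi *= 2;
    }
    for (int i = 0; i < 50; i++) {          // fixed iterations, fixed precision
      double mid = (lo + hi) / 2;
      if (usedAt(scheds, mid) < totalSlots) {
        lo = mid;
      } else {
        hi = mid;
      }
    }
    return hi;
  }

  public static void main(String[] args) {
    List<Sched> scheds = Arrays.asList(
        new Sched(1, 0, 100), new Sched(2, 10, 30), new Sched(1, 0, 100));
    double r = findRatio(scheds, 60);
    for (Sched s : scheds) {
      System.out.println(s.shareAt(r));     // shares of roughly 15, 30, 15
    }
  }
}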

+ 0 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java

@@ -364,7 +364,6 @@ public class DelegationTokenRenewer extends AbstractService {
    * @param shouldCancelAtEnd true if tokens should be canceled when the app is
    * done else false. 
    * @param user user
-   * @throws IOException
    */
   public void addApplicationAsync(ApplicationId applicationId, Credentials ts,
       boolean shouldCancelAtEnd, String user) {
@@ -634,7 +633,6 @@
  
   /**
    * removing failed DT
-   * @param applicationId
    */
   private void removeFailedDelegationToken(DelegationTokenToRenew t) {
     ApplicationId applicationId = t.applicationId;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/ProxyUriUtils.java

@@ -181,7 +181,7 @@ public class ProxyUriUtils {
  
   /**
    * Returns the scheme if present in the url
-   * eg. "https://issues.apache.org/jira/browse/YARN" > "https"
+   * eg. "https://issues.apache.org/jira/browse/YARN" {@literal ->} "https"
    */
   public static String getSchemeFromUrl(String url) {
     int index = 0;
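
A hedged sketch of the behaviour the corrected example describes ("https://issues.apache.org/jira/browse/YARN" yields "https"); it is an illustration only, not the ProxyUriUtils implementation:

public class SchemeSketch {
  // Return the text before "://" if present, otherwise an empty string.
  static String schemeOf(String url) {
    int sep = url.indexOf("://");
    return sep >= 0 ? url.substring(0, sep) : "";
  }

  public static void main(String[] args) {
    System.out.println(schemeOf("https://issues.apache.org/jira/browse/YARN")); // https
    System.out.println(schemeOf("no-scheme/path"));                             // (empty)
  }
}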
     int index = 0;