HDFS-13944. [JDK10] Fix javadoc errors in hadoop-hdfs-rbf module. Contributed by Inigo Goiri.

Akira Ajisaka 6 years ago
parent commit fa7f7078a7
35 changed files with 166 additions and 104 deletions
  1. 1 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
  2. 6 5
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java
  3. 1 0
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java
  4. 2 0
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java
  5. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java
  6. 3 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java
  7. 1 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java
  8. 1 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
  9. 3 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java
  10. 2 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java
  11. 9 0
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java
  12. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
  13. 3 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
  14. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
  15. 3 3
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
  16. 4 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java
  17. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java
  18. 15 8
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
  19. 10 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
  20. 16 10
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java
  21. 9 0
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStateManager.java
  22. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java
  23. 9 7
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MembershipStore.java
  24. 5 5
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RouterStore.java
  25. 9 10
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
  26. 4 0
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java
  27. 11 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java
  28. 0 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreBaseImpl.java
  29. 5 7
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java
  30. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java
  31. 0 3
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java
  32. 15 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java
  33. 1 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/Query.java
  34. 1 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java
  35. 5 0
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java

@@ -102,7 +102,7 @@ public class RouterAdminProtocolServerSideTranslatorPB implements
  /**
   * Constructor.
   * @param server The NN server.
-   * @throws IOException
+   * @throws IOException if it cannot create the translator.
   */
  public RouterAdminProtocolServerSideTranslatorPB(RouterAdminServer server)
      throws IOException {

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java

@@ -27,16 +27,17 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * Locates the most active NN for a given nameservice ID or blockpool ID. This
- * interface is used by the {@link org.apache.hadoop.hdfs.server.federation.
- * router.RouterRpcServer RouterRpcServer} to:
+ * interface is used by the {@link
+ * org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer
+ * RouterRpcServer} to:
 * <ul>
 * <li>Determine the target NN for a given subcluster.
 * <li>List of all namespaces discovered/active in the federation.
 * <li>Update the currently active NN empirically.
 * </ul>
- * The interface is also used by the {@link org.apache.hadoop.hdfs.server.
- * federation.router.NamenodeHeartbeatService NamenodeHeartbeatService} to
- * register a discovered NN.
+ * The interface is also used by the {@link
+ * org.apache.hadoop.hdfs.server.federation.router.NamenodeHeartbeatService
+ * NamenodeHeartbeatService} to register a discovered NN.
 */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/FileSubclusterResolver.java

@@ -60,6 +60,7 @@ public interface FileSubclusterResolver {
   * Get a list of mount points for a path. Results are from the mount table
   * cache.
   *
+   * @param path Path to get the mount points under.
   * @return List of mount points present at this path or zero-length list if
   *         none are found.
   * @throws IOException Throws exception if the data is not available.

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java

@@ -416,6 +416,7 @@ public class MountTableResolver
   * the read lock.
   * @param path Path to check/insert.
   * @return New remote location.
+   * @throws IOException If it cannot find the location.
   */
  public PathLocation lookupLocation(final String path) throws IOException {
    PathLocation ret = null;
@@ -631,6 +632,7 @@ public class MountTableResolver
  /**
   * Get the size of the cache.
   * @return Size of the cache.
+   * @throws IOException If the cache is not initialized.
   */
  protected long getCacheSize() throws IOException{
    if (this.locationCache != null) {
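
The hunk above documents MountTableResolver#lookupLocation, the public entry point for resolving a federated path through the mount table cache. A minimal, hedged sketch of how a caller might use it; the resolver instance and the sample path are assumptions for illustration, not part of this change:

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;

class MountTableLookupSketch {
  // "resolver" is assumed to be the MountTableResolver of a running Router.
  static void example(MountTableResolver resolver) throws IOException {
    // Resolve a federated path to its destination subcluster(s); per the new
    // javadoc this throws IOException if it cannot find the location.
    PathLocation location = resolver.lookupLocation("/federated/data");
    System.out.println(location);
  }
}
```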

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MultipleDestinationMountTableResolver.java

@@ -44,8 +44,8 @@ import com.google.common.annotations.VisibleForTesting;
 * <p>
 * Does the Mount table entry for this path have multiple destinations?
 * <ul>
- * <li>No -> Return the location
- * <li>Yes -> Return all locations, prioritizing the best guess from the
+ * <li>No: Return the location
+ * <li>Yes: Return all locations, prioritizing the best guess from the
 * consistent hashing algorithm.
 * </ul>
 * <p>

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java

@@ -190,9 +190,9 @@ public class NamenodeStatusReport {
  }

  /**
-   * Get the HA service state.
+   * Set the HA service state.
   *
-   * @return The HA service state.
+   * @param state The HA service state to set.
   */
  public void setHAServiceState(HAServiceState state) {
    this.status = state;
@@ -293,6 +293,7 @@ public class NamenodeStatusReport {
   * @param numBlocksPendingReplication Number of blocks pending replication.
   * @param numBlocksUnderReplicated Number of blocks under replication.
   * @param numBlocksPendingDeletion Number of blocks pending deletion.
+   * @param providedSpace Space in provided storage.
   */
  public void setNamesystemInfo(long available, long total,
      long numFiles, long numBlocks, long numBlocksMissing,

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java

@@ -83,7 +83,7 @@ public class AvailableSpaceResolver
   * caching to avoid too many calls. The cache might be updated asynchronously
   * to reduce latency.
   *
-   * @return NamespaceId -> {@link SubclusterAvailableSpace}
+   * @return NamespaceId to {@link SubclusterAvailableSpace}.
   */
  @Override
  protected Map<String, SubclusterAvailableSpace> getSubclusterInfo(

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java

@@ -65,7 +65,7 @@ public class LocalResolver extends RouterResolver<String, String> {
   * too many calls. The cache might be updated asynchronously to reduce
   * latency.
   *
-   * @return Node IP -> Subcluster.
+   * @return Node IP to Subcluster.
   */
  @Override
  protected Map<String, String> getSubclusterInfo(

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/package-info.java

@@ -21,8 +21,9 @@
 * federation. The data resolvers collect data from the cluster, including from
 * the state store. The resolvers expose APIs used by HDFS federation to collect
 * aggregated, cached data for use in Real-time request processing. The
- * resolvers are perf-sensitive and are used in the flow of the
- * {@link RouterRpcServer} request path.
+ * resolvers are perf-sensitive and are used in the flow of the {@link
+ * org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer
+ * RouterRpcServer} request path.
 * <p>
 * The principal resolvers are:
 * <ul>

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java

@@ -151,6 +151,7 @@ public class ConnectionPool {

  /**
   * Get the clientIndex used to calculate index for lookup.
+   * @return Client index.
   */
  @VisibleForTesting
  public AtomicInteger getClientIndex() {
@@ -300,7 +301,7 @@ public class ConnectionPool {
   * Create a new proxy wrapper for a client NN connection.
   * @return Proxy for the target ClientProtocol that contains the user's
   *         security context.
-   * @throws IOException
+   * @throws IOException If it cannot get a new connection.
   */
  public ConnectionContext newConnection() throws IOException {
    return newConnection(

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NameserviceManager.java

@@ -33,18 +33,27 @@ public interface NameserviceManager {

  /**
   * Disable a name service.
+   * @param request Request to disable a name service.
+   * @return Response to disable a name service.
+   * @throws IOException If it cannot perform the operation.
   */
  DisableNameserviceResponse disableNameservice(
      DisableNameserviceRequest request) throws IOException;

  /**
   * Enable a name service.
+   * @param request Request to enable a name service.
+   * @return Response to enable a name service.
+   * @throws IOException If it cannot perform the operation.
   */
  EnableNameserviceResponse enableNameservice(EnableNameserviceRequest request)
      throws IOException;

  /**
   * Get the list of disabled name service.
+   * @param request Request to get the disabled name services.
+   * @return Response to get the disabled name services.
+   * @throws IOException If it cannot perform the operation.
   */
  GetDisabledNameservicesResponse getDisabledNameservices(
      GetDisabledNameservicesRequest request) throws IOException;
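
The NameserviceManager interface documented above follows the request/response pattern used across the RBF state store protocol. A hedged sketch of driving it from a caller; the newInstance(...) factories and the getStatus()/getNameservices() accessors on the protocol classes are assumptions for illustration only:

```java
import java.io.IOException;

import org.apache.hadoop.hdfs.server.federation.router.NameserviceManager;
import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameserviceResponse;
import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesRequest;
import org.apache.hadoop.hdfs.server.federation.store.protocol.GetDisabledNameservicesResponse;

class NameserviceManagerSketch {
  // "manager" is assumed to be a NameserviceManager implementation (for example
  // the Router admin server); "ns1" is a made-up nameservice id.
  static void example(NameserviceManager manager) throws IOException {
    // Stop routing client requests to one subcluster.
    DisableNameserviceResponse disabled = manager.disableNameservice(
        DisableNameserviceRequest.newInstance("ns1"));        // factory assumed
    System.out.println("ns1 disabled: " + disabled.getStatus()); // accessor assumed

    // List the name services that are currently disabled.
    GetDisabledNameservicesResponse response = manager.getDisabledNameservices(
        GetDisabledNameservicesRequest.newInstance());         // factory assumed
    System.out.println(response.getNameservices());            // accessor assumed
  }
}
```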

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java

@@ -62,7 +62,7 @@ public class Quota {
   * @param namespaceQuota Name space quota.
   * @param storagespaceQuota Storage space quota.
   * @param type StorageType that the space quota is intended to be set on.
-   * @throws IOException
+   * @throws IOException If the quota system is disabled.
   */
  public void setQuota(String path, long namespaceQuota,
      long storagespaceQuota, StorageType type) throws IOException {
@@ -91,7 +91,7 @@ public class Quota {
   * Get quota usage for the federation path.
   * @param path Federation path.
   * @return Aggregated quota.
-   * @throws IOException
+   * @throws IOException If the quota system is disabled.
   */
  public QuotaUsage getQuotaUsage(String path) throws IOException {
    rpcServer.checkOperation(OperationCategory.READ);
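
The two methods documented in this hunk are the Router's quota entry points. A minimal, hedged usage sketch; the wiring of the Quota instance and the sample path are assumptions, not part of this change:

```java
import java.io.IOException;

import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.federation.router.Quota;

class QuotaModuleSketch {
  // "quota" is assumed to be the Quota module of a running Router
  // (for example obtained through RouterRpcServer#getQuotaModule).
  static void example(Quota quota) throws IOException {
    // Set a name space quota of 10000 entries on a federated path, leave the
    // storage space quota unchanged (QUOTA_DONT_SET), and pass a null
    // StorageType for a regular, non type-specific quota.
    quota.setQuota("/federated/dir", 10000, HdfsConstants.QUOTA_DONT_SET, null);

    // Read back the usage aggregated across the destination subclusters.
    QuotaUsage usage = quota.getQuotaUsage("/federated/dir");
    System.out.println("files+dirs: " + usage.getFileAndDirectoryCount()
        + ", space consumed: " + usage.getSpaceConsumed());
  }
}
```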

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java

@@ -498,7 +498,7 @@ public class Router extends CompositeService {
  /**
   * Update the router state and heartbeat to the state store.
   *
-   * @param state The new router state.
+   * @param newState The new router state.
   */
  public void updateRouterState(RouterServiceState newState) {
    this.state = newState;
@@ -636,7 +636,8 @@ public class Router extends CompositeService {
  }

  /**
-   * If the quota system is enabled in Router.
+   * Check if the quota system is enabled in Router.
+   * @return True if the quota system is enabled in Router.
   */
  public boolean isQuotaEnabled() {
    return this.quotaManager != null;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java

@@ -415,8 +415,8 @@ public class RouterAdminServer extends AbstractService
   * control. This method will be invoked during each RPC call in router
   * admin server.
   *
-   * @return Router permission checker
-   * @throws AccessControlException
+   * @return Router permission checker.
+   * @throws AccessControlException If the user is not authorized.
   */
  public static RouterPermissionChecker getPermissionChecker()
      throws AccessControlException {

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java

@@ -142,9 +142,9 @@ public class RouterClientProtocol implements ClientProtocol {
  /**
   * The the delegation token from each name service.
   *
-   * @param renewer
-   * @return Name service -> Token.
-   * @throws IOException
+   * @param renewer The token renewer.
+   * @return Name service to Token.
+   * @throws IOException If it cannot get the delegation token.
   */
  public Map<FederationNamespaceInfo, Token<DelegationTokenIdentifier>>
  getDelegationTokens(Text renewer) throws IOException {

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaManager.java

@@ -50,6 +50,7 @@ public class RouterQuotaManager {

  /**
   * Get all the mount quota paths.
+   * @return All the mount quota paths.
   */
  public Set<String> getAll() {
    readLock.lock();
@@ -88,8 +89,8 @@ public class RouterQuotaManager {

  /**
   * Get children paths (can including itself) under specified federation path.
-   * @param parentPath
-   * @return Set<String> Children path set.
+   * @param parentPath Federated path.
+   * @return Set of children paths.
   */
  public Set<String> getPaths(String parentPath) {
    readLock.lock();
@@ -154,6 +155,7 @@ public class RouterQuotaManager {
  /**
   * Check if the quota was set.
   * @param quota RouterQuotaUsage set in mount table.
+   * @return True if the quota is set.
   */
  public boolean isQuotaSet(RouterQuotaUsage quota) {
    if (quota != null) {
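
The signatures visible in this hunk (getAll, getPaths, isQuotaSet) make the RouterQuotaManager easy to illustrate. A hedged sketch; the manager instance and the sample mount path are assumed:

```java
import java.util.Set;

import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaManager;

class RouterQuotaManagerSketch {
  // "manager" is assumed to be the RouterQuotaManager kept by a running Router.
  static void example(RouterQuotaManager manager) {
    // All mount table paths that currently track a quota.
    Set<String> all = manager.getAll();

    // Children paths (possibly including the parent itself) under a mount point.
    Set<String> children = manager.getPaths("/federated");

    System.out.println(all.size() + " quota paths, "
        + children.size() + " under /federated");
  }
}
```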

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterQuotaUsage.java

@@ -72,7 +72,7 @@ public final class RouterQuotaUsage extends QuotaUsage {
  /**
   * Verify if namespace quota is violated once quota is set. Relevant
   * method {@link DirectoryWithQuotaFeature#verifyNamespaceQuota}.
-   * @throws NSQuotaExceededException
+   * @throws NSQuotaExceededException If the quota is exceeded.
   */
  public void verifyNamespaceQuota() throws NSQuotaExceededException {
    if (Quota.isViolated(getQuota(), getFileAndDirectoryCount())) {
@@ -84,7 +84,7 @@ public final class RouterQuotaUsage extends QuotaUsage {
  /**
   * Verify if storage space quota is violated once quota is set. Relevant
   * method {@link DirectoryWithQuotaFeature#verifyStoragespaceQuota}.
-   * @throws DSQuotaExceededException
+   * @throws DSQuotaExceededException If the quota is exceeded.
   */
  public void verifyStoragespaceQuota() throws DSQuotaExceededException {
    if (Quota.isViolated(getSpaceQuota(), getSpaceConsumed())) {
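
A hedged sketch of how a caller might react to the two verify methods documented above; the usage instance and the write-rejection interpretation are assumptions for illustration:

```java
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;

class QuotaCheckSketch {
  // "usage" is assumed to be the RouterQuotaUsage tracked for one mount point.
  static boolean withinQuota(RouterQuotaUsage usage) {
    try {
      usage.verifyNamespaceQuota();    // throws if the file/dir count exceeds the quota
      usage.verifyStoragespaceQuota(); // throws if the consumed space exceeds the quota
      return true;
    } catch (NSQuotaExceededException | DSQuotaExceededException e) {
      // The mount point is over quota; a write through the Router would fail.
      return false;
    }
  }
}
```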

+ 15 - 8
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java

@@ -70,7 +70,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;

 /**
- * A client proxy for Router -> NN communication using the NN ClientProtocol.
+ * A client proxy for Router to NN communication using the NN ClientProtocol.
 * <p>
 * Provides routers to invoke remote ClientProtocol methods and handle
 * retries/failover.
@@ -584,7 +584,7 @@ public class RouterRpcClient {
   * @param block Block used to determine appropriate nameservice.
   * @param method The remote method and parameters to invoke.
   * @return The result of invoking the method.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
   */
  public Object invokeSingle(final ExtendedBlock block, RemoteMethod method)
      throws IOException {
@@ -602,7 +602,7 @@ public class RouterRpcClient {
   * @param bpId Block pool identifier.
   * @param method The remote method and parameters to invoke.
   * @return The result of invoking the method.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
   */
  public Object invokeSingleBlockPool(final String bpId, RemoteMethod method)
      throws IOException {
@@ -619,7 +619,7 @@ public class RouterRpcClient {
   * @param nsId Target namespace for the method.
   * @param method The remote method and parameters to invoke.
   * @return The result of invoking the method.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
   */
  public Object invokeSingle(final String nsId, RemoteMethod method)
      throws IOException {
@@ -639,6 +639,7 @@ public class RouterRpcClient {
   * Re-throws exceptions generated by the remote RPC call as either
   * RemoteException or IOException.
   *
+   * @param <T> The type of the remote method return.
   * @param nsId Target namespace for the method.
   * @param method The remote method and parameters to invoke.
   * @param clazz Class for the return type.
@@ -661,7 +662,7 @@ public class RouterRpcClient {
   * @param location RemoteLocation to invoke.
   * @param remoteMethod The remote method and parameters to invoke.
   * @return The result of invoking the method if successful.
-   * @throws IOException
+   * @throws IOException If the invoke generated an error.
   */
  public Object invokeSingle(final RemoteLocationContext location,
      RemoteMethod remoteMethod) throws IOException {
@@ -700,6 +701,7 @@ public class RouterRpcClient {
   * If no expected result class/values are specified, the success condition is
   * a call that does not throw a remote exception.
   *
+   * @param <T> The type of the remote method return.
   * @param locations List of locations/nameservices to call concurrently.
   * @param remoteMethod The remote method and parameters to invoke.
   * @param expectedResultClass In order to be considered a positive result, the
@@ -871,6 +873,8 @@ public class RouterRpcClient {
  /**
   * Invoke method in all locations and return success if any succeeds.
   *
+   * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
   * @param locations List of remote locations to call concurrently.
   * @param method The remote method and parameters to invoke.
   * @return If the call succeeds in any location.
@@ -899,6 +903,7 @@ public class RouterRpcClient {
   * RemoteException or IOException.
   *
   * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
   * @param locations List of remote locations to call concurrently.
   * @param method The remote method and parameters to invoke.
   * @throws IOException If all the calls throw an exception.
@@ -917,9 +922,10 @@ public class RouterRpcClient {
   * RemoteException or IOException.
   *
   * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
   * @param locations List of remote locations to call concurrently.
   * @param method The remote method and parameters to invoke.
-   * @return Result of invoking the method per subcluster: nsId -> result.
+   * @return Result of invoking the method per subcluster: nsId to result.
   * @throws IOException If all the calls throw an exception.
   */
  public <T extends RemoteLocationContext, R> Map<T, R> invokeConcurrent(
@@ -936,6 +942,7 @@ public class RouterRpcClient {
   * RemoteException or IOException.
   *
   * @param <T> The type of the remote location.
+   * @param <R> The type of the remote method return.
   * @param locations List of remote locations to call concurrently.
   * @param method The remote method and parameters to invoke.
   * @param requireResponse If true an exception will be thrown if all calls do
@@ -966,7 +973,7 @@ public class RouterRpcClient {
   *          successfully received are returned.
   * @param standby If the requests should go to the standby namenodes too.
   * @param clazz Type of the remote return type.
-   * @return Result of invoking the method per subcluster: nsId -> result.
+   * @return Result of invoking the method per subcluster: nsId to result.
   * @throws IOException If requiredResponse=true and any of the calls throw an
   *           exception.
   */
@@ -995,7 +1002,7 @@ public class RouterRpcClient {
   * @param standby If the requests should go to the standby namenodes too.
   * @param timeOutMs Timeout for each individual call.
   * @param clazz Type of the remote return type.
-   * @return Result of invoking the method per subcluster: nsId -> result.
+   * @return Result of invoking the method per subcluster: nsId to result.
   * @throws IOException If requiredResponse=true and any of the calls throw an
   *           exception.
   */
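
The invokeSingle(nsId, method) entry point documented in this file is the simplest way to see the RouterRpcClient in action. A hedged sketch; the RemoteMethod constructor used here, the mapping to ClientProtocol#getStats(), and the "ns0" nameservice id are assumptions for illustration:

```java
import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;

class RpcClientSketch {
  // "rpcClient" is assumed to be the RouterRpcClient of a running Router.
  static void example(RouterRpcClient rpcClient) throws IOException {
    // Ask the active NN of subcluster "ns0" to run a ClientProtocol method
    // by name (assumed to resolve to ClientProtocol#getStats()).
    RemoteMethod method = new RemoteMethod("getStats"); // constructor assumed
    Object stats = rpcClient.invokeSingle("ns0", method); // signature shown in the hunk
    System.out.println(Arrays.toString((long[]) stats));
  }
}
```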

+ 10 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java

@@ -354,6 +354,8 @@ public class RouterRpcServer extends AbstractService

  /**
   * Get the active namenode resolver
+   *
+   * @return Active namenode resolver.
   */
  public ActiveNamenodeResolver getNamenodeResolver() {
    return namenodeResolver;
@@ -786,8 +788,8 @@ public class RouterRpcServer extends AbstractService
   * Get the list of datanodes per subcluster.
   *
   * @param type Type of the datanodes to get.
-   * @return nsId -> datanode list.
-   * @throws IOException
+   * @return nsId to datanode list.
+   * @throws IOException If the method cannot be invoked remotely.
   */
  public Map<String, DatanodeStorageReport[]> getDatanodeStorageReportMap(
      DatanodeReportType type) throws IOException {
@@ -1414,7 +1416,9 @@ public class RouterRpcServer extends AbstractService

  /**
   * Merge the outputs from multiple namespaces.
-   * @param map Namespace -> Output array.
+   *
+   * @param <T> The type of the objects to merge.
+   * @param map Namespace to Output array.
   * @param clazz Class of the values.
   * @return Array with the outputs.
   */
@@ -1434,6 +1438,7 @@ public class RouterRpcServer extends AbstractService

  /**
   * Convert a set of values into an array.
+   * @param <T> The type of the return objects.
   * @param set Input set.
   * @param clazz Class of the values.
   * @return Array with the values in set.
@@ -1446,7 +1451,8 @@ public class RouterRpcServer extends AbstractService
  }

  /**
-   * Get quota module implement.
+   * Get quota module implementation.
+   * @return Quota module implementation
   */
  public Quota getQuotaModule() {
    return this.quotaCall;

+ 16 - 10
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterSafemodeService.java

@@ -28,10 +28,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 /**
- * Service to periodically check if the {@link org.apache.hadoop.hdfs.server.
- * federation.store.StateStoreService StateStoreService} cached information in
- * the {@link Router} is up to date. This is for performance and removes the
- * {@link org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * Service to periodically check if the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * StateStoreService} cached information in the {@link Router} is up to date.
+ * This is for performance and removes the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
 * StateStoreService} from the critical path in common operations.
 */
 public class RouterSafemodeService extends PeriodicService {
@@ -45,12 +46,17 @@ public class RouterSafemodeService extends PeriodicService {
  /**
   * If we are in safe mode, fail requests as if a standby NN.
   * Router can enter safe mode in two different ways:
-   *   1. upon start up: router enters this mode after service start, and will
-   *      exit after certain time threshold;
-   *   2. via admin command: router enters this mode via admin command:
-   *        dfsrouteradmin -safemode enter
-   *      and exit after admin command:
-   *        dfsrouteradmin -safemode leave
+   * <ul>
+   * <li>Upon start up: router enters this mode after service start, and will
+   * exit after certain time threshold.
+   * <li>Via admin command:
+   * <ul>
+   * <li>Router enters this mode via admin command:
+   * dfsrouteradmin -safemode enter
+   * <li>And exit after admin command:
+   * dfsrouteradmin -safemode leave
+   * </ul>
+   * </ul>
   */

  /** Whether Router is in safe mode */

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStateManager.java

@@ -32,18 +32,27 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResp
 public interface RouterStateManager {
  /**
   * Enter safe mode and change Router state to RouterServiceState#SAFEMODE.
+   * @param request Request to enter safe mode.
+   * @return Response to enter safe mode.
+   * @throws IOException If it cannot perform the operation.
   */
  EnterSafeModeResponse enterSafeMode(EnterSafeModeRequest request)
      throws IOException;

  /**
   * Leave safe mode and change Router state to RouterServiceState#RUNNING.
+   * @param request Request to leave safe mode.
+   * @return Response to leave safe mode.
+   * @throws IOException If it cannot perform the operation.
   */
  LeaveSafeModeResponse leaveSafeMode(LeaveSafeModeRequest request)
      throws IOException;

  /**
   * Verify if current Router state is safe mode.
+   * @param request Request to get the safe mode state.
+   * @return Response to get the safe mode state.
+   * @throws IOException If it cannot perform the operation.
   */
  GetSafeModeResponse getSafeMode(GetSafeModeRequest request)
      throws IOException;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java

@@ -167,7 +167,7 @@ public abstract class CachedRecordStore<R extends BaseRecord>
   * expired state.
   *
   * @param query RecordQueryResult containing the data to be inspected.
-   * @throws IOException
+   * @throws IOException If the values cannot be updated.
   */
  public void overrideExpiredRecords(QueryResult<R> query) throws IOException {
    List<R> commitRecords = new ArrayList<>();
@@ -194,7 +194,7 @@ public abstract class CachedRecordStore<R extends BaseRecord>
   * expired state.
   *
   * @param record Record record to be updated.
-   * @throws IOException
+   * @throws IOException If the values cannot be updated.
   */
  public void overrideExpiredRecord(R record) throws IOException {
    List<R> newRecords = Collections.singletonList(record);

+ 9 - 7
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/MembershipStore.java

@@ -33,12 +33,13 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateNamenodeReg
 import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;

 /**
- * Management API for NameNode registrations stored in
- * {@link org.apache.hadoop.hdfs.server.federation.store.records.MembershipState
- * MembershipState} records. The {@link org.apache.hadoop.hdfs.server.
- * federation.router.RouterHeartbeatService RouterHeartbeatService} periodically
- * polls each NN to update the NameNode metadata(addresses, operational) and HA
- * state(active, standby). Each NameNode may be polled by multiple
+ * Management API for NameNode registrations stored in {@link
+ * org.apache.hadoop.hdfs.server.federation.store.records.MembershipState
+ * MembershipState} records. The {@link
+ * org.apache.hadoop.hdfs.server.federation.router.RouterHeartbeatService
+ * RouterHeartbeatService} periodically polls each NN to update the NameNode
+ * metadata(addresses, operational) and HA state(active, standby). Each
+ * NameNode may be polled by multiple
 * {@link org.apache.hadoop.hdfs.server.federation.router.Router Router}
 * instances.
 * <p>
@@ -90,6 +91,7 @@ public abstract class MembershipStore
  /**
   * Get the expired registrations from the registration cache.
   *
+   * @param request Request to get the expired registrations.
   * @return Expired registrations or zero-length list if none are found.
   * @throws StateStoreUnavailableException Throws exception if the data store
   *           is not initialized.
@@ -103,7 +105,7 @@ public abstract class MembershipStore
  /**
   * Retrieves a list of registered nameservices and their associated info.
   *
-   * @param request
+   * @param request Request to get the name spaces.
   * @return Collection of information for each registered nameservice.
   * @throws IOException if the data store could not be queried or the query is
   *           invalid.

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RouterStore.java

@@ -31,11 +31,11 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.RouterHeartbeatRe
 import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;

 /**
- * Management API for
- * {@link org.apache.hadoop.hdfs.server.federation.store.records.RouterState
- *  RouterState} records in the state store. Accesses the data store via the
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.
- * StateStoreDriver StateStoreDriver} interface. No data is cached.
+ * Management API for {@link
+ * org.apache.hadoop.hdfs.server.federation.store.records.RouterState
+ * RouterState} records in the state store. Accesses the data store via the
+ * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
+ * StateStoreDriver} interface. No data is cached.
 */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

+ 9 - 10
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java

@@ -60,12 +60,12 @@ import com.google.common.annotations.VisibleForTesting;
 * StateStoreDriver} and maintain the connection to the data store. There are
 * multiple state store driver connections supported:
 * <ul>
- * <li>File
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.impl.
- * StateStoreFileImpl StateStoreFileImpl}
- * <li>ZooKeeper
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.impl.
- * StateStoreZooKeeperImpl StateStoreZooKeeperImpl}
+ * <li>File {@link
+ * org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl
+ * StateStoreFileImpl}
+ * <li>ZooKeeper {@link
+ * org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl
+ * StateStoreZooKeeperImpl}
 * </ul>
 * <p>
 * The service also supports the dynamic registration of record stores like:
@@ -74,10 +74,8 @@ import com.google.common.annotations.VisibleForTesting;
 * federation.
 * <li>{@link MountTableStore}: Mount table between to subclusters.
 * See {@link org.apache.hadoop.fs.viewfs.ViewFs ViewFs}.
- * <li>{@link RebalancerStore}: Log of the rebalancing operations.
 * <li>{@link RouterStore}: Router state in the federation.
 * <li>{@link DisabledNameserviceStore}: Disabled name services.
- * <li>{@link TokenStore}: Tokens in the federation.
 * </ul>
 */
 @InterfaceAudience.Private
@@ -130,10 +128,10 @@ public class StateStoreService extends CompositeService {
  }

  /**
-   * Initialize the State Store and the connection to the backend.
+   * Initialize the State Store and the connection to the back-end.
   *
   * @param config Configuration for the State Store.
-   * @throws IOException
+   * @throws IOException Cannot create driver for the State Store.
   */
  @Override
  protected void serviceInit(Configuration config) throws Exception {
@@ -214,6 +212,7 @@ public class StateStoreService extends CompositeService {
   * Add a record store to the State Store. It includes adding the store, the
   * supported record and the cache management.
   *
+   * @param <T> Type of the records stored.
   * @param clazz Class of the record store to track.
   * @return New record store.
   * @throws ReflectiveOperationException

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java

@@ -42,6 +42,7 @@ public final class StateStoreUtils {
   * Get the base class for a record class. If we get an implementation of a
   * record we will return the real parent record class.
   *
+   * @param <T> Type of the class of the data record to check.
   * @param clazz Class of the data record to check.
   * @return Base class for the record.
   */
@@ -67,6 +68,7 @@ public final class StateStoreUtils {
   * Get the base class for a record. If we get an implementation of a record we
   * will return the real parent record class.
   *
+   * @param <T> Type of the class of the data record.
   * @param record Record to check its main class.
   * @return Base class for the record.
   */
@@ -79,6 +81,7 @@ public final class StateStoreUtils {
   * Get the base class name for a record. If we get an implementation of a
   * record we will return the real parent record class.
   *
+   * @param <T> Type of the class of the data record.
   * @param clazz Class of the data record to check.
   * @return Name of the base class for the record.
   */
@@ -90,6 +93,7 @@ public final class StateStoreUtils {
  /**
   * Filters a list of records to find all records matching the query.
   *
+   * @param <T> Type of the class of the data record.
   * @param query Map of field names and objects to use to filter results.
   * @param records List of data records to filter.
   * @return List of all records matching the query (or empty list if none

+ 11 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreRecordOperations.java

@@ -41,8 +41,9 @@ public interface StateStoreRecordOperations {
    * of the records on each call. It is recommended to override the default
    * of the records on each call. It is recommended to override the default
    * implementations for better performance.
    * implementations for better performance.
    *
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of record to fetch.
    * @param clazz Class of record to fetch.
-   * @return List of all records that match the clazz.
+   * @return List of all records that match the class.
    * @throws IOException Throws exception if unable to query the data store.
    * @throws IOException Throws exception if unable to query the data store.
    */
    */
   @Idempotent
   @Idempotent
@@ -51,6 +52,7 @@ public interface StateStoreRecordOperations {
   /**
   /**
    * Get a single record from the store that matches the query.
    * Get a single record from the store that matches the query.
    *
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of record to fetch.
    * @param clazz Class of record to fetch.
    * @param query Query to filter results.
    * @param query Query to filter results.
    * @return A single record matching the query. Null if there are no matching
    * @return A single record matching the query. Null if there are no matching
@@ -67,10 +69,11 @@ public interface StateStoreRecordOperations {
    * assumes the underlying driver does not support filtering. If the driver
    * assumes the underlying driver does not support filtering. If the driver
    * supports filtering it should overwrite this method.
    * supports filtering it should overwrite this method.
    *
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of record to fetch.
    * @param clazz Class of record to fetch.
    * @param query Query to filter results.
    * @param query Query to filter results.
-   * @return Records of type clazz that match the query or empty list if none
-   *         are found.
+   * @return Records of type class that match the query or empty list if none
+   * are found.
    * @throws IOException Throws exception if unable to query the data store.
    * @throws IOException Throws exception if unable to query the data store.
    */
    */
   @Idempotent
   @Idempotent
@@ -81,6 +84,7 @@ public interface StateStoreRecordOperations {
    * Creates a single record. Optionally updates an existing record with same
    * Creates a single record. Optionally updates an existing record with same
    * primary key.
    * primary key.
    *
    *
+   * @param <T> Record class of the records.
    * @param record The record to insert or update.
    * @param record The record to insert or update.
    * @param allowUpdate True if update of exiting record is allowed.
    * @param allowUpdate True if update of exiting record is allowed.
    * @param errorIfExists True if an error should be returned when inserting
    * @param errorIfExists True if an error should be returned when inserting
@@ -97,9 +101,9 @@ public interface StateStoreRecordOperations {
    * Creates multiple records. Optionally updates existing records that have
    * Creates multiple records. Optionally updates existing records that have
    * the same primary key.
    * the same primary key.
    *
    *
+   * @param <T> Record class of the records.
    * @param records List of data records to update or create. All records must
    * @param records List of data records to update or create. All records must
    *                be of class clazz.
    *                be of class clazz.
-   * @param clazz Record class of records.
    * @param allowUpdate True if update of exiting record is allowed.
    * @param allowUpdate True if update of exiting record is allowed.
    * @param errorIfExists True if an error should be returned when inserting
    * @param errorIfExists True if an error should be returned when inserting
    *          an existing record. Only used if allowUpdate = false.
    *          an existing record. Only used if allowUpdate = false.
@@ -115,6 +119,7 @@ public interface StateStoreRecordOperations {
   /**
   /**
    * Remove a single record.
    * Remove a single record.
    *
    *
+   * @param <T> Record class of the records.
    * @param record Record to be removed.
    * @param record Record to be removed.
    * @return true If the record was successfully removed. False if the record
    * @return true If the record was successfully removed. False if the record
    *              could not be removed or not stored.
    *              could not be removed or not stored.
@@ -126,6 +131,7 @@ public interface StateStoreRecordOperations {
   /**
   /**
    * Remove all records of this class from the store.
    * Remove all records of this class from the store.
    *
    *
+   * @param <T> Record class of the records.
    * @param clazz Class of records to remove.
    * @param clazz Class of records to remove.
    * @return True if successful.
    * @return True if successful.
    * @throws IOException Throws exception if unable to query the data store.
    * @throws IOException Throws exception if unable to query the data store.
@@ -137,6 +143,7 @@ public interface StateStoreRecordOperations {
    * Remove multiple records of a specific class that match a query. Requires
    * the getAll implementation to fetch fresh records on each call.
    *
+   * @param <T> Record class of the records.
    * @param query Query to filter what to remove.
    * @return The number of records removed.
    * @throws IOException Throws exception if unable to query the data store.
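For orientation, the query-based operations documented above are typically reached through a StateStoreDriver, which implements this interface, together with a Query built from a partial record. A minimal sketch; the method names getMultiple and remove(Class, Query) and the setNameserviceId setter are assumptions inferred from the javadoc, not part of this change:

```java
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
import org.apache.hadoop.hdfs.server.federation.store.records.Query;

/** Sketch only; names not shown in the diff above are assumptions. */
public class StateStoreQuerySketch {

  /** Fetch and then remove all membership records of one name service. */
  static int purge(StateStoreDriver driver, String nsId) throws IOException {
    // A Query is built from a partial record; unset fields are ignored.
    MembershipState partial = MembershipState.newInstance();
    partial.setNameserviceId(nsId); // assumed setter
    Query<MembershipState> query = new Query<>(partial);

    // "Records of type clazz that match the query or empty list if none
    // are found."
    List<MembershipState> matches =
        driver.getMultiple(MembershipState.class, query); // assumed name
    System.out.println("Matched " + matches.size() + " records");

    // "Remove multiple records of a specific class that match a query";
    // the return value is the number of records removed.
    return driver.remove(MembershipState.class, query);
  }
}
```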

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreBaseImpl.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
  * Drivers may optionally override additional routines for performance
  * optimization, such as custom get/put/remove queries, depending on the
  * capabilities of the data store.
- * <p>
  */
 public abstract class StateStoreBaseImpl extends StateStoreDriver {
 

+ 5 - 7
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/package-info.java

@@ -29,15 +29,14 @@
  * The state store uses a modular data storage
  * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
  * StateStoreDriver} to handle querying, updating and deleting data records. The
- * data storage driver is initialized and maintained by the
- * {@link org.apache.hadoop.hdfs.server.federation.store.
- * StateStoreService FederationStateStoreService}. The state store
+ * data storage driver is initialized and maintained by the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * FederationStateStoreService}. The state store
  * supports fetching all records of a type, filtering by column values or
  * fetching a single record by its primary key.
  * <p>
  * The state store contains several API interfaces, one for each data records
  * type.
- * <p>
  * <ul>
  * <li>FederationMembershipStateStore: state of all Namenodes in the federation.
  * Uses the MembershipState record.
@@ -46,10 +45,9 @@
  * <li>RouterStateStore: State of all routers in the federation. Uses the
  * RouterState record.
  * </ul>
- * <p>
  * Each API is defined in a separate interface. The implementations of these
- * interfaces are responsible for accessing the
- * {@link org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
+ * interfaces are responsible for accessing the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver
  * StateStoreDriver} to query, update and delete data records.
  */
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/BaseRecord.java

@@ -75,8 +75,8 @@ public abstract class BaseRecord implements Comparable<BaseRecord> {
   public abstract long getExpirationMs();
 
   /**
-   * Map of primary key names->values for the record. The primary key can be a
-   * combination of 1-n different State Store serialized values.
+   * Map of primary key names to values for the record. The primary key can be
+   * a combination of 1-n different State Store serialized values.
    *
    * @return Map of key/value pairs that constitute this object's primary key.
    */
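The accessor documented above returns such a map from each record implementation. A small, self-contained sketch of assembling one; the key columns nameserviceId and namenodeId are illustrative, not taken from this change:

```java
import java.util.Map;
import java.util.TreeMap;

/** Illustrative only: assembling a primary-key map for a record. */
public class PrimaryKeySketch {

  /** The primary key can combine 1-n serialized values, per the javadoc. */
  static Map<String, String> primaryKeys(String nameserviceId,
      String namenodeId) {
    Map<String, String> keys = new TreeMap<>();
    keys.put("nameserviceId", nameserviceId); // hypothetical key column
    keys.put("namenodeId", namenodeId);       // hypothetical key column
    return keys;
  }
}
```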

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MembershipState.java

@@ -21,7 +21,6 @@ import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNameno
 import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.EXPIRED;
 import static org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState.UNAVAILABLE;
 
-import java.io.IOException;
 import java.util.Comparator;
 import java.util.SortedMap;
 import java.util.TreeMap;
@@ -69,7 +68,6 @@ public abstract class MembershipState extends BaseRecord
   /**
    * Create a new membership instance.
    * @return Membership instance.
-   * @throws IOException
    */
   public static MembershipState newInstance() {
     MembershipState record =
@@ -93,7 +91,6 @@ public abstract class MembershipState extends BaseRecord
    * @param state State of the federation.
    * @param safemode If the safe mode is enabled.
    * @return Membership instance.
-   * @throws IOException If we cannot create the instance.
    */
   public static MembershipState newInstance(String router, String nameservice,
       String namenode, String clusterId, String blockPoolId, String rpcAddress,

+ 15 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/MountTable.java

@@ -39,12 +39,11 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
- * Data schema for
- * {@link org.apache.hadoop.hdfs.server.federation.store.
- * MountTableStore FederationMountTableStore} data stored in the
- * {@link org.apache.hadoop.hdfs.server.federation.store.
- * StateStoreService FederationStateStoreService}. Supports string
- * serialization.
+ * Data schema for {@link
+ * org.apache.hadoop.hdfs.server.federation.store.MountTableStore
+ * FederationMountTableStore} data stored in the {@link
+ * org.apache.hadoop.hdfs.server.federation.store.StateStoreService
+ * FederationStateStoreService}. Supports string serialization.
  */
 public abstract class MountTable extends BaseRecord {
 
@@ -100,10 +99,11 @@ public abstract class MountTable extends BaseRecord {
    * Constructor for a mount table entry with a single destinations.
    *
    * @param src Source path in the mount entry.
-   * @param destinations Nameservice destination of the mount point.
+   * @param destinations Name service destination of the mount point.
    * @param dateCreated Created date.
    * @param dateModified Modified date.
-   * @throws IOException
+   * @return New mount table instance.
+   * @throws IOException If it cannot be created.
    */
   public static MountTable newInstance(final String src,
       final Map<String, String> destinations,
@@ -119,8 +119,8 @@ public abstract class MountTable extends BaseRecord {
    * Constructor for a mount table entry with multiple destinations.
    *
    * @param src Source path in the mount entry.
-   * @param destinations Nameservice destinations of the mount point.
-   * @throws IOException
+   * @param destinations Name service destinations of the mount point.
+   * @throws IOException If it cannot be created.
    */
   public static MountTable newInstance(final String src,
       final Map<String, String> destinations) throws IOException {
@@ -187,12 +187,16 @@ public abstract class MountTable extends BaseRecord {
   /**
    * Set the destination paths.
    *
-   * @param paths Destination paths.
+   * @param dests Destination paths.
    */
   public abstract void setDestinations(List<RemoteLocation> dests);
 
   /**
    * Add a new destination to this mount table entry.
+   *
+   * @param nsId Name service identifier.
+   * @param path Path in the remote name service.
+   * @return If the destination was added.
    */
   public abstract boolean addDestination(String nsId, String path);
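The factory methods and addDestination documented above combine as in the following minimal sketch; the source path, the name service IDs and the assumption that the destinations map is keyed by name service ID are examples, not part of this change:

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;

/** Sketch of the MountTable factory methods documented above. */
public class MountTableSketch {

  static MountTable exampleEntry() throws IOException {
    // Destinations map (assumed: name service ID -> path in that service).
    Map<String, String> destinations = new HashMap<>();
    destinations.put("ns0", "/data");

    // "Constructor for a mount table entry with multiple destinations."
    MountTable entry = MountTable.newInstance("/data", destinations);

    // Add a second destination later; the return value says if it was added.
    entry.addDestination("ns1", "/data");
    return entry;
  }
}
```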
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/records/Query.java

@@ -31,7 +31,7 @@ public class Query<T extends BaseRecord> {
   /**
    * Create a query to search for a partial record.
    *
-   * @param partial It defines the attributes to search.
+   * @param part It defines the attributes to search.
    */
   public Query(final T part) {
     this.partial = part;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/utils/ConsistentHashRing.java

@@ -103,7 +103,7 @@ public class ConsistentHashRing {
 
 
   /**
    * Return location (owner) of specified item. Owner is the next
-   * entry on the hash ring (with a hash value > hash value of item).
+   * entry on the hash ring (with a hash value &gt; hash value of item).
    * @param item Item to look for.
    * @return The location of the item.
    */
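The lookup described above (the owner is the next ring entry whose hash is greater than the item's hash, wrapping around to the first entry) is the standard consistent-hashing search. A generic, self-contained sketch of that technique, not the ConsistentHashRing internals:

```java
import java.util.TreeMap;

/** Generic consistent-hash lookup sketch; hashes here are plain hashCode(). */
public class HashRingSketch {

  /** Ring: hash of an entry -> location (owner) name. */
  private final TreeMap<Integer, String> ring = new TreeMap<>();

  void addLocation(String location) {
    ring.put(location.hashCode(), location);
  }

  /** Owner is the next entry with a hash value greater than the item's. */
  String getLocation(String item) {
    if (ring.isEmpty()) {
      return null;
    }
    // Least key strictly greater than the item's hash, or wrap to the first.
    Integer key = ring.higherKey(item.hashCode());
    if (key == null) {
      key = ring.firstKey();
    }
    return ring.get(key);
  }
}
```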

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java

@@ -340,6 +340,8 @@ public class RouterAdmin extends Configured implements Tool {
    *
    * @param parameters Parameters for the mount point.
    * @param i Index in the parameters.
+   * @return If it was successful.
+   * @throws IOException If it cannot add the mount point.
    */
   public boolean addMount(String[] parameters, int i) throws IOException {
     // Mandatory parameters
@@ -495,6 +497,8 @@ public class RouterAdmin extends Configured implements Tool {
    *
    * @param parameters Parameters for the mount point.
    * @param i Index in the parameters.
+   * @return If it updated the mount point successfully.
+   * @throws IOException If there is an error.
    */
   public boolean updateMount(String[] parameters, int i) throws IOException {
     // Mandatory parameters
@@ -599,6 +603,7 @@ public class RouterAdmin extends Configured implements Tool {
    * Remove mount point.
    *
    * @param path Path to remove.
+   * @return If the mount point was removed successfully.
    * @throws IOException If it cannot be removed.
    */
   public boolean removeMount(String path) throws IOException {
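These three methods back the hdfs dfsrouteradmin commands. A hedged sketch of driving the tool programmatically; the constructor taking a Configuration and the -add/-rm argument layout are assumptions based on the usual CLI form, not something this diff shows:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
import org.apache.hadoop.util.ToolRunner;

/** Sketch only: invokes RouterAdmin the same way the CLI would. */
public class RouterAdminSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    RouterAdmin admin = new RouterAdmin(conf); // assumed constructor

    // Add a mount point (assumed form: -add <source> <nameservice> <dest>).
    int rc = ToolRunner.run(conf, admin,
        new String[] {"-add", "/data", "ns0", "/data"});
    System.out.println("add exit code: " + rc);

    // Remove it again (assumed form: -rm <source>).
    rc = ToolRunner.run(conf, admin, new String[] {"-rm", "/data"});
    System.out.println("rm exit code: " + rc);
  }
}
```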