HDFS-16803.Improve some annotations in hdfs module. (#5031)

jianghuazhu 2 years ago
parent
commit
c5c00f3d2c
13 changed files with 26 additions and 22 deletions
  1. +3 -3
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  2. +3 -0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
  3. +1 -1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java
  4. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java
  5. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  6. +2 -2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java
  7. +1 -0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
  8. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java
  9. +6 -6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
  10. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
  11. +4 -4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java
  12. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
  13. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -568,7 +568,7 @@ public class DistributedFileSystem extends FileSystem
 
   /**
    * Same as
-   * {@link #create(Path, FsPermission, EnumSet<CreateFlag>, int, short, long,
+   * {@link #create(Path, FsPermission, EnumSet, int, short, long,
    * Progressable, ChecksumOpt)} with a few additions. First, addition of
    * favoredNodes that is a hint to where the namenode should place the file
    * blocks. The favored nodes hint is not persisted in HDFS. Hence it may be
@@ -637,12 +637,12 @@ public class DistributedFileSystem extends FileSystem
 
   /**
    * Similar to {@link #create(Path, FsPermission, EnumSet, int, short, long,
-   * Progressable, ChecksumOpt, InetSocketAddress[], String)}, it provides a
+   * Progressable, ChecksumOpt, InetSocketAddress[], String, String)}, it provides a
    * HDFS-specific version of {@link #createNonRecursive(Path, FsPermission,
    * EnumSet, int, short, long, Progressable)} with a few additions.
    *
    * @see #create(Path, FsPermission, EnumSet, int, short, long, Progressable,
-   * ChecksumOpt, InetSocketAddress[], String) for the descriptions of
+   * ChecksumOpt, InetSocketAddress[], String, String) for the descriptions of
    * additional parameters, i.e., favoredNodes, ecPolicyName and
    * storagePolicyName.
    */
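
Why this change works: Javadoc matches references against erased signatures, so a generic type argument like EnumSet<CreateFlag> inside {@link ...} never resolves, while the raw EnumSet does. The second hunk adds the missing trailing String so the link names the overload that actually exists. A minimal sketch of the rule, using a hypothetical class and method:

    import java.util.EnumSet;

    public class LinkDemo {
      enum Flag { OVERWRITE, APPEND }

      /**
       * Use raw types and the full parameter list so the link resolves
       * and picks the right overload: {@link #write(EnumSet, String)}.
       * Writing EnumSet<Flag> inside the parentheses would fail to resolve.
       */
      public void write(EnumSet<Flag> flags, String name) {
        // no-op: this class only illustrates Javadoc reference syntax
      }
    }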

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java

@@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs.client;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+
+import java.util.EnumSet;
 
 /**
  * CreateEncryptionZoneFlag is used in
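
The two imports appear to be added purely so that short names in the class Javadoc ({@link Path}, {@link EnumSet}) resolve: the javadoc tool resolves link targets through the same imports the compiler uses. A sketch of the pattern, with hypothetical names:

    import java.util.EnumSet;          // needed only so {@link EnumSet} resolves
    import org.apache.hadoop.fs.Path;  // needed only so {@link Path} resolves

    /**
     * Flags passed as an {@link EnumSet} when creating a zone at a {@link Path}.
     */
    public enum ZoneFlagDemo {
      PROVISION_TRASH
    }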

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RemoteMethod.java

@@ -131,7 +131,7 @@ public class RemoteMethod {
   /**
    * Get the represented java method.
    *
-   * @return Method
+   * @return {@link Method}
    * @throws IOException If the method cannot be found.
    */
   public Method getMethod() throws IOException {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java

@@ -87,7 +87,7 @@ class AsyncLoggerSet {
   
   /**
    * @return the epoch number for this writer. This may only be called after
-   * a successful call to {@link #createNewUniqueEpoch(NamespaceInfo)}.
+   * a successful call to {@link QuorumJournalManager#createNewUniqueEpoch()}.
    */
   long getEpoch() {
     Preconditions.checkState(myEpoch != INVALID_EPOCH,
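
The old target, #createNewUniqueEpoch(NamespaceInfo), named a same-class method with a parameter it does not take; the method lives on QuorumJournalManager, and a reference outside the current class needs the Class#member() form. Illustrated with hypothetical classes:

    class Writer {
      /**
       * May only be called after a successful call to
       * {@link Coordinator#newEpoch()} -- cross-class links must
       * qualify the member with its declaring class.
       */
      long getEpoch() {
        return 0L;
      }
    }

    class Coordinator {
      void newEpoch() {
        // hypothetical stand-in for QuorumJournalManager#createNewUniqueEpoch()
      }
    }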

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -1403,7 +1403,7 @@ public class DataStorage extends Storage {
   }
 
   /**
-   * Get the BlockPoolSliceStorage from {@link bpStorageMap}.
+   * Get the BlockPoolSliceStorage from {@link #bpStorageMap}.
    * If the object is not found, create a new object and put it to the map.
    */
   synchronized BlockPoolSliceStorage getBlockPoolSliceStorage(

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/ThrottledAsyncChecker.java

@@ -44,14 +44,14 @@ import java.util.concurrent.TimeUnit;
 
 /**
  * An implementation of {@link AsyncChecker} that skips checking recently
- * checked objects. It will enforce at least {@link minMsBetweenChecks}
+ * checked objects. It will enforce at least {@link #minMsBetweenChecks}
  * milliseconds between two successive checks of any one object.
  *
  * It is assumed that the total number of Checkable objects in the system
  * is small, (not more than a few dozen) since the checker uses O(Checkables)
  * storage and also potentially O(Checkables) threads.
  *
- * {@link minMsBetweenChecks} should be configured reasonably
+ * {@link #minMsBetweenChecks} should be configured reasonably
  * by the caller to avoid spinning up too many threads frequently.
  */
 @InterfaceAudience.Private
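
Without the leading #, {@link minMsBetweenChecks} is parsed as a class named minMsBetweenChecks and fails to resolve; the # marks a field or method of the current class. A compact sketch (class name hypothetical):

    /**
     * Enforces at least {@link #minMsBetweenChecks} milliseconds between
     * two successive checks of any one object.
     */
    class ThrottleDemo {
      private final long minMsBetweenChecks = 1_000L;
    }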

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java

@@ -31,6 +31,7 @@ import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.ExecutionException;
 
 /**
  * Implementation of {@code Futures#withTimeout}.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java

@@ -184,7 +184,7 @@ public abstract class RamDiskReplicaTracker {
    * {@link org.apache.hadoop.hdfs.DFSConfigKeys#DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY}.
    *
    * @param conf the configuration to be used
-   * @param dataset the FsDataset object.
+   * @param fsDataset the FsDataset object.
    * @return an instance of RamDiskReplicaTracker
    */
   static RamDiskReplicaTracker getInstance(final Configuration conf,
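
An @param name must match the declared parameter exactly; the old "dataset" did not match the fsDataset parameter, which doclint reports as an unknown parameter while leaving the real one undocumented. A minimal sketch with hypothetical types:

    class TrackerDemo {
      /**
       * @param conf      the configuration to be used
       * @param fsDataset the dataset; the tag name must match the
       *                  declaration below, not a paraphrase of it
       */
      static TrackerDemo getInstance(Object conf, Object fsDataset) {
        return new TrackerDemo();
      }
    }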

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java

@@ -152,8 +152,8 @@ class FSDirRenameOp {
    * @param srcIIP source path
    * @param dstIIP destination path
    * @return true INodesInPath if rename succeeds; null otherwise
-   * @deprecated See {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated See {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   static INodesInPath unprotectedRenameTo(FSDirectory fsd,
@@ -258,8 +258,8 @@ class FSDirRenameOp {
   }
 
   /**
-   * @see {@link #unprotectedRenameTo(FSDirectory, String, String, INodesInPath,
-   * INodesInPath, long, BlocksMapUpdateInfo, Options.Rename...)}
+   * @see {@link #unprotectedRenameTo(FSDirectory, INodesInPath, INodesInPath,
+   * long, BlocksMapUpdateInfo, Options.Rename...)}
    */
   static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       String src, String dst, BlocksMapUpdateInfo collectedBlocks,
@@ -482,8 +482,8 @@ class FSDirRenameOp {
   }
 
   /**
-   * @deprecated Use {@link #renameToInt(FSDirectory, String, String,
-   * boolean, Options.Rename...)}
+   * @deprecated Use {@link #renameToInt(FSDirectory, FSPermissionChecker,
+   * String, String, boolean, Options.Rename...)}
    */
   @Deprecated
   private static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java

@@ -123,7 +123,7 @@ public class EditLogTailer {
 
   /**
    * The timeout in milliseconds of calling rollEdits RPC to Active NN.
-   * @see HDFS-4176.
+   * See HDFS-4176.
    */
   private final long rollEditsTimeoutMs;
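
An @see tag must point at a program element, a quoted string, or an HTML link; a bare issue key like HDFS-4176 is none of those, so the commit demotes it to plain prose. If a clickable reference were wanted instead, the HTML form of @see would also be valid (sketch below uses a hypothetical class; the URL is the standard Apache JIRA location for the issue):

    class TailerDemo {
      /**
       * The timeout in milliseconds of calling rollEdits RPC to Active NN.
       * @see <a href="https://issues.apache.org/jira/browse/HDFS-4176">HDFS-4176</a>
       */
      private long rollEditsTimeoutMs;
    }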
 

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java

@@ -70,7 +70,7 @@ public class SnapshotFSImageFormat {
 
   /**
    * Save SnapshotDiff list for an INodeDirectoryWithSnapshot.
-   * @param sNode The directory that the SnapshotDiff list belongs to.
+   * @param diffs The directory that the SnapshotDiff list belongs to.
    * @param out The {@link DataOutput} to write.
    */
   private static <N extends INode, A extends INodeAttributes, D extends AbstractINodeDiff<N, A, D>>
@@ -186,7 +186,7 @@ public class SnapshotFSImageFormat {
    * @param createdList The created list associated with the deleted list in 
    *                    the same Diff.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance.
+   * @param loader The {@link FSImageFormat.Loader} instance.
    * @return The deleted list.
    */
   private static List<INode> loadDeletedList(INodeDirectory parent,
@@ -260,7 +260,7 @@ public class SnapshotFSImageFormat {
    * Load the snapshotINode field of {@link AbstractINodeDiff}.
    * @param snapshot The Snapshot associated with the {@link AbstractINodeDiff}.
    * @param in The {@link DataInput} to read.
-   * @param loader The {@link Loader} instance that this loading procedure is
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return The snapshotINode.
    */
@@ -281,7 +281,7 @@ public class SnapshotFSImageFormat {
    * Load {@link DirectoryDiff} from fsimage.
    * @param parent The directory that the SnapshotDiff belongs to.
    * @param in The {@link DataInput} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is 
+   * @param loader The {@link FSImageFormat.Loader} instance that this loading procedure is
    *               using.
    * @return A {@link DirectoryDiff}.
    */
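
Loader here is a nested class of FSImageFormat; a bare {@link Loader} resolves only if Loader is imported or otherwise in scope, so the commit qualifies it with its enclosing class. The same rule with hypothetical names:

    class OuterDemo {
      static class Loader {
      }
    }

    class ReaderDemo {
      /**
       * @param loader the {@link OuterDemo.Loader} instance; a bare
       *               {@code Loader} would not resolve from this file
       */
      void load(OuterDemo.Loader loader) {
        // illustration only
      }
    }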

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java

@@ -85,7 +85,7 @@ import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERA
 /**
  * This class reads the protobuf-based fsimage and generates text output
  * for each inode to {@link PBImageTextWriter#out}. The sub-class can override
- * {@link getEntry()} to generate formatted string for each inode.
+ * {@link #getEntry(String, INode)} to generate formatted string for each inode.
  *
  * Since protobuf-based fsimage does not guarantee the order of inodes and
  * directories, PBImageTextWriter runs two-phase scans:

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java

@@ -68,7 +68,7 @@ public abstract class MD5FileUtils {
   /**
    * Read the md5 file stored alongside the given data file
    * and match the md5 file content.
-   * @param dataFile the file containing data
+   * @param md5File the file containing md5 data
    * @return a matcher with two matched groups
    *   where group(1) is the md5 string and group(2) is the data file path.
    */