
HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch. Contributed by Haohui Mai.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5698@1565765 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 11 years ago
parent
commit
4fa343ae6f

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5698.txt

@@ -40,3 +40,6 @@ HDFS-5698 subtasks
 
     HDFS-5885. Add annotation for repeated fields in the protobuf definition.
     (Haohui Mai via jing9)
+
+    HDFS-5906. Fixing findbugs and javadoc warnings in the HDFS-5698 branch.
+    (Haohui Mai via jing9)

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -64,7 +64,6 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
@@ -973,7 +972,8 @@ public final class CacheManager {
 
     for (CachePool pool : cachePools.values()) {
       CachePoolInfo p = pool.getInfo(true);
-      CachePoolInfoProto.Builder b = CachePoolInfoProto.newBuilder();
+      CachePoolInfoProto.Builder b = CachePoolInfoProto.newBuilder()
+          .setPoolName(p.getPoolName());
 
       if (p.getOwnerName() != null)
         b.setOwnerName(p.getOwnerName());
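
A note on the builder hunk above: before the change, the builder could reach build() without poolName ever being set. Depending on how the field is declared in the .proto (an assumption either way), that either throws protobuf's UninitializedMessageException at build() for a missing required field, or silently persists a pool record with no name; setting it unconditionally before the optional fields avoids both. A minimal hand-rolled analogue of the required-field check (hypothetical class, not the generated protobuf code):

// Hypothetical stand-in for a generated proto2 message class; build()
// rejects a missing required field the way protobuf's build() throws
// UninitializedMessageException.
final class CachePoolRecord {
  final String poolName;   // required
  final String ownerName;  // optional

  private CachePoolRecord(String poolName, String ownerName) {
    this.poolName = poolName;
    this.ownerName = ownerName;
  }

  static final class Builder {
    private String poolName;
    private String ownerName;

    Builder setPoolName(String v) { poolName = v; return this; }
    Builder setOwnerName(String v) { ownerName = v; return this; }

    CachePoolRecord build() {
      if (poolName == null) {
        // Mirrors the failure the patch prevents: the required field must
        // be populated before the record is built.
        throw new IllegalStateException("required field poolName not set");
      }
      return new CachePoolRecord(poolName, ownerName);
    }
  }
}

Usage mirroring the patched loop sets the required field up front and the optional fields only when present:

// CachePoolRecord r = new CachePoolRecord.Builder()
//     .setPoolName(p.getPoolName())
//     .build();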

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java

@@ -87,7 +87,6 @@ public class FSImageFormatPBSnapshot {
     * Load the snapshots section from fsimage. Also convert snapshottable
     * directories into {@link INodeDirectorySnapshottable}.
     *
-     * @return A map containing all the snapshots loaded from the fsimage.
     */
    public void loadSnapshotSection(InputStream in) throws IOException {
      SnapshotManager sm = fsn.getSnapshotManager();
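
The deleted tag is the javadoc fix in this hunk: loadSnapshotSection returns void, and javadoc rejects a @return tag on a void method with a warning along the lines of "@return tag cannot be used in method with void return type". A minimal illustration (hypothetical class, not from the branch):

import java.io.IOException;
import java.io.InputStream;

final class VoidReturnTagDemo {
  /**
   * Loads a section from the stream.
   *
   * Putting an at-return tag in this comment would reproduce the javadoc
   * warning, since the method below returns void.
   */
  public void loadSection(InputStream in) throws IOException {
    in.read(); // placeholder body; the point is the signature, not the logic
  }
}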

+ 14 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java

@@ -208,11 +208,13 @@ public class SnapshotFSImageFormat {
  
  /**
   * Load snapshots and snapshotQuota for a Snapshottable directory.
-   * @param snapshottableParent The snapshottable directory for loading.
-   * @param numSnapshots The number of snapshots that the directory has.
-   * @param in The {@link DataInput} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is 
-   *               using.
+   *
+   * @param snapshottableParent
+   *          The snapshottable directory for loading.
+   * @param numSnapshots
+   *          The number of snapshots that the directory has.
+   * @param loader
+   *          The loader
   */
  public static void loadSnapshotList(
      INodeDirectorySnapshottable snapshottableParent, int numSnapshots,
@@ -230,10 +232,13 @@ public class SnapshotFSImageFormat {
  /**
   * Load the {@link SnapshotDiff} list for the INodeDirectoryWithSnapshot
   * directory.
-   * @param dir The snapshottable directory for loading.
-   * @param in The {@link DataInput} instance to read.
-   * @param loader The {@link Loader} instance that this loading procedure is 
-   *               using.
+   *
+   * @param dir
+   *          The snapshottable directory for loading.
+   * @param in
+   *          The {@link DataInput} instance to read.
+   * @param loader
+   *          The loader
   */
  public static void loadDirectoryDiffList(INodeDirectory dir,
      DataInput in, FSImageFormat.Loader loader) throws IOException {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java

@@ -129,7 +129,7 @@ final class FileDistributionCalculator {
        totalSpace += fileSize;
 
        int bucket = fileSize > maxSize ? distribution.length - 1 : (int) Math
-            .ceil(fileSize / steps);
+            .ceil((double)fileSize / steps);
        ++distribution[bucket];
 
      } else if (p.getType() == INodeSection.INode.Type.DIRECTORY) {
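
Why the cast in this hunk matters: fileSize and steps are integral types here, so fileSize / steps is integer division and Math.ceil receives an already-truncated quotient; any file whose size is not an exact multiple of the bucket width lands one histogram bucket too low. A minimal sketch with assumed values (class name and numbers are illustrative, not from the branch):

public final class CeilCastDemo {
  public static void main(String[] args) {
    long fileSize = 1536; // hypothetical: 1.5x the bucket width
    int steps = 1024;     // hypothetical bucket width

    // Before the fix: 1536 / 1024 truncates to 1, so ceil(1.0) == 1.
    int truncated = (int) Math.ceil(fileSize / steps);
    // After the fix: 1536.0 / 1024 == 1.5, so ceil(1.5) == 2.
    int correct = (int) Math.ceil((double) fileSize / steps);

    System.out.println("without cast: bucket " + truncated); // bucket 1
    System.out.println("with cast:    bucket " + correct);   // bucket 2
  }
}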