Browse Source

HDFS-3039. Address findbugs and javadoc warnings on branch. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1296017 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 13 years ago
parent
commit
7be4e5bd22
15 changed files with 56 additions and 40 deletions
  1. + 5 - 1   hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  2. + 1 - 1   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  3. + 1 - 2   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  4. + 1 - 1   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
  5. + 1 - 1   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
  6. + 2 - 0   hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt
  7. + 8 - 0   hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
  8. + 2 - 1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
  9. + 5 - 4   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
  10. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  11. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  12. + 17 - 20  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  13. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  14. + 6 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
  15. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

+ 5 - 1
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -278,8 +278,12 @@
      <!-- protobuf generated code -->
      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtocolInfoProtos.*"/>
    </Match>
-		<Match>
+    <Match>
      <!-- protobuf generated code -->
      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.IpcConnectionContextProtos.*"/>
    </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ha\.proto\.HAServiceProtocolProtos.*"/>
+    </Match>
  </FindBugsFilter>

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -234,7 +234,7 @@ public class ActiveStandbyElector implements Watcher, StringCallback,
  /**
   * Exception thrown when there is no active leader
   */
-  public class ActiveNotFoundException extends Exception {
+  public static class ActiveNotFoundException extends Exception {
    private static final long serialVersionUID = 3505396722342846462L;
  }

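Note: the change above is the standard fix for the FindBugs inner-class warnings (SIC_INNER_SHOULD_BE_STATIC / SE_INNER_CLASS). A non-static inner class keeps an implicit reference to its enclosing instance, which a Serializable exception would drag into serialization. A minimal sketch of the before/after shapes (class names here are hypothetical, not from the patch):

  public class Elector {
    // Flagged: inner class holds a hidden reference to the enclosing Elector,
    // so serializing the exception would attempt to serialize the Elector too.
    public class BadNotFoundException extends Exception {
      private static final long serialVersionUID = 1L;
    }

    // Fixed: a static nested class carries no reference to the outer instance.
    public static class GoodNotFoundException extends Exception {
      private static final long serialVersionUID = 1L;
    }
  }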
+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -262,8 +262,7 @@ public abstract class HAAdmin extends Configured implements Tool {
      return -1;
    }

-    int i = 0;
-    String cmd = argv[i++];
+    String cmd = argv[0];

    if (!cmd.startsWith("-")) {
      errOut.println("Bad command '" + cmd + "': expected command starting with '-'");

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java

@@ -76,7 +76,7 @@ public class SshFenceByTcpPort extends Configured
    if (argStr != null) {
      // Use a dummy service when checking the arguments defined
      // in the configuration are parseable.
-      Args args = new Args(new InetSocketAddress("localhost", 8020), argStr);
+      new Args(new InetSocketAddress("localhost", 8020), argStr);
    }
  }


+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java

@@ -30,7 +30,7 @@ public class ThreadUtil {
  /**
   * Cause the current thread to sleep as close as possible to the provided
   * number of milliseconds. This method will log and ignore any
-   * {@link InterrupedException} encountered.
+   * {@link InterruptedException} encountered.
   * 
   * @param millis the number of milliseconds for the current thread to sleep
   */
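Note: for context, a minimal JDK-only sketch of the kind of helper this javadoc documents — sleep close to the requested duration, logging and ignoring interrupts. This is an illustration, not necessarily the exact ThreadUtil implementation:

  import java.util.logging.Logger;

  public class SleepUtil {
    private static final Logger LOG = Logger.getLogger(SleepUtil.class.getName());

    /** Sleep for close to the given number of milliseconds,
     *  logging and ignoring any InterruptedException. */
    public static void sleepAtLeastIgnoreInterrupts(long millis) {
      long start = System.currentTimeMillis();
      while (System.currentTimeMillis() - start < millis) {
        long timeToSleep = millis - (System.currentTimeMillis() - start);
        try {
          Thread.sleep(timeToSleep);
        } catch (InterruptedException ie) {
          // Log and keep sleeping until the full duration has elapsed.
          LOG.warning("interrupted while sleeping: " + ie);
        }
      }
    }
  }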

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-1623.txt

@@ -248,3 +248,5 @@ HDFS-3023. Optimize entries in edits log for persistBlocks call. (todd)
HDFS-2979. Balancer should use logical uri for creating failover proxy with HA enabled. (atm)

HDFS-3035. Fix failure of TestFileAppendRestart due to OP_UPDATE_BLOCKS (todd)
+
+HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -247,4 +247,12 @@
       <Method name="save" />
       <Bug pattern="OS_OPEN_STREAM" />
     </Match>
+     <!--
+      This method isn't performance-critical and is much clearer to write as it's written.
+      -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.server.datanode.BlockPoolManager" />
+       <Method name="doRefreshNamenodes" />
+       <Bug category="PERFORMANCE" />
+     </Match>
  </FindBugsFilter>

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java

@@ -99,7 +99,8 @@ public class HAUtil {
        nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
    if (suffixes == null) {
      String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY + 
-          " must be suffixed with" + namenodeId + " for HA configuration.";
+          " must be suffixed with nameservice and namenode ID for HA " +
+          "configuration.";
      throw new HadoopIllegalArgumentException(msg);
    }
    

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -63,7 +63,8 @@ public class NameNodeProxies {
  /**
   * Wrapper for a client proxy as well as its associated service ID.
   * This is simply used as a tuple-like return type for
-   * {@link createProxy} and {@link createNonHaProxy}.
+   * {@link NameNodeProxies#createProxy} and
+   * {@link NameNodeProxies#createNonHAProxy}.
   */
  public static class ProxyAndInfo<PROXYTYPE> {
    private final PROXYTYPE proxy;
@@ -125,7 +126,7 @@ public class NameNodeProxies {

  /**
   * Creates an explicitly non-HA-enabled proxy object. Most of the time you
-   * don't want to use this, and should instead use {@link createProxy}.
+   * don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
   * 
   * @param conf the configuration object
   * @param nnAddr address of the remote NN to connect to
@@ -160,8 +161,8 @@ public class NameNodeProxies {
          conf, ugi);
    } else {
      String message = "Upsupported protocol found when creating the proxy " +
-          "conection to NameNode: " +
-          ((xface != null) ? xface.getClass().getName() : xface);
+          "connection to NameNode: " +
+          ((xface != null) ? xface.getClass().getName() : "null");
      LOG.error(message);
      throw new IllegalStateException(message);
    }
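Note: the javadoc fixes above address a common {@link} mistake — a bare method name such as {@link createProxy} does not resolve (javadoc needs a # to mark a member reference), which is exactly the kind of javadoc warning this commit cleans up. Either of these hypothetical forms would resolve:

  /**
   * Same-class member reference: {@link #createProxy}.
   * Qualified member reference:  {@link NameNodeProxies#createProxy}.
   */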

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1918,7 +1918,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    int numCurrentReplica = countLiveNodes(storedBlock);
    if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
        && numCurrentReplica >= minReplication) {
-      storedBlock = completeBlock(storedBlock.getINode(), storedBlock, false);
+      completeBlock(storedBlock.getINode(), storedBlock, false);
    } else if (storedBlock.isComplete()) {
      // check whether safe replication is reached for the block
      // only complete blocks are counted towards that.
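Note: the BlockManager change removes a dead store — the value assigned to storedBlock was never read again, the pattern FindBugs reports as DLS_DEAD_LOCAL_STORE. A minimal hypothetical example:

  public class DeadStore {
    static int compute() { return 42; }

    static void demo() {
      int unused = compute();  // flagged: local is written but never read
      compute();               // the fix keeps the side effect, drops the store
    }
  }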

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -173,7 +173,7 @@ class BPOfferService {
    }
  }
  
-  NamespaceInfo getNamespaceInfo() {
+  synchronized NamespaceInfo getNamespaceInfo() {
    return bpNSInfo;
  }
  
@@ -366,7 +366,7 @@ class BPOfferService {
    }
  }

-  DatanodeRegistration createRegistration() {
+  synchronized DatanodeRegistration createRegistration() {
    Preconditions.checkState(bpNSInfo != null,
        "getRegistration() can only be called after initial handshake");
    return dn.createBPRegistration(bpNSInfo);
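Note: the two methods above gain synchronized because bpNSInfo is written while holding the object lock elsewhere; reading it without that lock is what FindBugs reports as IS2_INCONSISTENT_SYNC, and the unlocked read may observe a stale value. A minimal hypothetical sketch of the pattern:

  public class Holder {
    private String info; // written only while holding "this"

    public synchronized void setInfo(String info) {
      this.info = info;
    }

    // Flagged: unsynchronized read of a field guarded elsewhere by "this";
    // another thread may see a stale value here.
    public String getInfoUnsafe() {
      return info;
    }

    // Fixed, mirroring the hunk above: take the same lock for the read.
    public synchronized String getInfoSafe() {
      return info;
    }
  }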

+ 17 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -188,7 +188,7 @@ public class FSEditLog  {
    this.sharedEditsDirs = FSNamesystem.getSharedEditsDirs(conf);
  }
  
-  public void initJournalsForWrite() {
+  public synchronized void initJournalsForWrite() {
    Preconditions.checkState(state == State.UNINITIALIZED ||
        state == State.CLOSED, "Unexpected state: %s", state);
    
@@ -196,7 +196,7 @@ public class FSEditLog  {
    state = State.BETWEEN_LOG_SEGMENTS;
  }
  
-  public void initSharedJournalsForRead() {
+  public synchronized void initSharedJournalsForRead() {
    if (state == State.OPEN_FOR_READING) {
      LOG.warn("Initializing shared journals for READ, already open for READ",
          new Exception());
@@ -209,7 +209,7 @@ public class FSEditLog  {
    state = State.OPEN_FOR_READING;
  }
  
-  private void initJournals(List<URI> dirs) {
+  private synchronized void initJournals(List<URI> dirs) {
    int minimumRedundantJournals = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY,
        DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT);
@@ -808,7 +808,7 @@ public class FSEditLog  {
   * Used only by unit tests.
   */
  @VisibleForTesting
-  List<JournalAndStream> getJournals() {
+  synchronized List<JournalAndStream> getJournals() {
    return journalSet.getAllJournalStreams();
  }
  
@@ -816,7 +816,7 @@ public class FSEditLog  {
   * Used only by tests.
   */
  @VisibleForTesting
-  public JournalSet getJournalSet() {
+  synchronized public JournalSet getJournalSet() {
    return journalSet;
  }
  
@@ -950,17 +950,14 @@ public class FSEditLog  {
  /**
   * Archive any log files that are older than the given txid.
   */
-  public void purgeLogsOlderThan(final long minTxIdToKeep) {
-    synchronized (this) {
-      // synchronized to prevent findbugs warning about inconsistent
-      // synchronization. This will be JIT-ed out if asserts are
-      // off.
-      assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
-        minTxIdToKeep <= curSegmentTxId :
-        "cannot purge logs older than txid " + minTxIdToKeep +
-        " when current segment starts at " + curSegmentTxId;
-    }
-
+  public synchronized void purgeLogsOlderThan(final long minTxIdToKeep) {
+    assert curSegmentTxId == HdfsConstants.INVALID_TXID || // on format this is no-op
+      minTxIdToKeep <= curSegmentTxId :
+      "cannot purge logs older than txid " + minTxIdToKeep +
+      " when current segment starts at " + curSegmentTxId;
+
+    // This could be improved to not need synchronization. But currently,
+    // journalSet is not threadsafe, so we need to synchronize this method.
    try {
      journalSet.purgeLogsOlderThan(minTxIdToKeep);
    } catch (IOException ex) {
@@ -992,8 +989,8 @@ public class FSEditLog  {


  // sets the initial capacity of the flush buffer.
-  public void setOutputBufferCapacity(int size) {
-      journalSet.setOutputBufferCapacity(size);
+  synchronized void setOutputBufferCapacity(int size) {
+    journalSet.setOutputBufferCapacity(size);
  }

  /**
@@ -1069,7 +1066,7 @@ public class FSEditLog  {
  /**
   * Run recovery on all journals to recover any unclosed segments
   */
-  void recoverUnclosedStreams() {
+  synchronized void recoverUnclosedStreams() {
    Preconditions.checkState(
        state == State.BETWEEN_LOG_SEGMENTS,
        "May not recover segments - wrong state: %s", state);
@@ -1092,7 +1089,7 @@ public class FSEditLog  {
   * @param toAtLeast the selected streams must contain this transaction
   * @param inProgessOk set to true if in-progress streams are OK
   */
-  public Collection<EditLogInputStream> selectInputStreams(long fromTxId,
+  public synchronized Collection<EditLogInputStream> selectInputStreams(long fromTxId,
      long toAtLeastTxId, boolean inProgressOk) throws IOException {
    List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
    EditLogInputStream stream = journalSet.getInputStream(fromTxId, inProgressOk);
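Note: the purgeLogsOlderThan rewrite earlier in this file relies on a basic equivalence — a synchronized instance method acquires the same monitor as a synchronized (this) block spanning its body — so folding the block into the method modifier introduces no new lock, it only widens the region held under it. A minimal hypothetical illustration:

  public class Monitors {
    // Both methods acquire exactly the same lock: the instance monitor.
    synchronized void purgeA(long txid) {
      // entire body runs while holding "this"
    }

    void purgeB(long txid) {
      synchronized (this) {
        // entire body runs while holding "this"
      }
    }
  }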

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -494,7 +494,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      nnResourceChecker = new NameNodeResourceChecker(conf);
      checkAvailableResources();
      assert safeMode != null &&
-        !safeMode.initializedReplQueues;
+        !safeMode.isPopulatingReplQueues();
      setBlockTotal();
      blockManager.activate(conf);
      this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
@@ -3801,7 +3801,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      }
    }

-    private void adjustBlockTotals(int deltaSafe, int deltaTotal) {
+    private synchronized void adjustBlockTotals(int deltaSafe, int deltaTotal) {
      if (!shouldIncrementallyTrackBlocks) {
        return;
      }

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

@@ -310,7 +310,9 @@ class FileJournalManager implements JournalManager {
        // file, but before writing anything to it. Safe to delete it.
        if (elf.getFile().length() == 0) {
          LOG.info("Deleting zero-length edit log file " + elf);
-          elf.getFile().delete();
+          if (!elf.getFile().delete()) {
+            throw new IOException("Unable to delete file " + elf.getFile());
+          }
          continue;
        }
        
@@ -328,7 +330,9 @@ class FileJournalManager implements JournalManager {
        // delete the file.
        if (elf.getNumTransactions() == 0) {
          LOG.info("Deleting edit log file with zero transactions " + elf);
-          elf.getFile().delete();
+          if (!elf.getFile().delete()) {
+            throw new IOException("Unable to delete " + elf.getFile());
+          }
          continue;
        }
        
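Note: java.io.File.delete() reports failure through its boolean return value rather than an exception, so the bare calls above silently ignored failures — the FindBugs bad-practice pattern for ignored return values; the patch surfaces them as IOExceptions. A minimal sketch of the same idiom:

  import java.io.File;
  import java.io.IOException;

  public class SafeDelete {
    // Convert File.delete()'s silent boolean failure into an exception,
    // matching what the hunk above does inline.
    static void deleteOrThrow(File f) throws IOException {
      if (!f.delete()) {
        throw new IOException("Unable to delete " + f);
      }
    }
  }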

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -315,10 +315,10 @@ class NamenodeJspHelper {
      // since the standby namenode doesn't compute replication queues 
      String underReplicatedBlocks = "";
      if (nn.getServiceState() == HAServiceState.ACTIVE) {
-    	  underReplicatedBlocks = new String(rowTxt() 
+    	  underReplicatedBlocks = rowTxt() 
              + colTxt("Excludes missing blocks.")
              + "Number of Under-Replicated Blocks" + colTxt() + ":" + colTxt()
-              + fsn.getBlockManager().getUnderReplicatedNotMissingBlocks()); 
+              + fsn.getBlockManager().getUnderReplicatedNotMissingBlocks(); 
      }
      out.print("<div id=\"dfstable\"> <table>\n" + rowTxt() + colTxt()
           + "Configured Capacity" + colTxt() + ":" + colTxt()
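Note: the change above drops a redundant new String(...) wrapper around a string expression — the copy constructor allocates an extra object for no benefit, the pattern FindBugs flags as DM_STRING_CTOR. A minimal hypothetical example:

  public class StringCtor {
    static String bad(int n) {
      return new String("count: " + n); // flagged: needless copy of an immutable String
    }

    static String good(int n) {
      return "count: " + n; // same value, no extra allocation
    }
  }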
           + "Configured Capacity" + colTxt() + ":" + colTxt()