HDFS-2172. Address findbugs and javadoc warnings in HDFS-1073 branch. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1073@1148592 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 14 years ago
parent
commit afe1d70c95

+ 1 - 0
hdfs/CHANGES.HDFS-1073.txt

@@ -79,3 +79,4 @@ HDFS-2160. Fix CreateEditsLog test tool in HDFS-1073 branch. (todd)
 HDFS-2168. Reenable TestEditLog.testFailedOpen and fix exposed bug. (todd)
 HDFS-2169. Clean up TestCheckpoint and remove TODOs (todd)
 HDFS-2170. Address remaining TODOs in HDFS-1073 branch. (todd)
+HDFS-2172. Address findbugs and javadoc warnings in HDFS-1073 branch. (todd)

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java

@@ -56,7 +56,7 @@ public class BackupImage extends FSImage {
    * - Transitions back to JOURNAL_ONLY if the log rolls while
    *   stopApplyingOnNextRoll is true.
    */
-  BNState bnState;
+  volatile BNState bnState;
   static enum BNState {
     // Edits from the NN should be dropped. On the next log roll,
     // transition to JOURNAL_ONLY state
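
Making bnState volatile presumably silences an inconsistent-synchronization warning: the field is read and written from more than one thread, not always under the same lock. A minimal standalone sketch of the visibility hazard volatile prevents (names are illustrative, not branch code):

// Illustrative only: without volatile, the worker thread may cache
// the field in a register and never observe the control thread's write.
class StateHolder {
  private volatile boolean stopped = false;

  void requestStop() {          // control thread
    stopped = true;             // happens-before later volatile reads
  }

  void workLoop() {             // worker thread
    while (!stopped) {
      // ... apply edits ...
    }
  }
}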

+ 1 - 0
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -86,6 +86,7 @@ class Checkpointer extends Daemon {
   /**
    * Initialize checkpoint.
    */
+  @SuppressWarnings("deprecation")
   private void initialize(Configuration conf) throws IOException {
     // Create connection to the namenode.
     shouldRun = true;

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java

@@ -76,7 +76,7 @@ class EditLogBackupOutputStream extends EditLogOutputStream {
     Storage.LOG.info("EditLogBackupOutputStream connects to: " + bnAddress);
     try {
       this.backupNode =
-        (BackupNodeProtocol) RPC.getProxy(BackupNodeProtocol.class,
+        RPC.getProxy(BackupNodeProtocol.class,
             BackupNodeProtocol.versionID, bnAddress, new HdfsConfiguration());
     } catch(IOException e) {
       Storage.LOG.error("Error connecting to: " + bnAddress, e);
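
The explicit cast is now redundant, which suggests RPC.getProxy is generic over the protocol class, so the compiler infers BackupNodeProtocol on its own. A self-contained sketch of the pattern with a hypothetical factory standing in for RPC (Java 8 syntax, illustration only):

import java.lang.reflect.Proxy;

class ProxyDemo {
  // Hypothetical generic factory; mirrors why the cast above is unneeded.
  static <T> T getProxy(Class<T> protocol) {
    return protocol.cast(Proxy.newProxyInstance(
        protocol.getClassLoader(), new Class<?>[] { protocol },
        (proxy, method, args) -> null));  // stub invocation handler
  }
  // The call site needs no cast: T is inferred from the class literal.
  // Runnable r = getProxy(Runnable.class);
}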

+ 19 - 13
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -208,9 +208,9 @@ public class FSEditLog  {
    * store yet.
    */
   void logEdit(final FSEditLogOpCodes opCode, final Writable ... writables) {
-    assert state != State.CLOSED;
-    
     synchronized (this) {
+      assert state != State.CLOSED;
+      
       // wait if an automatic sync is scheduled
       waitIfAutoSyncScheduled();
       
@@ -467,16 +467,17 @@ public class FSEditLog  {
         metrics.addSync(elapsed);
       }
       
-      if (badJournals.size() >= journals.size()) {
-        LOG.fatal("Could not sync any journal to persistent storage. " +
-            "Unsynced transactions: " + (txid - synctxid),
-            new Exception());
-        runtime.exit(1);
-      }
     } finally {
       // Prevent RuntimeException from blocking other log edit sync 
       synchronized (this) {
         if (sync) {
+          if (badJournals.size() >= journals.size()) {
+            LOG.fatal("Could not sync any journal to persistent storage. " +
+                "Unsynced transactions: " + (txid - synctxid),
+                new Exception());
+            runtime.exit(1);
+          }
+
           synctxid = syncStart;
           isSyncRunning = false;
         }
@@ -750,7 +751,7 @@ public class FSEditLog  {
    * Used only by unit tests.
    */
   @VisibleForTesting
-  void setRuntimeForTesting(Runtime runtime) {
+  synchronized void setRuntimeForTesting(Runtime runtime) {
     this.runtime = runtime;
   }
   
@@ -879,10 +880,15 @@ public class FSEditLog  {
    */
   public void archiveLogsOlderThan(
       final long minTxIdToKeep, final StorageArchiver archiver) {
-    assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op
-      minTxIdToKeep <= curSegmentTxId :
-      "cannot archive logs older than txid " + minTxIdToKeep +
-      " when current segment starts at " + curSegmentTxId;
+    synchronized (this) {
+      // synchronized to prevent findbugs warning about inconsistent
+      // synchronization. This will be JIT-ed out if asserts are
+      // off.
+      assert curSegmentTxId == FSConstants.INVALID_TXID || // on format this is no-op
+        minTxIdToKeep <= curSegmentTxId :
+        "cannot archive logs older than txid " + minTxIdToKeep +
+        " when current segment starts at " + curSegmentTxId;
+    }
     
     mapJournalsAndReportErrors(new JournalClosure() {
       @Override
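
Two patterns are worth noting in this file. The fatal "no journals synced" check moves inside the synchronized block so badJournals and synctxid are examined under the same lock that guards them, and the archive-time assert is wrapped in synchronized purely to satisfy findbugs, as the added comment explains. A standalone sketch of that "synchronized assert" idiom (hypothetical class, not branch code):

// The lock is taken only so every access to the guarded field looks
// consistent to findbugs; with assertions disabled the block is empty
// and the JIT can elide it.
class Segments {
  private long curSegmentTxId = -1;     // normally guarded by "this"

  void archiveOlderThan(long minTxIdToKeep) {
    synchronized (this) {
      assert minTxIdToKeep <= curSegmentTxId
          : "cannot archive past the current segment";
    }
    // ... continue without holding the lock; the assert was only a
    // sanity check ...
  }
}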

+ 3 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java

@@ -461,7 +461,9 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
       long maxValidTxnCount = Long.MIN_VALUE;
       for (FoundEditLog log : logs) {
         long validTxnCount = log.validateLog().numTransactions;
-        LOG.warn("  Log " + log.getFile() + " valid txns=" + validTxnCount);
+        LOG.warn("  Log " + log.getFile() +
+            " valid txns=" + validTxnCount +
+            " valid len=" + log.validateLog().validLength);
         maxValidTxnCount = Math.max(maxValidTxnCount, validTxnCount);
       }        
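
One nit: the new message calls log.validateLog() twice, once per field; if validation re-reads the edit file, caching the result in a local would avoid the second pass. A hypothetical self-contained sketch of that shape (types invented for illustration):

class LogScan {
  static class Validation { long numTransactions; long validLength; }
  interface Log { Validation validate(); String name(); }

  static long maxValidTxns(Iterable<Log> logs) {
    long max = Long.MIN_VALUE;
    for (Log log : logs) {
      Validation v = log.validate();    // one scan, reused below
      System.out.println("  Log " + log.name()
          + " valid txns=" + v.numTransactions
          + " valid len=" + v.validLength);
      max = Math.max(max, v.numTransactions);
    }
    return max;
  }
}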
 

+ 6 - 5
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -293,8 +293,9 @@ public class GetImageServlet extends HttpServlet {
       remoteport = 0;
       machineName = null;
 
-      for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
-        String key = it.next();
+      for (Map.Entry<String, String[]> entry : pmap.entrySet()) {
+        String key = entry.getKey();
+        String[] val = entry.getValue();
         if (key.equals("getimage")) { 
           isGetImage = true;
           txId = parseLongParam(request, TXID_PARAM);
@@ -306,11 +307,11 @@ public class GetImageServlet extends HttpServlet {
           isPutImage = true;
           txId = parseLongParam(request, TXID_PARAM);
         } else if (key.equals("port")) { 
-          remoteport = new Integer(pmap.get("port")[0]).intValue();
+          remoteport = new Integer(val[0]).intValue();
         } else if (key.equals("machine")) { 
-          machineName = pmap.get("machine")[0];
+          machineName = val[0];
         } else if (key.equals(STORAGEINFO_PARAM)) {
-          storageInfoString = pmap.get(key)[0];
+          storageInfoString = val[0];
         }
       }
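
Switching from keySet() plus get() to entrySet() drops one map lookup per key; this is the inefficiency findbugs' wrong-map-iterator pattern targets. A runnable sketch of the before and after (hypothetical map, not servlet code):

import java.util.HashMap;
import java.util.Map;

class EntrySetDemo {
  public static void main(String[] args) {
    Map<String, String[]> pmap = new HashMap<String, String[]>();
    pmap.put("port", new String[] { "50070" });

    // Before: every iteration pays for an extra hash lookup via get().
    for (String key : pmap.keySet()) {
      String[] val = pmap.get(key);           // second lookup
      System.out.println(key + "=" + val[0]);
    }

    // After: the entry carries both key and value, no extra lookup.
    for (Map.Entry<String, String[]> e : pmap.entrySet()) {
      System.out.println(e.getKey() + "=" + e.getValue()[0]);
    }
  }
}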
 

+ 2 - 3
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -409,7 +409,6 @@ public class NNStorage extends Storage implements Closeable {
     Preconditions.checkArgument(txid >= 0, "bad txid: " + txid);
     
     File txIdFile = getStorageFile(sd, NameNodeFile.SEEN_TXID);
-    LOG.info("===> writing txid " + txid + " to " + txIdFile);
     OutputStream fos = new AtomicFileOutputStream(txIdFile);
     try {
       fos.write(String.valueOf(txid).getBytes());
@@ -439,8 +438,8 @@ public class NNStorage extends Storage implements Closeable {
    * 
    * This is used when the image is loaded to avoid accidental rollbacks
    * in the case where an edit log is fully deleted but there is no
-   * checkpoint. See {@link TestNameEditsConfigs#testNameEditsConfigsFailure()}
-   * @param newCpT the txid that has been reached
+   * checkpoint. See TestNameEditsConfigs.testNameEditsConfigsFailure()
+   * @param txid the txid that has been reached
    */
   public void writeTransactionIdFileToStorage(long txid) {
     // Write txid marker in all storage directories
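
The seen_txid marker is written through AtomicFileOutputStream so a crash mid-write cannot leave a truncated txid on disk. An illustrative helper pair for such a marker file, assuming (as the patch suggests) that close() promotes or cleans up the temporary file:

// Illustrative helpers, not branch code.
import java.io.*;
import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;

class TxIdFile {
  static void write(File f, long txid) throws IOException {
    OutputStream fos = new AtomicFileOutputStream(f);
    try {
      fos.write(String.valueOf(txid).getBytes());
    } finally {
      fos.close();    // promotes (or cleans up) the tmp file
    }
  }

  static long read(File f) throws IOException {
    BufferedReader r = new BufferedReader(new FileReader(f));
    try {
      String line = r.readLine();
      return line == null ? 0 : Long.parseLong(line.trim());
    } finally {
      r.close();
    }
  }
}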

+ 13 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
@@ -38,7 +39,7 @@ import com.google.common.collect.Sets;
  * directories of the NN and enforcing a retention policy on checkpoints
  * and edit logs.
  * 
- * It delegates the actual removal of files to a {@link #StorageArchiver}
+ * It delegates the actual removal of files to a StorageArchiver
  * implementation, which might delete the files or instead copy them to
  * a filer or HDFS for later analysis.
  */
@@ -129,13 +130,21 @@ public class NNStorageArchivalManager {
   static class DeletionStorageArchiver implements StorageArchiver {
     @Override
     public void archiveLog(FoundEditLog log) {
-      log.getFile().delete();
+      deleteOrWarn(log.getFile());
     }
 
     @Override
     public void archiveImage(FoundFSImage image) {
-      image.getFile().delete();
-      MD5FileUtils.getDigestFileForFile(image.getFile()).delete();
+      deleteOrWarn(image.getFile());
+      deleteOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
+    }
+
+    private static void deleteOrWarn(File file) {
+      if (!file.delete()) {
+        // It's OK if we fail to delete something -- we'll catch it
+        // next time we swing through this directory.
+        LOG.warn("Could not delete " + file);
+      }      
     }
   }
 }
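
Checking File.delete()'s return value is what findbugs' ignored-return-value pattern asks for; delete() itself reports no reason when it fails. On Java 7+, java.nio.file.Files.delete throws an IOException that carries the cause, a possible alternative, though not available to this branch at the time. A sketch:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

class DeleteDemo {
  static void deleteOrWarn(File file) {
    try {
      Files.delete(file.toPath());   // throws with a reason on failure
    } catch (IOException e) {
      // Same policy as the patch: warn and move on; the next pass
      // through the directory will retry.
      System.err.println("Could not delete " + file + ": " + e);
    }
  }
}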

+ 10 - 3
hdfs/src/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java

@@ -23,6 +23,8 @@ import java.io.FileOutputStream;
 import java.io.FilterOutputStream;
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -41,6 +43,10 @@ import org.apache.hadoop.io.IOUtils;
 public class AtomicFileOutputStream extends FilterOutputStream {
 
   private static final String TMP_EXTENSION = ".tmp";
+  
+  private final static Log LOG = LogFactory.getLog(
+      AtomicFileOutputStream.class);
+  
   private final File origFile;
   private final File tmpFile;
   
@@ -67,8 +73,7 @@ public class AtomicFileOutputStream extends FilterOutputStream {
         boolean renamed = tmpFile.renameTo(origFile);
         if (!renamed) {
           // On windows, renameTo does not replace.
-          origFile.delete();
-          if (!tmpFile.renameTo(origFile)) {
+          if (!origFile.delete() || !tmpFile.renameTo(origFile)) {
             throw new IOException("Could not rename temporary file " +
                 tmpFile + " to " + origFile);
           }
@@ -79,7 +84,9 @@ public class AtomicFileOutputStream extends FilterOutputStream {
           IOUtils.closeStream(out);
         }
         // close wasn't successful, try to delete the tmp file
-        tmpFile.delete();
+        if (!tmpFile.delete()) {
+          LOG.warn("Unable to delete tmp file " + tmpFile);
+        }
       }
     }
   }
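
This class implements the standard write-to-temp-then-rename pattern; the delete-then-rename fallback exists because renameTo does not replace an existing file on Windows. A condensed standalone sketch of the same pattern (not the branch class; the sync-before-rename is an added assumption):

import java.io.*;

class AtomicWriteDemo {
  static void atomicWrite(File dst, byte[] data) throws IOException {
    File tmp = new File(dst.getParentFile(), dst.getName() + ".tmp");
    FileOutputStream out = new FileOutputStream(tmp);
    boolean ok = false;
    try {
      out.write(data);
      out.getFD().sync();            // flush to disk before the rename
      ok = true;
    } finally {
      out.close();
      if (!ok && !tmp.delete()) {
        System.err.println("Unable to delete tmp file " + tmp);
      }
    }
    // renameTo does not replace on Windows: fall back to delete+rename.
    if (!tmp.renameTo(dst) && (!dst.delete() || !tmp.renameTo(dst))) {
      throw new IOException("Could not rename " + tmp + " to " + dst);
    }
  }
}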

+ 6 - 27
hdfs/src/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java

@@ -81,7 +81,9 @@ public abstract class MD5FileUtils {
     BufferedReader reader =
       new BufferedReader(new FileReader(md5File));
     try {
-      md5Line = reader.readLine().trim();
+      md5Line = reader.readLine();
+      if (md5Line == null) { md5Line = ""; }
+      md5Line = md5Line.trim();
     } catch (IOException ioe) {
       throw new IOException("Error reading md5 file at " + md5File, ioe);
     } finally {
@@ -136,32 +138,9 @@ public abstract class MD5FileUtils {
         digest.getDigest());
     String md5Line = digestString + " *" + dataFile.getName() + "\n";
     
-    File md5FileTmp = new File(md5File.getParentFile(),
-        md5File.getName() + ".tmp");
-    
-    boolean success = false;
-    
-    // Write to tmp file
-    FileWriter writer = new FileWriter(md5FileTmp);
-    try {
-      writer.write(md5Line);
-      success = true;
-    } finally {
-      IOUtils.cleanup(LOG, writer);
-      if (!success) {
-        md5FileTmp.delete();
-      }
-    }
-    
-    // Move tmp file into place
-    if (!md5FileTmp.renameTo(md5File)) {
-      if (!md5File.delete() || !md5FileTmp.renameTo(md5File)) {
-        md5FileTmp.delete();
-        throw new IOException(
-            "Unable to rename " + md5FileTmp + " to " + md5File);
-      }
-    }
-    
+    AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File);
+    afos.write(md5Line.getBytes());
+    afos.close();
     LOG.debug("Saved MD5 " + digest + " to " + md5File);
   }
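
The rewrite replaces roughly twenty-five lines of hand-rolled tmp-file handling with the shared AtomicFileOutputStream. One caveat: write and close are not guarded here, so a failed write skips close() entirely and leaks the stream. A hedged sketch of a try/finally variant, with md5File and md5Line as in the hunk above, assuming close() either promotes or cleans up the tmp file as the earlier hunk suggests:

AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File);
try {
  afos.write(md5Line.getBytes());
} finally {
  afos.close();   // assumed to promote or clean up the tmp file
}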
 

+ 9 - 0
hdfs/src/test/findbugsExcludeFile.xml

@@ -229,6 +229,15 @@
        <Bug pattern="REC_CATCH_EXCEPTION" />
      </Match>
 
+     <!--
+      lastAppliedTxid is carefully unsynchronized in the BackupNode in a couple spots.
+      See the comments in BackupImage for justification.
+     -->
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.server.namenode.FSImage" />
+       <Field name="lastAppliedTxId" />
+       <Bug pattern="IS2_INCONSISTENT_SYNC" />
+     </Match>
      <!--
       Findbugs doesn't realize that closing a FilterOutputStream pushes the close down to
       wrapped streams, too.