
HADOOP-1297. Fix datanode so that requests to remove blocks that do not exist no longer cause block reports to be re-sent every second. Contributed by Dhruba.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@532873 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
Parent
Commit
4d055a456e
3 changed files with 40 additions and 15 deletions
  1. CHANGES.txt (+4 -0)
  2. src/java/org/apache/hadoop/dfs/DataNode.java (+1 -1)
  3. src/java/org/apache/hadoop/dfs/FSDataset.java (+35 -14)

+ 4 - 0
CHANGES.txt

@@ -273,6 +273,10 @@ Trunk (unreleased changes)
 81. HADOOP-1293.  Fix contrib/streaming to print more than the first
     twenty lines of standard error.  (Koji Noguchi via cutting)
 
+82. HADOOP-1297.  Fix datanode so that requests to remove blocks that
+    do not exist no longer causes block reports to be re-sent every
+    second.  (Dhruba Borthakur via cutting)
+
 
 Release 0.12.3 - 2007-04-06
 

+ 1 - 1
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -477,8 +477,8 @@ public class DataNode implements FSConstants, Runnable {
           //
           DatanodeCommand cmd = namenode.blockReport(dnRegistration,
                                                      data.getBlockReport());
-          processCommand(cmd);
           lastBlockReport = now;
+          processCommand(cmd);
         }
             
         //
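
The reordering above matters because the report loop keys off lastBlockReport. Roughly, assuming a simplified version of the surrounding service loop (names like BLOCK_REPORT_INTERVAL and shouldRun are illustrative, not copied from the source):

    long lastBlockReport = 0;
    while (shouldRun) {
      long now = System.currentTimeMillis();
      if (now - lastBlockReport > BLOCK_REPORT_INTERVAL) {
        DatanodeCommand cmd = namenode.blockReport(dnRegistration,
                                                   data.getBlockReport());
        // Advance the timestamp before acting on the namenode's reply.
        // If processCommand() throws (e.g. while invalidating a block
        // that is already gone), lastBlockReport has still moved forward,
        // so the next iteration does not immediately re-send the report.
        lastBlockReport = now;
        processCommand(cmd);
      }
      // ... heartbeats and other periodic work ...
    }

With the old ordering, an exception thrown out of processCommand() skipped the lastBlockReport assignment, so every subsequent pass through the loop re-sent the full block report, once per second, as described in HADOOP-1297.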

+ 35 - 14
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -587,36 +587,57 @@ class FSDataset implements FSConstants {
    * just get rid of it.
    */
   public void invalidate(Block invalidBlks[]) throws IOException {
+    boolean error = false;
     for (int i = 0; i < invalidBlks.length; i++) {
-      File f;
+      File f = null;
       synchronized (this) {
         f = getFile(invalidBlks[i]);
+        FSVolume v = volumeMap.get(invalidBlks[i]);
         if (f == null) {
-          throw new IOException("Unexpected error trying to delete block "
-                                + invalidBlks[i] + 
-                                ". Block not found in blockMap.");
+          DataNode.LOG.warn("Unexpected error trying to delete block "
+                            + invalidBlks[i] + 
+                            ". Block not found in blockMap." +
+                            ((v == null) ? " " : " Block found in volumeMap."));
+          error = true;
+          continue;
         }
-        FSVolume v = volumeMap.get(invalidBlks[i]);
         if (v == null) {
-          throw new IOException("Unexpected error trying to delete block "
-                                + invalidBlks[i] + 
-                                ". No volume for this block.");
+          DataNode.LOG.warn("Unexpected error trying to delete block "
+                            + invalidBlks[i] + 
+                            ". No volume for this block." +
+                            " Block found in blockMap. " + f + ".");
+          error = true;
+          continue;
         }
         File parent = f.getParentFile();
         if (parent == null) {
-          throw new IOException("Unexpected error trying to delete block "
-                                + invalidBlks[i] + 
-                                ". Parent not found for file " + f + ".");
+          DataNode.LOG.warn("Unexpected error trying to delete block "
+                            + invalidBlks[i] + 
+                            ". Parent not found for file " + f + ".");
+          error = true;
+          continue;
         }
         v.clearPath(parent);
         blockMap.remove(invalidBlks[i]);
         volumeMap.remove(invalidBlks[i]);
       }
       if (!f.delete()) {
-        throw new IOException("Unexpected error trying to delete block "
-                              + invalidBlks[i] + " at file " + f);
+        DataNode.LOG.warn("Unexpected error trying to delete block "
+                          + invalidBlks[i] + " at file " + f);
+        error = true;
+        continue;
+      }
+      DataNode.LOG.info("Deleting block " + invalidBlks[i] + " file " + f);
+      if (f.exists()) {
+        //
+        // This is a temporary check especially for hadoop-1220. 
+        // This will go away in the future.
+        //
+        DataNode.LOG.info("File " + f + " was deleted but still exists!");
       }
-      DataNode.LOG.info("Deleting block " + invalidBlks[i]);
+    }
+    if (error) {
+      throw new IOException("Error in deleting blocks.");
     }
   }
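
The rewritten invalidate() switches from fail-fast to log-and-continue: each bad block is logged with enough context to diagnose it (found in blockMap, volumeMap, both, or neither), the remaining blocks are still deleted, and a single IOException is thrown at the end so the caller still sees the failure. A minimal, self-contained sketch of that pattern (the class and method names here are illustrative, not from the Hadoop source):

    import java.io.File;
    import java.io.IOException;

    class BatchDeleter {
      static void deleteAll(File[] files) throws IOException {
        boolean error = false;
        for (File f : files) {
          if (!f.delete()) {
            // Log and keep going: one undeletable file should not
            // abort the rest of the batch.
            System.err.println("Unexpected error trying to delete " + f);
            error = true;
          }
        }
        // Surface the failure once, after every entry has been tried.
        if (error) {
          throw new IOException("Error in deleting files.");
        }
      }
    }

Deferring the throw until after the loop means a stale entry in the invalidate list no longer aborts processing of the remaining blocks, which complements the reordering in DataNode.java above.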