
HADOOP-3009. TestFileCreation sometimes fails because restarting
minidfscluster sometimes creates datanodes with ports that are
different from their original instance. (dhruba)



git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@637305 13f79535-47bb-0310-9956-ffa450edef68

Dhruba Borthakur 17 years ago
parent
commit
f7649e99c1
3 changed files with 36 additions and 9 deletions
  1. CHANGES.txt (+4 -0)
  2. src/java/org/apache/hadoop/dfs/FSNamesystem.java (+17 -7)
  3. src/test/org/apache/hadoop/dfs/TestFileCreation.java (+15 -2)

+ 4 - 0
CHANGES.txt

@@ -218,6 +218,10 @@ Trunk (unreleased changes)
     HADOOP-2994. Code cleanup for DFSClient: remove redundant 
     conversions from string to string.  (Dave Brosius via dhruba)
 
+    HADOOP-3009. TestFileCreation sometimes fails because restarting
+    minidfscluster sometimes creates datanodes with ports that are
+    different from their original instance. (dhruba)
+
 Release 0.16.1 - 2008-03-13
 
   INCOMPATIBLE CHANGES

+ 17 - 7
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -2605,24 +2605,34 @@ class FSNamesystem implements FSConstants, FSNamesystemMBean {
                                    + block.getBlockName() + " on " + node.getName()
                                    + " size " + block.getNumBytes());
     }
-
     //
-    // if file is being actively written to, then do not check 
-    // replication-factor here. It will be checked when the file is closed.
+    // If this block does not belong to any file, then we are done.
     //
-    if (fileINode == null || fileINode.isUnderConstruction()) {
+    if (fileINode == null) {
+      NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: "
+                                   + "addStoredBlock request received for " 
+                                   + block.getBlockName() + " on " + node.getName()
+                                   + " size " + block.getNumBytes()
+                                   + ", but it does not belong to any file.");
       return block;
     }
-        
+
     // filter out containingNodes that are marked for decommission.
     NumberReplicas num = countNodes(block);
     int numCurrentReplica = num.liveReplicas()
       + pendingReplications.getNumReplicas(block);
-        
+
     // check whether safe replication is reached for the block
-    // only if it is a part of a files
     incrementSafeBlockCount(numCurrentReplica);
  
+    //
+    // if file is being actively written to, then do not check 
+    // replication-factor here. It will be checked when the file is closed.
+    //
+    if (fileINode.isUnderConstruction()) {
+      return block;
+    }
+        
     // handle underReplication/overReplication
     short fileReplication = fileINode.getReplication();
     if (numCurrentReplica >= fileReplication) {
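
For readers tracing the control-flow change above: the patch moves incrementSafeBlockCount ahead of the under-construction check, so blocks of files still being written now count toward leaving safe mode, while replication-factor enforcement stays deferred until the file is closed. Below is a minimal sketch of the resulting ordering; Block, INodeFile, and the replica-count parameters are simplified stand-ins for the real FSNamesystem types, and only the order of the checks mirrors the diff.

// Minimal sketch of the check ordering in addStoredBlock after this patch.
// All types here are simplified stand-ins, not the real HDFS classes.
class AddStoredBlockSketch {

  static class Block {
    final String name;
    final long numBytes;
    Block(String name, long numBytes) { this.name = name; this.numBytes = numBytes; }
    String getBlockName() { return name; }
    long getNumBytes() { return numBytes; }
  }

  static class INodeFile {
    final boolean underConstruction;
    final short replication;
    INodeFile(boolean underConstruction, short replication) {
      this.underConstruction = underConstruction;
      this.replication = replication;
    }
    boolean isUnderConstruction() { return underConstruction; }
    short getReplication() { return replication; }
  }

  int safeBlockCount = 0;

  Block addStoredBlock(Block block, String nodeName, INodeFile fileINode,
                       int liveReplicas, int pendingReplicas) {
    // 1. A block that belongs to no file is logged and ignored.
    if (fileINode == null) {
      System.out.println("BLOCK* NameSystem.addStoredBlock: request for "
          + block.getBlockName() + " on " + nodeName
          + " size " + block.getNumBytes()
          + ", but it does not belong to any file.");
      return block;
    }

    int numCurrentReplica = liveReplicas + pendingReplicas;

    // 2. Safe-mode accounting now runs before the under-construction
    //    check, so blocks of open files are counted too (the fix).
    incrementSafeBlockCount(numCurrentReplica);

    // 3. Replication-factor enforcement is still deferred until the
    //    file is closed.
    if (fileINode.isUnderConstruction()) {
      return block;
    }

    // 4. Closed files get under-/over-replication handling.
    short fileReplication = fileINode.getReplication();
    if (numCurrentReplica < fileReplication) {
      // the real code schedules re-replication here
    }
    return block;
  }

  void incrementSafeBlockCount(int replicas) {
    // The real code tracks progress toward leaving namenode safe mode.
    safeBlockCount++;
  }
}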

+ 15 - 2
src/test/org/apache/hadoop/dfs/TestFileCreation.java

@@ -66,6 +66,16 @@ public class TestFileCreation extends TestCase {
     stm.write(buffer);
   }
 
+  //
+  // writes the specified number of bytes to the file.
+  //
+  private void writeFile(FSDataOutputStream stm, int size) throws IOException {
+    byte[] buffer = new byte[size];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer, 0, size);
+  }
+
   //
   // verify that the data written to the full blocks are sane
   // 
@@ -362,7 +372,10 @@ public class TestFileCreation extends TestCase {
       System.out.println("testFileCreationNamenodeRestart: "
                          + "Created file filestatus.dat with one "
                          + " replicas.");
-      writeFile(stm);
+
+      // write two full blocks.
+      writeFile(stm, numBlocks * blockSize);
+      stm.flush();
 
       // create another new file.
       //
@@ -410,7 +423,7 @@ public class TestFileCreation extends TestCase {
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
       assertTrue("Error blocks were not cleaned up for file " + file1,
-                 locations.locatedBlockCount() == 1);
+                 locations.locatedBlockCount() == 3);
 
       // verify filestatus2.dat
       locations = client.namenode.getBlockLocations(
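                                  file1.toString(), 0, Long.MAX_VALUE);

As a usage note on the test change: the new writeFile(FSDataOutputStream, int) overload writes an exact number of bytes, and the test flushes two full blocks before the namenode restart, which plausibly accounts for the updated assertion of 3 located blocks (two flushed blocks plus the block still under construction). A minimal sketch of the calling pattern follows, with illustrative constants that are assumptions rather than values from the test.

// Hedged sketch of the calling pattern introduced in the test; the
// constants below are illustrative assumptions, not values from the file.
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.fs.FSDataOutputStream;

class WriteFileUsageSketch {
  static final long seed = 0xDEADBEEFL; // assumed; the test defines its own seed
  static final int blockSize = 8192;    // assumed test block size
  static final int numBlocks = 2;

  // Same shape as the overload added above: write exactly 'size'
  // deterministic pseudo-random bytes to the stream.
  static void writeFile(FSDataOutputStream stm, int size) throws IOException {
    byte[] buffer = new byte[size];
    new Random(seed).nextBytes(buffer);
    stm.write(buffer, 0, size);
  }

  // Mirrors the patched test: write two full blocks and flush so the
  // datanodes report them to the namenode before the cluster restarts.
  static void writeTwoFullBlocks(FSDataOutputStream stm) throws IOException {
    writeFile(stm, numBlocks * blockSize);
    stm.flush();
  }
}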