
HDFS-2309. TestRenameWhileOpen fails.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security@1165939 13f79535-47bb-0310-9956-ffa450edef68
Jitendra Nath Pandey committed 14 years ago
commit faedfb80fd

CHANGES.txt  +2 -0

@@ -97,6 +97,8 @@ Release 0.20.205.0 - unreleased
     HADOOP-7596. Makes packaging of 64-bit jsvc possible. Has other
     bug fixes to do with packaging. (Eric Yang via ddas)
 
+    HDFS-2309. TestRenameWhileOpen fails. (jitendra)
+
   IMPROVEMENTS
 
     MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via

src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java  +11 -0

@@ -118,6 +118,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private long lastBlocksScheduledRollTime = 0;
   private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
   
+  // Set to false after processing first block report
+  private boolean firstBlockReport = true; 
+  
   /** Default constructor */
   public DatanodeDescriptor() {}
   
@@ -561,4 +564,12 @@ public class DatanodeDescriptor extends DatanodeInfo {
   public void setBalancerBandwidth(long bandwidth) {
     this.bandwidth = bandwidth;
   }
+
+  boolean firstBlockReport() {
+    return firstBlockReport;
+  }
+  
+  void processedBlockReport() {
+    firstBlockReport = false;
+  }
 }
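
The new field and its two package-private accessors form a one-way latch: the flag starts out true and is cleared exactly once, after the namenode has fully processed a block report from this datanode. The stand-alone sketch below (a hypothetical BlockReportLatch class, not the real DatanodeDescriptor) restates just that lifecycle:

// Minimal, hypothetical stand-in for the new DatanodeDescriptor state;
// only the members added by this patch are modeled.
class BlockReportLatch {
  // Set to false after the first block report has been processed.
  private boolean firstBlockReport = true;

  // Mirrors DatanodeDescriptor.firstBlockReport() in the patch.
  boolean firstBlockReport() {
    return firstBlockReport;
  }

  // Mirrors DatanodeDescriptor.processedBlockReport() in the patch.
  void processedBlockReport() {
    firstBlockReport = false;
  }
}

With this latch, the "is this a duplicate report?" question no longer depends on how many blocks happen to be attached to the node, which is what the old numBlocks() > 0 test in FSNamesystem keyed on.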

src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  +5 -5

@@ -84,7 +84,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.UnderReplicatedBlocks.BlockIterator;
@@ -3332,7 +3331,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     
     // To minimize startup time, we discard any second (or later) block reports
     // that we receive while still in startup phase.
-    if (isInStartupSafeMode() && node.numBlocks() > 0) {
+    if (isInStartupSafeMode() && !node.firstBlockReport()) {
       NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: "
           + "discarded non-initial block report from " + nodeID.getName()
           + " because namenode still in startup phase");
@@ -3365,6 +3364,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     NameNode.stateChangeLog.info("*BLOCK* NameSystem.processReport: from "
         + nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
         + ", processing time: " + (endTime - startTime) + " msecs");
+    node.processedBlockReport();
   }
 
   /**
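
Taken together, the two processReport hunks swap the duplicate-report test from "this node already has blocks" to "a report from this node has already been processed", and the latch is cleared only after the report has been fully applied and logged. A minimal sketch of that control flow, reusing the hypothetical BlockReportLatch above and an assumed isInStartupSafeMode() stand-in rather than the real FSNamesystem state:

public class ProcessReportSketch {
  // Assumed namenode state for the demo; in FSNamesystem this is a real check.
  static boolean isInStartupSafeMode() { return true; }

  static void processReport(BlockReportLatch node, int reportNumber) {
    // Discard any second (or later) report received during startup,
    // mirroring the guard introduced by the patch.
    if (isInStartupSafeMode() && !node.firstBlockReport()) {
      System.out.println("report " + reportNumber + ": discarded, non-initial report during startup");
      return;
    }
    System.out.println("report " + reportNumber + ": processed");
    // Clear the latch only after the report has been fully handled.
    node.processedBlockReport();
  }

  public static void main(String[] args) {
    BlockReportLatch node = new BlockReportLatch();
    processReport(node, 1);  // processed
    processReport(node, 2);  // discarded
  }
}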
@@ -3867,9 +3867,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
       throw new DisallowedDatanodeException(node);
     }
 
-    // decrement number of blocks scheduled to this datanode.
-    node.decBlocksScheduled();
-    
     // get the deletion hint node
     DatanodeDescriptor delHintNode = null;
     if(delHint!=null && delHint.length()!=0) {
@@ -3887,6 +3884,9 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
     // 
     pendingReplications.remove(block);
     addStoredBlock(block, node, delHintNode );
+    
+    // decrement number of blocks scheduled to this datanode.
+    node.decBlocksScheduled();    
   }
 
   public long getMissingBlocksCount() {
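
The final two hunks only move existing code: the decBlocksScheduled() call shifts from the top of the block-received path, before the block is examined, to the end, after pendingReplications.remove() and addStoredBlock() have run. The sketch below shows the post-patch ordering with hypothetical stand-ins for the surrounding namenode state; the real method does considerably more:

import java.util.HashSet;
import java.util.Set;

public class BlockReceivedOrderSketch {
  // Hypothetical counter standing in for DatanodeDescriptor's scheduled-block tracking.
  static class NodeCounters {
    int blocksScheduled = 0;
    void incBlocksScheduled() { blocksScheduled++; }
    void decBlocksScheduled() { blocksScheduled = Math.max(0, blocksScheduled - 1); }
  }

  static final Set<String> pendingReplications = new HashSet<>();

  static void addStoredBlock(String block, NodeCounters node) {
    // Stand-in for the real bookkeeping; only the ordering matters here.
    System.out.println("stored " + block);
  }

  // Mirrors the post-patch ordering: record the block first,
  // decrement the scheduled count last.
  static void blockReceived(String block, NodeCounters node) {
    pendingReplications.remove(block);
    addStoredBlock(block, node);
    node.decBlocksScheduled();
  }

  public static void main(String[] args) {
    NodeCounters node = new NodeCounters();
    node.incBlocksScheduled();           // a block transfer was scheduled earlier
    pendingReplications.add("blk_001");
    blockReceived("blk_001", node);
    System.out.println("blocks still scheduled: " + node.blocksScheduled);
  }
}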