Browse Source

HDFS-1555. Disallow pipeline recovery if a file is already being lease recovered. Contributed by Hairong Kuang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-append@1056491 13f79535-47bb-0310-9956-ffa450edef68
Hairong Kuang 14 years ago
parent
commit
824de85339

+ 3 - 0
CHANGES.txt

@@ -91,6 +91,9 @@ Release 0.20-append - Unreleased
     HDFS-724.  Use a bidirectional heartbeat to detect stuck
     pipeline. (hairong)
 
+    HDFS-1555. Disallow pipeline recovery if a file is already being
+    lease recovered. (hairong)
+
 Release 0.20.3 - Unreleased
 
   NEW FEATURES

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -1574,7 +1574,7 @@ public class DataNode extends Configured
 
     List<DatanodeID> successList = new ArrayList<DatanodeID>();
 
-    long generationstamp = namenode.nextGenerationStamp(block);
+    long generationstamp = namenode.nextGenerationStamp(block, closeFile);
     Block newblock = new Block(block.getBlockId(), block.getNumBytes(), generationstamp);
 
     for(BlockRecord r : syncList) {
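
The one-line change above forwards the existing closeFile flag as the new fromNN argument: in this recovery path closeFile is true only when the NameNode itself started the recovery (and will close the file afterwards), so it can double as the "initiated by the NameNode" indicator. A minimal sketch of the two resulting call shapes, assuming only the patched DatanodeProtocol signature; the wrapper class and method names are illustrative, not part of the patch:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

// Illustration only: the two ways the new flag is expected to be used.
class NextGenStampCallSketch {

  // Client-initiated pipeline recovery: the file stays open, fromNN == false.
  static long forPipelineRecovery(DatanodeProtocol namenode, Block block)
      throws IOException {
    // After this patch the NameNode may reject the call with an IOException
    // if its own lease recovery of the file has already started.
    return namenode.nextGenerationStamp(block, false);
  }

  // NameNode-initiated lease recovery: the file is closed afterwards, fromNN == true.
  static long forLeaseRecovery(DatanodeProtocol namenode, Block block)
      throws IOException {
    return namenode.nextGenerationStamp(block, true);
  }
}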

+ 17 - 0
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -893,6 +893,23 @@ class FSDirectory implements FSConstants, Closeable {
     return fullPathName.toString();
   }
   
+  /** Return the full path name of the specified inode */
+  static String getFullPathName(INode inode) {
+    // calculate the depth of this inode from root
+    int depth = 0;
+    for (INode i = inode; i != null; i = i.parent) {
+      depth++;
+    }
+    INode[] inodes = new INode[depth];
+
+    // fill up the inodes in the path from this inode to root
+    for (int i = 0; i < depth; i++) {
+      inodes[depth-i-1] = inode;
+      inode = inode.parent;
+    }
+    return getFullPathName(inodes, depth-1);
+  }
+
   /**
    * Create a directory 
    * If ancestor directories do not exist, automatically create them.
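
The new getFullPathName(INode) walks parent pointers twice: once to measure the node's depth, once to fill an array from the end so the inodes read root to leaf, before delegating to the existing array-based getFullPathName. A self-contained sketch of the same walk, using a stand-in Node class in place of INode and an inlined join in place of the existing helper:

// Standalone illustration of the parent-walk above; Node is not HDFS code.
class PathWalkSketch {
  static class Node {
    final String name;
    final Node parent;
    Node(String name, Node parent) { this.name = name; this.parent = parent; }
  }

  // Rebuild "/a/b/c" from the leaf by walking parent pointers to the root.
  static String fullPath(Node leaf) {
    // First pass: count how deep the node is (the root has a null parent).
    int depth = 0;
    for (Node n = leaf; n != null; n = n.parent) {
      depth++;
    }
    // Second pass: fill the array from the end so it reads root -> leaf.
    Node[] nodes = new Node[depth];
    for (int i = 0; i < depth; i++) {
      nodes[depth - i - 1] = leaf;
      leaf = leaf.parent;
    }
    // Join the names; the root's name is empty, so the result starts with '/'.
    StringBuilder sb = new StringBuilder();
    for (int i = 1; i < depth; i++) {
      sb.append('/').append(nodes[i].name);
    }
    return sb.length() == 0 ? "/" : sb.toString();
  }

  public static void main(String[] args) {
    Node root = new Node("", null);
    Node user = new Node("user", root);
    Node dir = new Node("hairong", user);
    Node file = new Node("file.txt", dir);
    System.out.println(fullPath(file));   // prints /user/hairong/file.txt
  }
}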

+ 15 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -4848,8 +4848,12 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
   /**
    * Verifies that the block is associated with a file that has a lease.
    * Increments, logs and then returns the stamp
-   */
-  synchronized long nextGenerationStampForBlock(Block block) throws IOException {
+   *
+   * @param block block
+   * @param fromNN true if the request is for lease recovery initiated by the NameNode
+   * @return a new generation stamp
+   */  
+  synchronized long nextGenerationStampForBlock(Block block, boolean fromNN) throws IOException {
     if (isInSafeMode()) {
       throw new SafeModeException("Cannot get nextGenStamp for " + block, safeMode);
     }
@@ -4865,6 +4869,15 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       LOG.info(msg);
       throw new IOException(msg);
     }
+    // Disallow client-initiated recovery once
+    // NameNode-initiated lease recovery starts
+    if (!fromNN && HdfsConstants.NN_RECOVERY_LEASEHOLDER.equals(
+        leaseManager.getLeaseByPath(FSDirectory.getFullPathName(fileINode)).getHolder())) {
+      String msg = block +
+        " is being recovered by NameNode, ignoring the request from a client";
+      LOG.info(msg);
+      throw new IOException(msg);
+    }
     if (!((INodeFileUnderConstruction)fileINode).setLastRecoveryTime(now())) {
       String msg = block + " is already being recovered, ignoring this request.";
       LOG.info(msg);
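
The new guard boils down to one rule: once the NameNode has replaced the client as the lease holder (which it does when it starts lease recovery), only requests marked fromNN may obtain a new generation stamp. A sketch of that rule as a pure function; the constant's value is an assumption for illustration, the real one lives in HdfsConstants.NN_RECOVERY_LEASEHOLDER:

// Sketch only: the admission rule from nextGenerationStampForBlock in isolation.
class GenStampGuardSketch {

  // Assumed value for illustration; the patch uses HdfsConstants.NN_RECOVERY_LEASEHOLDER.
  static final String NN_RECOVERY_LEASEHOLDER = "HDFS_NameNode";

  // fromNN: the request comes from NameNode-initiated lease recovery.
  // leaseHolder: current holder of the lease on the block's file.
  static boolean mayIssueGenerationStamp(boolean fromNN, String leaseHolder) {
    // The NameNode's own recovery is always allowed; a client is allowed only
    // while it, not the NameNode, still holds the lease.
    return fromNN || !NN_RECOVERY_LEASEHOLDER.equals(leaseHolder);
  }
}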

+ 6 - 0
src/hdfs/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -201,6 +201,12 @@ public class LeaseManager {
       this.holder = holder;
       renew();
     }
+
+    /** Get the holder of the lease */
+    public String getHolder() {
+      return holder;
+    }
+
     /** Only LeaseManager object can renew a lease */
     private void renew() {
       this.lastUpdate = FSNamesystem.now();

+ 2 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -497,8 +497,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
   }
 
   /** {@inheritDoc} */
-  public long nextGenerationStamp(Block block) throws IOException{
-    return namesystem.nextGenerationStampForBlock(block);
+  public long nextGenerationStamp(Block block, boolean fromNN) throws IOException{
+    return namesystem.nextGenerationStampForBlock(block, fromNN);
   }
 
   /** {@inheritDoc} */

+ 10 - 6
src/hdfs/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java

@@ -35,10 +35,10 @@ import org.apache.hadoop.ipc.VersionedProtocol;
  **********************************************************************/
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
-   * 19: SendHeartbeat returns an array of DatanodeCommand objects
-   *     in stead of a DatanodeCommand object.
+   * 20: nextGenerationStamp has a new parameter indicating whether it is
+   *     for NameNode-initiated lease recovery
    */
-  public static final long versionID = 19L;
+  public static final long versionID = 20L;
   
   // error code
   final static int NOTIFY = 0;
@@ -142,10 +142,14 @@ public interface DatanodeProtocol extends VersionedProtocol {
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
   
   /**
-   * @return the next GenerationStamp to be associated with the specified
-   * block. 
+   * Get the next GenerationStamp to be associated with the specified
+   * block.
+   * 
+   * @param block block
+   * @param fromNN true if the request is for lease recovery initiated by the NameNode
+   * @return a new generation stamp
    */
-  public long nextGenerationStamp(Block block) throws IOException;
+  public long nextGenerationStamp(Block block, boolean fromNN) throws IOException;
 
   /**
    * Commit block synchronization in lease recovery
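
Because nextGenerationStamp changed its signature, the versionID bump from 19 to 20 is what keeps a mismatched DataNode and NameNode from talking past each other: Hadoop's RPC layer compares the client's expected version with the one the server reports when the proxy is created and fails with a version-mismatch error if they differ. A minimal sketch, assuming the standard RPC.waitForProxy call a DataNode uses to reach the NameNode:

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.ipc.RPC;

// Sketch only: the version check happens inside the RPC layer when the proxy is built.
class ProtocolVersionSketch {
  static DatanodeProtocol connectToNameNode(InetSocketAddress nnAddr,
      Configuration conf) throws IOException {
    // The client passes DatanodeProtocol.versionID (now 20); the server reports
    // its own version, and a mismatch surfaces as an RPC version error, so an
    // unpatched peer cannot silently call the old one-argument method.
    return (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class,
        DatanodeProtocol.versionID, nnAddr, conf);
  }
}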