HDFS-8576. Lease recovery should return true if the lease can be released and the file can be closed. Contributed by J.Andreina

Tsz-Wo Nicholas Sze, 10 years ago
Parent commit: 7a248f2e9e
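
For callers, the practical effect of this change is that DistributedFileSystem#recoverLease can now return true on the first call when the lease can be released and the file closed immediately, instead of always returning false and forcing the client to poll. A minimal client-side sketch of how an application might drive lease recovery follows; it is not part of this commit, it assumes fs.defaultFS points at an HDFS cluster, and the path and retry interval are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        Path file = new Path("/testLeaseRecovery");   // illustrative path

        // After HDFS-8576, true means the file is already closed OR the lease
        // was released and the file closed in this call.  false still means
        // block recovery has been started but has not finished, so a retry
        // loop remains the robust pattern.
        boolean closed = dfs.recoverLease(file);
        while (!closed) {
          Thread.sleep(1000);                         // illustrative backoff
          closed = dfs.recoverLease(file);
        }
        // The file is closed now; it is safe to re-open it, e.g. for append.
      }
    }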

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -125,6 +125,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8600. TestWebHdfsFileSystemContract.testGetFileBlockLocations fails
     in branch-2.7. (Arpit Agarwal)
 
+    HDFS-8576.  Lease recovery should return true if the lease can be released
+    and the file can be closed.  (J.Andreina via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

+ 13 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2808,7 +2808,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @param src the path of the file to start lease recovery
    * @param holder the lease holder's name
    * @param clientMachine the client machine's name
-   * @return true if the file is already closed
+   * @return true if the file is already closed or
+   *         if the lease can be released and the file can be closed.
    * @throws IOException
    */
   boolean recoverLease(String src, String holder, String clientMachine)
@@ -2835,7 +2836,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         dir.checkPathAccess(pc, iip, FsAction.WRITE);
       }
   
-      recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE,
+      return recoverLeaseInternal(RecoverLeaseOp.RECOVER_LEASE,
           iip, src, holder, clientMachine, true);
     } catch (StandbyException se) {
       skipSync = true;
@@ -2848,7 +2849,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         getEditLog().logSync();
       }
     }
-    return false;
   }
 
   private enum RecoverLeaseOp {
@@ -2864,12 +2864,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
-  void recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip,
+  boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip,
       String src, String holder, String clientMachine, boolean force)
       throws IOException {
     assert hasWriteLock();
     INodeFile file = iip.getLastINode().asFile();
-    if (file != null && file.isUnderConstruction()) {
+    if (file.isUnderConstruction()) {
       //
       // If the file is under construction , then it must be in our
       // leases. Find the appropriate lease record.
@@ -2902,7 +2902,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         // close only the file src
         LOG.info("recoverLease: " + lease + ", src=" + src +
           " from client " + clientName);
-        internalReleaseLease(lease, src, iip, holder);
+        return internalReleaseLease(lease, src, iip, holder);
       } else {
         assert lease.getHolder().equals(clientName) :
           "Current lease holder " + lease.getHolder() +
@@ -2914,11 +2914,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         if (lease.expiredSoftLimit()) {
           LOG.info("startFile: recover " + lease + ", src=" + src + " client "
               + clientName);
-          boolean isClosed = internalReleaseLease(lease, src, iip, null);
-          if(!isClosed)
+          if (internalReleaseLease(lease, src, iip, null)) {
+            return true;
+          } else {
             throw new RecoveryInProgressException(
                 op.getExceptionMessage(src, holder, clientMachine,
                     "lease recovery is in progress. Try again later."));
+          }
         } else {
           final BlockInfoContiguous lastBlock = file.getLastBlock();
           if (lastBlock != null
@@ -2935,7 +2937,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           }
         }
       }
-    }
+    } else {
+      return true;
+    }
   }
 
   /**

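In summary, the NameNode-side change above makes recoverLease(...) return the value of recoverLeaseInternal(...): true when the file is not under construction (already closed) or when internalReleaseLease(...) can close it in the same call, and false when block recovery had to be scheduled; the unconditional return false at the end of recoverLease is gone. The following self-contained toy model of the caller-visible outcomes is illustrative only; the names are hypothetical and the real logic is in the diff above.

    // Illustrative only -- not Hadoop code.  Models what a caller of
    // recoverLease() can observe after this change.
    public class RecoverLeaseOutcome {
      enum FileState { ALREADY_CLOSED, CLOSABLE_NOW, NEEDS_BLOCK_RECOVERY }

      static boolean recoverLease(FileState state) {
        switch (state) {
          case ALREADY_CLOSED:
            return true;   // the new "else { return true; }" branch
          case CLOSABLE_NOW:
            return true;   // internalReleaseLease(...) closed the file in this call
          case NEEDS_BLOCK_RECOVERY:
            return false;  // recovery was started; the caller retries later
          default:
            throw new AssertionError(state);
        }
      }

      public static void main(String[] args) {
        for (FileState s : FileState.values()) {
          System.out.println(s + " -> " + recoverLease(s));
        }
      }
    }
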
+ 46 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.TestInterDatanodePr
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.EnumSetWritable;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.Test;
@@ -212,4 +214,48 @@ public class TestLeaseRecovery {
     assertTrue("File should be closed", newdfs.recoverLease(file));
 
   }
+
+  /**
+   * Recover the lease on a file and then append to it from another client.
+   */
+  @Test
+  public void testLeaseRecoveryAndAppend() throws Exception {
+    Configuration conf = new Configuration();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      Path file = new Path("/testLeaseRecovery");
+      DistributedFileSystem dfs = cluster.getFileSystem();
+
+      // create a file with 0 bytes
+      FSDataOutputStream out = dfs.create(file);
+      out.hflush();
+      out.hsync();
+
+      // abort the original stream
+      ((DFSOutputStream) out.getWrappedStream()).abort();
+      DistributedFileSystem newdfs =
+          (DistributedFileSystem) FileSystem.newInstance(
+              cluster.getConfiguration(0));
+
+      // Appending to a file whose lease is held by another client should fail
+      try {
+        newdfs.append(file);
+        fail("Append to a file whose lease is held by another client should fail");
+      } catch (RemoteException e) {
+        assertTrue(e.getMessage().contains("file lease is currently owned"));
+      }
+
+      // Lease recovery on the first try should be successful
+      boolean recoverLease = newdfs.recoverLease(file);
+      assertTrue(recoverLease);
+      FSDataOutputStream append = newdfs.append(file);
+      append.write("test".getBytes());
+      append.close();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
 }