
Merge branch 'trunk' into HDFS-6581

arp 11 years ago
parent
commit
222bf0fe67

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -131,7 +132,7 @@ public class TestRpcProgramNfs3 {
     String testRoot = fsHelper.getTestRootDir();
     testRootDir = new File(testRoot).getAbsoluteFile();
     final Path jksPath = new Path(testRootDir.toString(), "test.jks");
-    config.set(KeyProviderFactory.KEY_PROVIDER_PATH,
+    config.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
         JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);
 

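The change above swaps the generic KeyProviderFactory.KEY_PROVIDER_PATH setting for the HDFS-specific DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, which is the key the HDFS encryption-zone code path reads. A minimal sketch of the same wiring outside the test, assuming Hadoop 2.6-era classes on the classpath (the key store location is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class KeyProviderConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical keystore location; the test uses its test root dir instead.
        Path jksPath = new Path("/tmp/kp-demo", "test.jks");
        // SCHEME_NAME is "jceks", so this builds a jceks://file/... provider URI.
        conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
            JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
        // Prints something like: jceks://file/tmp/kp-demo/test.jks
        System.out.println(conf.get(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI));
      }
    }

With this set, the jceks://file/... URI points the key provider at a local Java keystore file, matching what the test now configures.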
+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -564,6 +564,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6970. Move startFile EDEK retries to the DFSClient. (wang)
 
+    HDFS-6948. DN rejects blocks if it has older UC block
+    (Eric Payne via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -774,6 +777,11 @@ Release 2.6.0 - UNRELEASED
     HDFS-6840. Clients are always sent to the same datanode when read
     is off rack. (wang)
 
+    HDFS-7065. Pipeline close recovery race can cause block corruption (kihwal)
+
+    HDFS-7096. Fix TestRpcProgramNfs3 to use DFS_ENCRYPTION_KEY_PROVIDER_URI
+    (clamb via cmccabe)
+
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
       HDFS-6387. HDFS CLI admin tool for creating & deleting an

+ 12 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -948,7 +948,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public String recoverClose(ExtendedBlock b, long newGS,
+  public synchronized String recoverClose(ExtendedBlock b, long newGS,
       long expectedBlockLen) throws IOException {
     LOG.info("Recover failed close " + b);
     // check replica's state
@@ -1152,9 +1152,17 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       ExtendedBlock b) throws IOException {
     ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
     if (replicaInfo != null) {
-      throw new ReplicaAlreadyExistsException("Block " + b +
-          " already exists in state " + replicaInfo.getState() +
-          " and thus cannot be created.");
+      if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
+          && replicaInfo instanceof ReplicaInPipeline) {
+        // Stop the previous writer
+        ((ReplicaInPipeline)replicaInfo)
+                      .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
+        invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
+      } else {
+        throw new ReplicaAlreadyExistsException("Block " + b +
+            " already exists in state " + replicaInfo.getState() +
+            " and thus cannot be created.");
+      }
     }
     
     FsVolumeImpl v = volumes.getNextVolume(storageType, b.getNumBytes());

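Two fixes land in this file: recoverClose is now synchronized, closing the pipeline-close recovery race tracked as HDFS-7065, and createTemporary no longer rejects an incoming block outright when the DataNode still holds an older under-construction replica (HDFS-6948); instead it stops the stale writer, invalidates that replica, and proceeds. A self-contained sketch of the replacement rule, using plain stand-in types rather than Hadoop's ReplicaInfo/ReplicaInPipeline:

    // Plain-Java sketch of the decision the new createTemporary() branch makes.
    // RBW/TEMPORARY stand in for "replica still in the write pipeline".
    final class StaleReplicaRule {
      enum State { FINALIZED, RBW, TEMPORARY }

      static boolean isInPipeline(State s) {
        return s == State.RBW || s == State.TEMPORARY;
      }

      // Replace only when the existing replica is still being written AND its
      // generation stamp is strictly older than the incoming block's.
      static boolean shouldReplace(State existing, long existingGs, long incomingGs) {
        return existingGs < incomingGs && isInPipeline(existing);
      }

      public static void main(String[] args) {
        System.out.println(shouldReplace(State.RBW, 1001L, 1002L));       // true: stale UC replica
        System.out.println(shouldReplace(State.RBW, 1002L, 1002L));       // false: true duplicate
        System.out.println(shouldReplace(State.FINALIZED, 1001L, 1002L)); // false: finalized stays
      }
    }

In the patch itself the "in pipeline" test is `replicaInfo instanceof ReplicaInPipeline`, which a finalized replica does not satisfy, so finalized blocks are never silently replaced.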
+ 24 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java

@@ -111,7 +111,7 @@ public class TestWriteToReplica {
   
   // test writeToTemporary
   @Test
-  public void testWriteToTempoary() throws Exception {
+  public void testWriteToTemporary() throws Exception {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
     try {
       cluster.waitActive();
@@ -475,5 +475,28 @@ public class TestWriteToReplica {
     }
     
     dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+
+    try {
+      dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.fail("Should not have created a replica that had already been "
+          + "created " + blocks[NON_EXISTENT]);
+    } catch (Exception e) {
+      Assert.assertTrue(
+          e.getMessage().contains(blocks[NON_EXISTENT].getBlockName()));
+      Assert.assertTrue(e instanceof ReplicaAlreadyExistsException);
+    }
+
+    long newGenStamp = blocks[NON_EXISTENT].getGenerationStamp() * 10;
+    blocks[NON_EXISTENT].setGenerationStamp(newGenStamp);
+    try {
+      ReplicaInPipeline replicaInfo =
+                dataSet.createTemporary(StorageType.DEFAULT, blocks[NON_EXISTENT]);
+      Assert.assertTrue(replicaInfo.getGenerationStamp() == newGenStamp);
+      Assert.assertTrue(
+          replicaInfo.getBlockId() == blocks[NON_EXISTENT].getBlockId());
+    } catch (ReplicaAlreadyExistsException e) {
+      Assert.fail("createRbw() Should have removed the block with the older "
+          + "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
+    }
   }
 }
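The new assertions pin down the resulting contract of createTemporary: a second create with the same generation stamp must still fail with ReplicaAlreadyExistsException, while a create carrying a newer generation stamp must evict the stale replica and succeed. A runnable plain-Java demo of that flow (hypothetical names, no Hadoop dependencies):

    import java.util.HashMap;
    import java.util.Map;

    public class CreateTemporaryDemo {
      // blockId -> generation stamp of the replica currently in the pipeline
      private final Map<Long, Long> volumeMap = new HashMap<>();

      long createTemporary(long blockId, long genStamp) {
        Long existing = volumeMap.get(blockId);
        if (existing != null) {
          if (existing < genStamp) {
            // Mirrors the patch: stop the old writer, invalidate its replica.
            volumeMap.remove(blockId);
          } else {
            throw new IllegalStateException(
                "Block " + blockId + " already exists and cannot be created.");
          }
        }
        volumeMap.put(blockId, genStamp);
        return genStamp;
      }

      public static void main(String[] args) {
        CreateTemporaryDemo ds = new CreateTemporaryDemo();
        ds.createTemporary(42L, 1000L);            // first create succeeds
        try {
          ds.createTemporary(42L, 1000L);          // same genstamp: must fail
        } catch (IllegalStateException expected) {
          System.out.println("rejected: " + expected.getMessage());
        }
        System.out.println("replaced, new genstamp=" +
            ds.createTemporary(42L, 1000L * 10));  // newer genstamp: replaces
      }
    }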