
HDFS-15801. Backport HDFS-14582 to branch-2.10 (Failed to start DN with ArithmeticException when NULL checksum used) (#2659)

Co-authored-by: Symious <yiyang0203@gmail.com>
Symious 4 years ago
parent commit 5dd6db1856
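
For context (an illustration for this writeup, not part of the commit): the failing state is reached when a client writes with checksums disabled, which records Type.NULL in each block's on-disk meta file; the datanode then trips over that meta file while validating blocks on restart. A minimal writer sketch, assuming a reachable default filesystem and a hypothetical /nullsum path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class WriteWithNullChecksum {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "NULL" disables per-chunk checksums, as in the regression test below
        conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
        try (FileSystem fs = FileSystem.get(conf);
             FSDataOutputStream out = fs.create(new Path("/nullsum"))) { // hypothetical path
          out.write(new byte[512]); // one 512-byte chunk
        }
      }
    }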

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java

@@ -68,6 +68,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
@@ -781,6 +782,10 @@ class BlockPoolSlice {
         // read and handle the common header here. For now just a version
         final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
             checksumIn, metaFile);
+        if (Type.NULL.equals(checksum.getChecksumType())) {
+          // in case of NULL checksum type consider full file as valid
+          return blockFileLen;
+        }
         int bytesPerChecksum = checksum.getBytesPerChecksum();
         int checksumSize = checksum.getChecksumSize();
         long numChunks = Math.min(
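
The early return above is what prevents the crash: for Type.NULL, checksum.getChecksumSize() is 0, and the numChunks computation that follows divides by it, producing the ArithmeticException from the issue title. A minimal sketch of that arithmetic (hypothetical standalone class with made-up lengths; only hadoop-common is assumed on the classpath):

    import org.apache.hadoop.util.DataChecksum;

    public class NullChecksumDivide {
      public static void main(String[] args) {
        DataChecksum checksum =
            DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
        int bytesPerChecksum = checksum.getBytesPerChecksum(); // 512
        int checksumSize = checksum.getChecksumSize();         // 0 for Type.NULL
        long metaFileLen = 7;  // made-up meta file length
        long crcHeaderLen = 7; // made-up checksum header length
        // Mirrors the division fed into Math.min(...) above:
        long numChunks = (metaFileLen - crcHeaderLen) / checksumSize;
        System.out.println(numChunks); // never reached: ArithmeticException: / by zero
      }
    }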

+ 43 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java

@@ -21,9 +21,12 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +34,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -177,4 +181,43 @@ public class TestDFSInputStream {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testNullCheckSumWhenDNRestarted()
+      throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
+    cluster.waitActive();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+
+      int chunkSize = 512;
+      Random r = new Random(12345L);
+      byte[] data = new byte[chunkSize];
+      r.nextBytes(data);
+
+      Path file = new Path("/testfile");
+      try (FSDataOutputStream fout = fs.create(file)) {
+        fout.write(data);
+        fout.hflush();
+        cluster.restartDataNode(0, true, true);
+      }
+
+      // wait for block to load
+      Thread.sleep(1000);
+
+      // fetch live DN
+      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().fetchDatanodes(live, null, false);
+      assertTrue("DN start should be success and live dn should be 2",
+          live.size() == 2);
+      assertTrue("File size should be " + chunkSize,
+          fs.getFileStatus(file).getLen() == chunkSize);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
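
To exercise just the new regression test from hadoop-hdfs-project/hadoop-hdfs (assuming the branch's standard Maven/surefire setup):

    mvn test -Dtest=TestDFSInputStream#testNullCheckSumWhenDNRestarted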