
HDFS-5843. DFSClient.getFileChecksum() throws IOException if checksum is disabled. Contributed by Laurent Goujon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1562927 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao, 11 years ago
Parent commit: 3d9ad8e3b6
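The bug: with dfs.checksum.type set to NULL, DataChecksum.getChecksumSize() returns 0, and the datanode's block-checksum path divided the checksum-file length by that size, so DFSClient.getFileChecksum() surfaced an IOException. A minimal client-side repro sketch; the cluster address and scratch path are assumptions, not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumDisabledRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020"); // assumed cluster
    conf.set("dfs.checksum.type", "NULL");             // checksums disabled
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/no-checksum-file");        // hypothetical path
    try (FSDataOutputStream out = fs.create(p, true)) {
      out.writeBytes("hello");
    }
    // Before HDFS-5843 this failed with an IOException (a division by
    // zero on the datanode); with the fix it returns a file checksum.
    System.out.println(fs.getFileChecksum(p));
  }
}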

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -304,6 +304,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5492. Port HDFS-2069 (Incorrect default trash interval in the
     docs) to trunk. (Akira Ajisaka via Arpit Agarwal)
 
+    HDFS-5843. DFSClient.getFileChecksum() throws IOException if checksum is 
+    disabled. (Laurent Goujon via jing9)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -655,8 +655,9 @@ class DataXceiver extends Receiver implements Runnable {
       final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       final DataChecksum checksum = header.getChecksum(); 
       final int bytesPerCRC = checksum.getBytesPerChecksum();
-      final long crcPerBlock = (metadataIn.getLength()
-          - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
+      final long crcPerBlock = checksum.getChecksumSize() > 0 
+              ? (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize()
+              : 0;
       
       //compute block checksum
       final MD5Hash md5 = MD5Hash.digest(checksumIn);
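Why the guard is needed: for DataChecksum.Type.NULL the per-chunk checksum size is 0, so the old expression divided by zero and the resulting error reached the client as an IOException. A standalone sketch of the guarded computation, assuming the org.apache.hadoop.util.DataChecksum API from hadoop-common; the 7-byte header size is an assumption standing in for BlockMetadataHeader.getHeaderSize():

import org.apache.hadoop.util.DataChecksum;

public class CrcPerBlockSketch {
  public static void main(String[] args) {
    // NULL checksum: getChecksumSize() == 0, which the old code divided by.
    DataChecksum checksum =
        DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
    long metadataLength = 7; // assumed: header only, no CRC data follows
    int headerSize = 7;      // stand-in for BlockMetadataHeader.getHeaderSize()
    long crcPerBlock = checksum.getChecksumSize() > 0
        ? (metadataLength - headerSize) / checksum.getChecksumSize()
        : 0;                 // guarded: 0 instead of a division by zero
    System.out.println("crcPerBlock = " + crcPerBlock);
  }
}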

+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -71,7 +71,7 @@ public class TestFSOutputSummer {
     cleanupFile(name);
   }
   
-  /* create a file, write data with vairable amount of data */
+  /* create a file, write data with variable amount of data */
   private void writeFile3(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true, 
         fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
@@ -103,6 +103,8 @@ public class TestFSOutputSummer {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
+    // do a sanity check. Get the file checksum
+    fileSys.getFileChecksum(name);
   }
 
   private void cleanupFile(Path name) throws IOException {
@@ -112,13 +114,20 @@ public class TestFSOutputSummer {
   }
   
   /**
-   * Test write opeation for output stream in DFS.
+   * Test write operation for output stream in DFS.
    */
   @Test
   public void testFSOutputSummer() throws Exception {
+    doTestFSOutputSummer("CRC32");
+    doTestFSOutputSummer("CRC32C");
+    doTestFSOutputSummer("NULL");
+  }
+  
+  private void doTestFSOutputSummer(String checksumType) throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(NUM_OF_DATANODES)
                                                .build();
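The NULL case is what exercises the new guard end to end: the file is written without checksums, yet getFileChecksum() must still succeed. A small sketch, assuming hadoop-common's DataChecksum, showing why NULL is the interesting type (checksum size 0, versus 4 bytes for the CRC variants):

import org.apache.hadoop.util.DataChecksum;

public class ChecksumSizes {
  public static void main(String[] args) {
    // CRC32 and CRC32C store 4 bytes per chunk; NULL stores none,
    // which is the case HDFS-5843 guards against.
    for (DataChecksum.Type t : new DataChecksum.Type[] {
        DataChecksum.Type.CRC32, DataChecksum.Type.CRC32C,
        DataChecksum.Type.NULL }) {
      DataChecksum c = DataChecksum.newDataChecksum(t, 512);
      System.out.println(t + " -> checksum size " + c.getChecksumSize());
    }
  }
}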