Browse Source

HDDS-885. Fix test failures due to ChecksumData. Contributed by Hanisha Koneru.

Bharat Viswanadham 6 years ago
parent
commit
ef3b03b75a

+ 11 - 3
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.hdds.scm.storage;
 import org.apache.hadoop.hdds.scm.XceiverClientAsyncReply;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .BlockNotCommittedException;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -305,10 +307,16 @@ public final class ContainerProtocolCalls  {
     KeyValue keyValue =
         KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
             .build();
+    Checksum checksum = new Checksum();
+    ChecksumData checksumData = checksum.computeChecksum(data);
     ChunkInfo chunk =
-        ChunkInfo.newBuilder().setChunkName(blockID.getLocalID()
-            + "_chunk").setOffset(0).setLen(data.length).
-            addMetadata(keyValue).build();
+        ChunkInfo.newBuilder()
+            .setChunkName(blockID.getLocalID() + "_chunk")
+            .setOffset(0)
+            .setLen(data.length)
+            .addMetadata(keyValue)
+            .setChecksumData(checksumData.getProtoBufMessage())
+            .build();
 
     PutSmallFileRequestProto putSmallFileRequest =
         PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)

+ 3 - 2
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java

@@ -342,8 +342,9 @@ public final class OzoneConfigKeys {
   public static final String OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT = "SHA256";
   public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM =
       "ozone.client.bytes.per.checksum";
-  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT =
-      1024 * 1024; // 1 MB
+  public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT = "1MB";
+  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES =
+      1024 * 1024;
   public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 256 * 1024;
 
   /**

+ 12 - 2
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java

@@ -24,7 +24,9 @@ import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
+    .ChecksumType;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -67,7 +69,7 @@ public class Checksum {
     this.checksumType = ChecksumType.valueOf(
         OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT);
     this.bytesPerChecksum = OzoneConfigKeys
-        .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT;
+        .OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES; // Default is 1MB
   }
 
   /**
@@ -236,4 +238,12 @@ public class Checksum {
 
     return checksumData.verifyChecksumDataMatches(computedChecksumData);
   }
+
+  /**
+   * Returns a ChecksumData with type NONE for testing.
+   */
+  @VisibleForTesting
+  public static ContainerProtos.ChecksumData getNoChecksumDataProto() {
+    return new ChecksumData(ChecksumType.NONE, 0).getProtoBufMessage();
+  }
 }

+ 8 - 1
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfo.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import java.io.IOException;
 import java.util.Map;
 import java.util.TreeMap;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.common.ChecksumData;
 
 /**
@@ -105,7 +106,13 @@ public class ChunkInfo {
     builder.setChunkName(this.getChunkName());
     builder.setOffset(this.getOffset());
     builder.setLen(this.getLen());
-    builder.setChecksumData(this.checksumData.getProtoBufMessage());
+    if (checksumData == null) {
+      // ChecksumData cannot be null while computing the protobuf message.
+      // Set it to NONE type (equivalent to no checksum).
+      builder.setChecksumData(Checksum.getNoChecksumDataProto());
+    } else {
+      builder.setChecksumData(this.checksumData.getProtoBufMessage());
+    }
 
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
       ContainerProtos.KeyValue.Builder keyValBuilder =

+ 18 - 0
hadoop-hdds/common/src/main/resources/ozone-default.xml

@@ -1397,5 +1397,23 @@
     </description>
   </property>
 
+  <property>
+    <name>ozone.client.checksum.type</name>
+    <value>SHA256</value>
+    <tag>OZONE, CLIENT, MANAGEMENT</tag>
+    <description>The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] determines
+      which algorithm will be used to compute the checksum for chunk data.
+      The default checksum type is SHA256.
+    </description>
+  </property>
+
+  <property>
+    <name>ozone.client.bytes.per.checksum</name>
+    <value>1MB</value>
+    <tag>OZONE, CLIENT, MANAGEMENT</tag>
+    <description>A checksum will be computed for each block of this many bytes
+      (the bytes-per-checksum size), and the checksums are stored sequentially.
+    </description>
+  </property>
 
 </configuration>

+ 2 - 0
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .WriteChunkRequestProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
@@ -199,6 +200,7 @@ public class TestHddsDispatcher {
                 + containerId + "_chunk_" + localId)
         .setOffset(0)
         .setLen(data.size())
+        .setChecksumData(Checksum.getNoChecksumDataProto())
         .build();
 
     WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto

+ 3 - 2
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java

@@ -171,9 +171,10 @@ public class RpcClient implements ClientProtocol {
             OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT,
             TimeUnit.MILLISECONDS);
 
-    int configuredChecksumSize = conf.getInt(
+    int configuredChecksumSize = (int) conf.getStorageSize(
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM,
-        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT);
+        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT,
+        StorageUnit.BYTES);
     int checksumSize;
     if(configuredChecksumSize <
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) {

+ 2 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -140,8 +141,7 @@ public class TestBlockDeletingService {
                   .setChunkName(chunk.getAbsolutePath())
                   .setLen(0)
                   .setOffset(0)
-                  .setChecksumData(
-                      ContainerProtos.ChecksumData.getDefaultInstance())
+                  .setChecksumData(Checksum.getNoChecksumDataProto())
                   .build();
           chunks.add(info);
         }

+ 6 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.ozone.common.Checksum;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -41,7 +42,11 @@ public class TestBlockData {
   static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
       long len) {
     return ContainerProtos.ChunkInfo.newBuilder()
-        .setChunkName(name).setOffset(offset).setLen(len).build();
+        .setChunkName(name)
+        .setOffset(offset)
+        .setLen(len)
+        .setChecksumData(Checksum.getNoChecksumDataProto())
+        .build();
   }
 
   @Test

+ 3 - 2
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/storage/DistributedStorageHandler.java

@@ -132,9 +132,10 @@ public final class DistributedStorageHandler implements StorageHandler {
             OzoneConfigKeys.OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT,
             TimeUnit.MILLISECONDS);
 
-    int configuredChecksumSize = conf.getInt(
+    int configuredChecksumSize = (int) conf.getStorageSize(
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM,
-        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT);
+        OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT,
+        StorageUnit.BYTES);
     int checksumSize;
     if(configuredChecksumSize <
         OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE) {