
HADOOP-15735. backport HADOOP-11687 Intermittent signature match failures in S3AFileSystem.
Contributed by MunShik JOUNG.

Steve Loughran committed 6 years ago
commit b9b737732f

+60 -1  hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
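For context, the new cloneObjectMetadata() helper replaces the ObjectMetadata.clone() call in the copy path roughly as sketched below. This is an illustrative sketch only: the CopyObjectRequest wiring is assumed from the surrounding copyFile() code and the AWS SDK for Java v1 API, and the direct copyObject call is a simplification, not the exact code in this file or part of the hunks shown in this diff.

    // Sketch of the copy path after this change (assumed surrounding code,
    // not part of the diff). Only the explicitly whitelisted metadata
    // attributes are carried over, so no stray headers end up in the
    // signed copy request.
    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    ObjectMetadata dstom = cloneObjectMetadata(srcom);   // was srcom.clone()
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
      dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest =
        new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setNewObjectMetadata(dstom);
    s3.copyObject(copyObjectRequest);   // simplified; the real path goes through the transfer manager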

@@ -26,6 +26,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -1086,7 +1087,7 @@ public class S3AFileSystem extends FileSystem {
     }
 
     ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
-    final ObjectMetadata dstom = srcom.clone();
+    ObjectMetadata dstom = cloneObjectMetadata(srcom);
     if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
       dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
     }
@@ -1192,6 +1193,64 @@ public class S3AFileSystem extends FileSystem {
     statistics.incrementWriteOps(1);
   }
 
+  /**
+   * Creates a copy of the passed {@link ObjectMetadata}.
+   * Does so without using the {@link ObjectMetadata#clone()} method,
+   * to avoid copying unnecessary headers.
+   * @param source the {@link ObjectMetadata} to copy
+   * @return a copy of {@link ObjectMetadata} with only relevant attributes
+   */
+  private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) {
+    // This approach may be too brittle, especially if
+    // in future there are new attributes added to ObjectMetadata
+    // that we do not explicitly call to set here
+    ObjectMetadata ret = new ObjectMetadata();
+
+    // Non null attributes
+    ret.setContentLength(source.getContentLength());
+
+    // Possibly null attributes
+    // Allowing nulls to pass breaks it during later use
+    if (source.getCacheControl() != null) {
+      ret.setCacheControl(source.getCacheControl());
+    }
+    if (source.getContentDisposition() != null) {
+      ret.setContentDisposition(source.getContentDisposition());
+    }
+    if (source.getContentEncoding() != null) {
+      ret.setContentEncoding(source.getContentEncoding());
+    }
+    if (source.getContentMD5() != null) {
+      ret.setContentMD5(source.getContentMD5());
+    }
+    if (source.getContentType() != null) {
+      ret.setContentType(source.getContentType());
+    }
+    if (source.getExpirationTime() != null) {
+      ret.setExpirationTime(source.getExpirationTime());
+    }
+    if (source.getExpirationTimeRuleId() != null) {
+      ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId());
+    }
+    if (source.getHttpExpiresDate() != null) {
+      ret.setHttpExpiresDate(source.getHttpExpiresDate());
+    }
+    if (source.getLastModified() != null) {
+      ret.setLastModified(source.getLastModified());
+    }
+    if (source.getOngoingRestore() != null) {
+      ret.setOngoingRestore(source.getOngoingRestore());
+    }
+    if (source.getRestoreExpirationTime() != null) {
+      ret.setRestoreExpirationTime(source.getRestoreExpirationTime());
+    }
+
+    for (Map.Entry<String, String> e : source.getUserMetadata().entrySet()) {
+      ret.addUserMetadata(e.getKey(), e.getValue());
+    }
+    return ret;
+  }
+
   /**
    * Return the number of bytes that large input files should be optimally
    * be split into to minimize i/o time.