
HADOOP-12891. S3AFileSystem should configure Multipart Copy threshold and chunk size. (Andrew Olson via stevel)

Steve Loughran 9 years ago
parent
commit
19f0f9608e

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -867,7 +867,10 @@
 <property>
   <name>fs.s3a.multipart.threshold</name>
   <value>2147483647</value>
-  <description>Threshold before uploads or copies use parallel multipart operations.</description>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    This also controls the partition size in renamed files, as rename() involves
+    copying the source file(s)
+  </description>
 </property>
 
 <property>

+ 2 - 0
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java

@@ -260,6 +260,8 @@ public class S3AFileSystem extends FileSystem {
     TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
     transferConfiguration.setMinimumUploadPartSize(partSize);
     transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
+    transferConfiguration.setMultipartCopyPartSize(partSize);
+    transferConfiguration.setMultipartCopyThreshold(multiPartThreshold);
 
     transfers = new TransferManager(s3, threadPoolExecutor);
     transfers.setConfiguration(transferConfiguration);
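
For context, a minimal, self-contained sketch of how the two new calls sit alongside the existing upload settings when the TransferManager is built. This is not the actual S3AFileSystem code: the class name, method name and the fixed-size thread pool are invented for illustration, partSize and multiPartThreshold are assumed to have already been read from fs.s3a.multipart.size and fs.s3a.multipart.threshold, and it assumes an AWS SDK version in which all four setters accept a long.

    // Sketch only, not the actual S3AFileSystem code.
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.transfer.TransferManager;
    import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;

    class TransferManagerSetupSketch {
      static TransferManager createTransfers(AmazonS3 s3, long partSize,
          long multiPartThreshold) {
        TransferManagerConfiguration transferConfiguration =
            new TransferManagerConfiguration();
        // Existing behaviour: part size and threshold for multipart uploads.
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
        // Added by this patch: apply the same part size and threshold to
        // multipart copies, which back rename() of large objects in S3A.
        transferConfiguration.setMultipartCopyPartSize(partSize);
        transferConfiguration.setMultipartCopyThreshold(multiPartThreshold);

        // Illustrative thread pool; S3A builds its own executor.
        ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(4);
        TransferManager transfers = new TransferManager(s3, threadPoolExecutor);
        transfers.setConfiguration(transferConfiguration);
        return transfers;
      }
    }

Without the two copy setters, only uploads honoured the configured part size and threshold; copies, and therefore rename(), fell back to the SDK defaults.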

+ 3 - 1
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md

@@ -312,7 +312,9 @@ this capability.
     <property>
       <name>fs.s3a.multipart.size</name>
       <value>104857600</value>
-      <description>How big (in bytes) to split upload or copy operations up into.</description>
+      <description>How big (in bytes) to split upload or copy operations up into.
+      This also controls the partition size in renamed files, as rename() involves
+      copying the source file(s)</description>
     </property>
 
     <property>
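
As a usage-side illustration of the documented properties, the snippet below sets them through the Hadoop Configuration API rather than core-site.xml and then performs a rename(). The class name, bucket, paths and byte values are made up for the example; only the property names come from the patch.

    // Illustrative only: tuning the multipart properties from code.
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class S3AMultipartTuningExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Split uploads and copies into 128 MB parts.
        conf.setLong("fs.s3a.multipart.size", 128L * 1024 * 1024);
        // Only switch to multipart operations for objects above 1 GB.
        conf.setLong("fs.s3a.multipart.threshold", 1024L * 1024 * 1024);

        FileSystem fs = FileSystem.get(new URI("s3a://example-bucket/"), conf);
        // rename() copies the source object, so a file above the threshold
        // is copied in 128 MB multipart chunks with this configuration.
        fs.rename(new Path("s3a://example-bucket/big-input.bin"),
                  new Path("s3a://example-bucket/big-output.bin"));
        fs.close();
      }
    }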