HADOOP-12891. S3AFileSystem should configure Multipart Copy threshold and chunk size. (Andrew Olson via stevel)
This commit is contained in:
parent adf937fbc0
commit 5df89f9a8a
```diff
@@ -840,7 +840,10 @@
 <property>
   <name>fs.s3a.multipart.threshold</name>
   <value>2147483647</value>
-  <description>Threshold before uploads or copies use parallel multipart operations.</description>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    This also controls the partition size in renamed files, as rename() involves
+    copying the source file(s)
+  </description>
 </property>

 <property>
```
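For reference, these properties can also be overridden per-application through the standard Hadoop Configuration API rather than edited in core-default.xml. A minimal sketch, assuming an illustrative bucket name and sizes that are not part of this commit:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class S3AMultipartSettings {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Split uploads/copies into 64 MB parts (the shipped default is 104857600, i.e. 100 MB).
    conf.setLong("fs.s3a.multipart.size", 64L * 1024 * 1024);
    // Use multipart operations for any object over 256 MB (the shipped default is 2147483647).
    conf.setLong("fs.s3a.multipart.threshold", 256L * 1024 * 1024);

    // The settings take effect when the S3A filesystem instance is created.
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    System.out.println("Filesystem: " + fs.getUri());
  }
}
```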
```diff
@@ -318,6 +318,8 @@ public class S3AFileSystem extends FileSystem {
     TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
     transferConfiguration.setMinimumUploadPartSize(partSize);
     transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
+    transferConfiguration.setMultipartCopyPartSize(partSize);
+    transferConfiguration.setMultipartCopyThreshold(multiPartThreshold);

     transfers = new TransferManager(s3, threadPoolExecutor);
     transfers.setConfiguration(transferConfiguration);
```
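This hunk is the heart of the fix: previously the TransferManagerConfiguration applied fs.s3a.multipart.size and fs.s3a.multipart.threshold only to uploads, so server-side copies (and therefore rename(), which copies the source objects) fell back to the AWS SDK's own multipart-copy defaults. A standalone sketch of the same wiring against the AWS SDK for Java v1; the client construction and thread-pool size are illustrative assumptions, not S3A's actual initialization:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;

public class ConfiguredTransferManager {
  public static TransferManager create(long partSize, long multiPartThreshold) {
    // Illustrative only; S3AFileSystem builds its own client from fs.s3a.* settings.
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(10);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    // Uploads: these two were already configured before this commit.
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
    // Copies: the two calls this commit adds, so copy/rename honours the same sizes.
    transferConfiguration.setMultipartCopyPartSize(partSize);
    transferConfiguration.setMultipartCopyThreshold(multiPartThreshold);

    TransferManager transfers = new TransferManager(s3, threadPoolExecutor);
    transfers.setConfiguration(transferConfiguration);
    return transfers;
  }
}
```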
```diff
@@ -315,7 +315,9 @@ this capability.
 <property>
   <name>fs.s3a.multipart.size</name>
   <value>104857600</value>
-  <description>How big (in bytes) to split upload or copy operations up into.</description>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    This also controls the partition size in renamed files, as rename() involves
+    copying the source file(s)</description>
 </property>

 <property>
```
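A worked example of what these defaults mean (the arithmetic is mine, not part of the commit): only objects larger than 2147483647 bytes (2^31 - 1, just under 2 GiB) trigger multipart operations at all, and a qualifying object is split into ceil(size / 104857600) parts. So with the threshold lowered to, say, 1 GiB, renaming a 10 GiB file copies it in ceil(10737418240 / 104857600) = 103 parts of at most 100 MiB each.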