HADOOP-12891. S3AFileSystem should configure Multipart Copy threshold and chunk size. (Andrew Olson via stevel)

Steve Loughran 2016-04-22 11:24:24 +01:00
parent 337bcde1dc
commit 19f0f9608e
3 changed files with 9 additions and 2 deletions


@@ -867,7 +867,10 @@
 <property>
   <name>fs.s3a.multipart.threshold</name>
   <value>2147483647</value>
-  <description>Threshold before uploads or copies use parallel multipart operations.</description>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    This also controls the partition size in renamed files, as rename() involves
+    copying the source file(s)
+  </description>
 </property>
 <property>

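To see how the threshold and the part size interact, here is a minimal, self-contained Java sketch (not part of this patch; the class name and the 5 GB object size are made up for illustration) that reads both keys with the defaults documented above and works out how many parts a copy would be split into:

import org.apache.hadoop.conf.Configuration;

public class MultipartSizingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Defaults taken from the property descriptions above.
    long partSize  = conf.getLong("fs.s3a.multipart.size", 104857600L);       // 100 MB
    long threshold = conf.getLong("fs.s3a.multipart.threshold", 2147483647L); // ~2 GB

    long objectSize = 5L * 1024 * 1024 * 1024; // hypothetical 5 GB object being copied/renamed

    if (objectSize >= threshold) {
      long parts = (objectSize + partSize - 1) / partSize; // round up
      System.out.println("Multipart copy with " + parts + " parts of up to " + partSize + " bytes");
    } else {
      System.out.println("Single-part copy; object is below fs.s3a.multipart.threshold");
    }
  }
}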

@@ -260,6 +260,8 @@ private void initTransferManager() {
     TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
     transferConfiguration.setMinimumUploadPartSize(partSize);
     transferConfiguration.setMultipartUploadThreshold(multiPartThreshold);
+    transferConfiguration.setMultipartCopyPartSize(partSize);
+    transferConfiguration.setMultipartCopyThreshold(multiPartThreshold);
     transfers = new TransferManager(s3, threadPoolExecutor);
     transfers.setConfiguration(transferConfiguration);

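For context, the two new setMultipartCopy* calls sit alongside the existing upload settings on the AWS SDK's TransferManagerConfiguration. Below is a standalone sketch of that wiring; the configuration keys and defaults mirror the properties above, but the client construction, thread pool, and class name are illustrative and not the actual S3AFileSystem initialization code:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerConfiguration;

import org.apache.hadoop.conf.Configuration;

public class TransferManagerSetupSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    long partSize  = conf.getLong("fs.s3a.multipart.size", 104857600L);
    long threshold = conf.getLong("fs.s3a.multipart.threshold", 2147483647L);

    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    ExecutorService pool = Executors.newFixedThreadPool(10);

    TransferManagerConfiguration tmConf = new TransferManagerConfiguration();
    // Uploads: the two knobs that were already wired up before this patch.
    tmConf.setMinimumUploadPartSize(partSize);
    tmConf.setMultipartUploadThreshold(threshold);
    // Copies: the two calls this patch adds, so copy (and hence rename) uses the same sizing.
    tmConf.setMultipartCopyPartSize(partSize);
    tmConf.setMultipartCopyThreshold(threshold);

    TransferManager transfers = new TransferManager(s3, pool);
    transfers.setConfiguration(tmConf);
  }
}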

@@ -312,7 +312,9 @@ this capability.
 <property>
   <name>fs.s3a.multipart.size</name>
   <value>104857600</value>
-  <description>How big (in bytes) to split upload or copy operations up into.</description>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    This also controls the partition size in renamed files, as rename() involves
+    copying the source file(s)</description>
 </property>
 <property>
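As a usage note, a client that wants smaller copy partitions (for example, more parallelism when renaming large files) can override both keys before creating the filesystem. A hedged sketch, with a made-up bucket and paths:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3ARenameTuningSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Smaller parts: more parallel copy requests per rename, but more requests overall.
    conf.setLong("fs.s3a.multipart.size", 64L * 1024 * 1024);       // 64 MB parts
    conf.setLong("fs.s3a.multipart.threshold", 128L * 1024 * 1024); // multipart above 128 MB

    // Bucket and paths are hypothetical.
    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    fs.rename(new Path("s3a://example-bucket/data/input.dat"),
              new Path("s3a://example-bucket/data/renamed.dat"));
    fs.close();
  }
}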