diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java index ff16e443f5e..7e8e67d3c34 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java @@ -109,6 +109,10 @@ public class DistCpConstants { /* DistCp CopyListing class override param */ public static final String CONF_LABEL_COPY_LISTING_CLASS = "distcp.copy.listing.class"; + /* DistCp Copy Buffer Size */ + public static final String CONF_LABEL_COPY_BUFFER_SIZE = + "distcp.copy.buffer.size"; + /** * Constants for DistCp return code to shell / consumer of ToolRunner's run */ @@ -141,4 +145,6 @@ public class DistCpConstants { public static final String HDFS_RESERVED_RAW_DIRECTORY_NAME = "/.reserved/raw"; static final String HDFS_DISTCP_DIFF_DIRECTORY_NAME = ".distcp.diff.tmp"; + + public static final int COPY_BUFFER_SIZE_DEFAULT = 8 * 1024; } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java index c34005e6c4b..fc047cadadc 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpContext.java @@ -175,6 +175,10 @@ public class DistCpContext { return options.getBlocksPerChunk() > 0; } + public int getCopyBufferSize() { + return options.getCopyBufferSize(); + } + public void setTargetPathExists(boolean targetPathExists) { this.targetPathExists = targetPathExists; } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java index 81abb7df13d..016172e02f6 100644 --- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java @@ -179,6 +179,14 @@ public enum DistCpOptionSwitch { + "system implements getBlockLocations method and the target file " + "system implements concat method")), + /** + * Configurable copy buffer size. + */ + COPY_BUFFER_SIZE(DistCpConstants.CONF_LABEL_COPY_BUFFER_SIZE, + new Option("copybuffersize", true, "Size of the copy buffer to use. " + + "By default is " + + DistCpConstants.COPY_BUFFER_SIZE_DEFAULT + "B.")), + /** * Specify bandwidth per map in MB, accepts bandwidth as a fraction */ diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java index 97ae0c4ef1a..af6cb8be03a 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java @@ -143,6 +143,8 @@ public final class DistCpOptions { // to copy in parallel. Default is 0 and file are not splitted. private final int blocksPerChunk; + private final int copyBufferSize; + /** * File attributes for preserve. * @@ -200,6 +202,8 @@ public final class DistCpOptions { this.preserveStatus = builder.preserveStatus; this.blocksPerChunk = builder.blocksPerChunk; + + this.copyBufferSize = builder.copyBufferSize; } public Path getSourceFileListing() { @@ -302,7 +306,7 @@ public final class DistCpOptions { } /** - * Checks if the input attribute should be preserved or not + * Checks if the input attribute should be preserved or not. 
* * @param attribute - Attribute to check * @return True if attribute should be preserved, false otherwise @@ -315,6 +319,10 @@ public final class DistCpOptions { return blocksPerChunk; } + public int getCopyBufferSize() { + return copyBufferSize; + } + /** * Add options to configuration. These will be used in the Mapper/committer * @@ -351,6 +359,8 @@ public final class DistCpOptions { } DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.BLOCKS_PER_CHUNK, String.valueOf(blocksPerChunk)); + DistCpOptionSwitch.addToConf(conf, DistCpOptionSwitch.COPY_BUFFER_SIZE, + String.valueOf(copyBufferSize)); } /** @@ -385,6 +395,7 @@ public final class DistCpOptions { ", targetPath=" + targetPath + ", filtersFile='" + filtersFile + '\'' + ", blocksPerChunk=" + blocksPerChunk + + ", copyBufferSize=" + copyBufferSize + '}'; } @@ -429,6 +440,9 @@ public final class DistCpOptions { private int blocksPerChunk = 0; + private int copyBufferSize = + DistCpConstants.COPY_BUFFER_SIZE_DEFAULT; + public Builder(List sourcePaths, Path targetPath) { Preconditions.checkArgument(sourcePaths != null && !sourcePaths.isEmpty(), "Source paths should not be null or empty!"); @@ -664,6 +678,13 @@ public final class DistCpOptions { this.blocksPerChunk = newBlocksPerChunk; return this; } + + public Builder withCopyBufferSize(int newCopyBufferSize) { + this.copyBufferSize = + newCopyBufferSize > 0 ? 
newCopyBufferSize + : DistCpConstants.COPY_BUFFER_SIZE_DEFAULT; + return this; + } } } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java index 21ff0f86841..96fb1d918e4 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java @@ -213,6 +213,18 @@ public class OptionsParser { } } + if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) { + final String copyBufferSizeStr = getVal(command, + DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch().trim()); + try { + int copyBufferSize = Integer.parseInt(copyBufferSizeStr); + builder.withCopyBufferSize(copyBufferSize); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("copyBufferSize is invalid: " + + copyBufferSizeStr, e); + } + } + return builder.build(); } diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java index 2c17fef1a38..21f621adbec 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java @@ -38,6 +38,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapreduce.Mapper; import org.apache.hadoop.tools.CopyListingFileStatus; import org.apache.hadoop.tools.DistCpConstants; +import org.apache.hadoop.tools.DistCpOptionSwitch; import org.apache.hadoop.tools.DistCpOptions.FileAttribute; import org.apache.hadoop.tools.mapred.CopyMapper.FileAction; import org.apache.hadoop.tools.util.DistCpUtils; @@ -53,7 +54,6 @@ import com.google.common.annotations.VisibleForTesting; public class 
RetriableFileCopyCommand extends RetriableCommand { private static Log LOG = LogFactory.getLog(RetriableFileCopyCommand.class); - private static int BUFFER_SIZE = 8 * 1024; private boolean skipCrc = false; private FileAction action; @@ -169,6 +169,9 @@ public class RetriableFileCopyCommand extends RetriableCommand { throws IOException { FsPermission permission = FsPermission.getFileDefault().applyUMask( FsPermission.getUMask(targetFS.getConf())); + int copyBufferSize = context.getConfiguration().getInt( + DistCpOptionSwitch.COPY_BUFFER_SIZE.getConfigLabel(), + DistCpConstants.COPY_BUFFER_SIZE_DEFAULT); final OutputStream outStream; if (action == FileAction.OVERWRITE) { // If there is an erasure coding policy set on the target directory, @@ -180,14 +183,14 @@ public class RetriableFileCopyCommand extends RetriableCommand { targetFS, targetPath); FSDataOutputStream out = targetFS.create(targetPath, permission, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), - BUFFER_SIZE, repl, blockSize, context, + copyBufferSize, repl, blockSize, context, getChecksumOpt(fileAttributes, sourceChecksum)); outStream = new BufferedOutputStream(out); } else { outStream = new BufferedOutputStream(targetFS.append(targetPath, - BUFFER_SIZE)); + copyBufferSize)); } - return copyBytes(source, sourceOffset, outStream, BUFFER_SIZE, + return copyBytes(source, sourceOffset, outStream, copyBufferSize, context); } diff --git a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm index a77deb2ffee..ee0a93e9e8f 100644 --- a/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm +++ b/hadoop-tools/hadoop-distcp/src/site/markdown/DistCp.md.vm @@ -238,6 +238,7 @@ Flag | Description | Notes `-numListstatusThreads` | Number of threads to use for building file listing | At most 40 threads. `-skipcrccheck` | Whether to skip CRC checks between source and target paths. | `-blocksperchunk ` | Number of blocks per chunk. 
When specified, split files into chunks to copy in parallel | If set to a positive value, files with more blocks than this value will be split into chunks of `<blocksperchunk>` blocks to be transferred in parallel, and reassembled on the destination. By default, `<blocksperchunk>` is 0 and the files will be transmitted in their entirety without splitting. This switch is only applicable when the source file system implements getBlockLocations method and the target file system implements concat method. | +`-copybuffersize <copybuffersize>` | Size of the copy buffer to use. By default, `<copybuffersize>` is set to 8192B | Architecture of DistCp ---------------------- diff --git a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java index 35251943c02..6b59b974401 100644 --- a/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java +++ b/hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpOptions.java @@ -287,7 +287,7 @@ public class TestDistCpOptions { "mapBandwidth=0.0, copyStrategy='uniformsize', preserveStatus=[], " + "atomicWorkPath=null, logPath=null, sourceFileListing=abc, " + "sourcePaths=null, targetPath=xyz, filtersFile='null'," + - " blocksPerChunk=0}"; + " blocksPerChunk=0, copyBufferSize=8192}"; String optionString = option.toString(); Assert.assertEquals(val, optionString); Assert.assertNotSame(DistCpOptionSwitch.ATOMIC_COMMIT.toString(), @@ -497,4 +497,21 @@ public class TestDistCpOptions { Assert.assertFalse(builder.build().shouldAppend()); } + @Test + public void testSetCopyBufferSize() { + final DistCpOptions.Builder builder = new DistCpOptions.Builder( + Collections.singletonList(new Path("hdfs://localhost:8020/source")), + new Path("hdfs://localhost:8020/target/")); + + Assert.assertEquals(DistCpConstants.COPY_BUFFER_SIZE_DEFAULT, + builder.build().getCopyBufferSize()); + + builder.withCopyBufferSize(4194304); + 
Assert.assertEquals(4194304, + builder.build().getCopyBufferSize()); + + builder.withCopyBufferSize(-1); + Assert.assertEquals(DistCpConstants.COPY_BUFFER_SIZE_DEFAULT, + builder.build().getCopyBufferSize()); + } }