MAPREDUCE-6724. Single shuffle to memory must not exceed Integer#MAX_VALUE. (Haibo Chen via gera)

(cherry picked from commit 6890d5b472)

Conflicts:

	hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java
This commit is contained in:
Jason Lowe 2016-10-28 14:40:28 +00:00
parent 8a664eba7d
commit 0a405c4f71
2 changed files with 29 additions and 11 deletions

View File

@ -99,7 +99,9 @@ public class MergeManagerImpl<K, V> implements MergeManager<K, V> {
private long usedMemory;
private long commitMemory;
private final long maxSingleShuffleLimit;
@VisibleForTesting
final long maxSingleShuffleLimit;
private final int memToMemMergeOutputsThreshold;
private final long mergeThreshold;
@ -187,10 +189,16 @@ public class MergeManagerImpl<K, V> implements MergeManager<K, V> {
usedMemory = 0L;
commitMemory = 0L;
this.maxSingleShuffleLimit =
(long)(memoryLimit * singleShuffleMemoryLimitPercent);
this.memToMemMergeOutputsThreshold =
jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
long maxSingleShuffleLimitConfiged =
(long)(memoryLimit * singleShuffleMemoryLimitPercent);
if(maxSingleShuffleLimitConfiged > Integer.MAX_VALUE) {
maxSingleShuffleLimitConfiged = Integer.MAX_VALUE;
LOG.info("The max number of bytes for a single in-memory shuffle cannot" +
" be larger than Integer.MAX_VALUE. Setting it to Integer.MAX_VALUE");
}
this.maxSingleShuffleLimit = maxSingleShuffleLimitConfiged;
this.memToMemMergeOutputsThreshold =
jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor);
this.mergeThreshold = (long)(this.memoryLimit *
jobConf.getFloat(
MRJobConfig.SHUFFLE_MERGE_PERCENT,
@ -249,17 +257,13 @@ public class MergeManagerImpl<K, V> implements MergeManager<K, V> {
/**
 * Blocks the calling fetcher until the in-memory merger has completed a
 * merge pass, i.e. until memory may have been freed for another shuffle.
 * Delegates entirely to the in-memory merge thread's wait primitive.
 *
 * @throws InterruptedException if the calling thread is interrupted
 *         while waiting for the merge to finish
 */
public void waitForResource() throws InterruptedException {
inMemoryMerger.waitForMerge();
}
// NOTE(review): in this diff this helper appears to be the pre-commit code
// that the commit removes — reserve() now compares against
// maxSingleShuffleLimit directly (see the replacement condition below).
// Returns true when the requested transfer is strictly smaller than the
// single-shuffle in-memory limit, meaning the map output may be held in
// memory rather than spilled to disk.
private boolean canShuffleToMemory(long requestedSize) {
return (requestedSize < maxSingleShuffleLimit);
}
@Override
public synchronized MapOutput<K,V> reserve(TaskAttemptID mapId,
long requestedSize,
int fetcher
) throws IOException {
if (!canShuffleToMemory(requestedSize)) {
if (requestedSize > maxSingleShuffleLimit) {
LOG.info(mapId + ": Shuffling to disk since " + requestedSize +
" is greater than maxSingleShuffleLimit (" +
maxSingleShuffleLimit + ")");

View File

@ -41,6 +41,7 @@ import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MROutputFiles;
import org.apache.hadoop.mapred.MapOutputFile;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
import org.junit.Assert;
import org.junit.Test;
@ -288,5 +289,18 @@ public class TestMergeManager {
final long maxInMemReduce = mgr.getMaxInMemReduceLimit();
assertTrue("Large in-memory reduce area unusable: " + maxInMemReduce,
maxInMemReduce > Integer.MAX_VALUE);
assertEquals("maxSingleShuffleLimit to be capped at Integer.MAX_VALUE",
Integer.MAX_VALUE, mgr.maxSingleShuffleLimit);
verifyReservedMapOutputType(mgr, 10L, "MEMORY");
verifyReservedMapOutputType(mgr, 1L + Integer.MAX_VALUE, "DISK");
}
/**
 * Reserves {@code size} bytes from the given merge manager and asserts
 * that the resulting {@link MapOutput} reports the expected shuffle
 * destination (e.g. "MEMORY" or "DISK") via its description, then
 * releases the reservation so later calls start from a clean state.
 *
 * @param mgr the merge manager under test
 * @param size number of bytes to reserve for the simulated map output
 * @param expectedShuffleMode expected value of
 *        {@code MapOutput#getDescription()} for this size
 * @throws IOException if the reservation fails
 */
private void verifyReservedMapOutputType(MergeManagerImpl<Text, Text> mgr,
long size, String expectedShuffleMode) throws IOException {
final TaskAttemptID mapId = TaskAttemptID.forName("attempt_0_1_m_1_1");
final MapOutput<Text, Text> mapOutput = mgr.reserve(mapId, size, 1);
assertEquals("Shuffled bytes: " + size, expectedShuffleMode,
mapOutput.getDescription());
mgr.unreserve(size);
}
}