From cf7e9e7ef91ce3bf626d3ece9b85abf4bcf0d07d Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Thu, 24 Jan 2013 02:47:30 +0000
Subject: [PATCH 1/4] Add .classpath, .project, .settings and target to
 svn:ignore.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1437844 13f79535-47bb-0310-9956-ffa450edef68

From 16acd69e587956105897ccff2d4dd4b473aaf8bc Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Thu, 24 Jan 2013 21:51:34 +0000
Subject: [PATCH 2/4] HADOOP-9242. Duplicate surefire plugin config in
 hadoop-common. Contributed by Andrey Klochkov.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1438213 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-common/CHANGES.txt               |  3 +++
 hadoop-common-project/hadoop-common/pom.xml | 18 ++++++------------
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0c66e9d4359..f4e36c4e312 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1287,6 +1287,9 @@ Release 0.23.6 - UNRELEASED
     HADOOP-9217. Print thread dumps when hadoop-common tests fail.
     (Andrey Klochkov via suresh)
 
+    HADOOP-9242. Duplicate surefire plugin config in hadoop-common.
+    (Andrey Klochkov via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 06420f2e6f7..10898fde73c 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -297,6 +297,12 @@
             ${startKdc}
             ${kdc.resource.dir}
+
+
+              listener
+              org.apache.hadoop.test.TimedOutTestsListener
+
+
@@ -481,18 +487,6 @@
-
-        org.apache.maven.plugins
-        maven-surefire-plugin
-
-
-
-              listener
-              org.apache.hadoop.test.TimedOutTestsListener
-
-
-
-

From e625e6d29683e5212b1b3c3ea294e0b7a6eb2325 Mon Sep 17 00:00:00 2001
From: Suresh Srinivas
Date: Thu, 24 Jan 2013 22:35:12 +0000
Subject: [PATCH 3/4] HADOOP-9245. mvn clean without running mvn install
 before fails. Contributed by Karthik Kambatla.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1438239 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-common-project/hadoop-common/CHANGES.txt            | 3 +++
 hadoop-common-project/hadoop-common/pom.xml                | 1 +
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml | 1 +
 3 files changed, 5 insertions(+)

diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f4e36c4e312..97ae7b9103e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -149,6 +149,9 @@ Trunk (Unreleased)
     HADOOP-8924. Add maven plugin alternative to shell script to save
     package-info.java. (Chris Nauroth via suresh)
 
+    HADOOP-9245. mvn clean without running mvn install before fails.
+    (Karthik Kambatla via suresh)
+
   BUG FIXES
 
     HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK.
    (Yu Li via eyang)

diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 10898fde73c..23798da256d 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -274,6 +274,7 @@
             version-info
+            compile
               version-info
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 7b91597754e..df174c59b4a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -93,6 +93,7 @@
             version-info
+            compile
               version-info

From 539153a6798a667d39f20972c5ae0936060e2cc1 Mon Sep 17 00:00:00 2001
From: Alejandro Abdelnur
Date: Fri, 25 Jan 2013 00:25:59 +0000
Subject: [PATCH 4/4] MAPREDUCE-2264. Job status exceeds 100% in some cases.
 (devaraj.k and sandyr via tucu)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1438277 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-mapreduce-project/CHANGES.txt          |   3 +
 .../java/org/apache/hadoop/mapred/Merger.java |  25 ++-
 .../task/reduce/MergeManagerImpl.java         |  69 +++++---
 .../task/reduce/OnDiskMapOutput.java          |   5 +-
 .../org/apache/hadoop/mapred/TestMerger.java  | 157 ++++++++++++++++++
 5 files changed, 236 insertions(+), 23 deletions(-)
 create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMerger.java

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index be38a08089f..622b823fb6d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -268,6 +268,9 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-4948. Fix a failing unit test TestYARNRunner.testHistoryServerToken.
     (Junping Du via sseth)
 
+    MAPREDUCE-2264. Job status exceeds 100% in some cases.
+    (devaraj.k and sandyr via tucu)
+
 Release 2.0.2-alpha - 2012-09-07
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
index 484bd89cd4b..d0074707650 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
@@ -218,6 +218,7 @@ public class Merger {
     CompressionCodec codec = null;
     long segmentOffset = 0;
     long segmentLength = -1;
+    long rawDataLength = -1;
 
     Counters.Counter mapOutputsCounter = null;
@@ -234,6 +235,15 @@ public class Merger {
       this(conf, fs, file, 0, fs.getFileStatus(file).getLen(), codec, preserve,
            mergedMapOutputsCounter);
     }
+
+    public Segment(Configuration conf, FileSystem fs, Path file,
+                   CompressionCodec codec, boolean preserve,
+                   Counters.Counter mergedMapOutputsCounter, long rawDataLength)
+        throws IOException {
+      this(conf, fs, file, 0, fs.getFileStatus(file).getLen(), codec, preserve,
+           mergedMapOutputsCounter);
+      this.rawDataLength = rawDataLength;
+    }
 
     public Segment(Configuration conf, FileSystem fs, Path file,
                    long segmentOffset, long segmentLength,
@@ -261,6 +271,11 @@ public class Merger {
     public Segment(Reader reader, boolean preserve) {
       this(reader, preserve, null);
     }
+
+    public Segment(Reader reader, boolean preserve, long rawDataLength) {
+      this(reader, preserve, null);
+      this.rawDataLength = rawDataLength;
+    }
 
     public Segment(Reader reader, boolean preserve,
                    Counters.Counter mapOutputsCounter) {
@@ -300,6 +315,10 @@ public class Merger {
           segmentLength : reader.getLength();
     }
 
+    public long getRawDataLength() {
+      return (rawDataLength > 0) ? rawDataLength : getLength();
+    }
+
     boolean nextRawKey() throws IOException {
       return reader.nextRawKey(key);
     }
@@ -633,7 +652,7 @@ public class Merger {
         totalBytesProcessed = 0;
         totalBytes = 0;
         for (int i = 0; i < segmentsToMerge.size(); i++) {
-          totalBytes += segmentsToMerge.get(i).getLength();
+          totalBytes += segmentsToMerge.get(i).getRawDataLength();
         }
       }
       if (totalBytes != 0) //being paranoid
@@ -702,7 +721,7 @@ public class Merger {
           // size will match(almost) if combiner is not called in merge.
           long inputBytesOfThisMerge = totalBytesProcessed -
                                        bytesProcessedInPrevMerges;
-          totalBytes -= inputBytesOfThisMerge - tempSegment.getLength();
+          totalBytes -= inputBytesOfThisMerge - tempSegment.getRawDataLength();
          if (totalBytes != 0) {
            progPerByte = 1.0f / (float)totalBytes;
          }
@@ -768,7 +787,7 @@ public class Merger {
     for (int i = 0; i < numSegments; i++) {
       // Not handling empty segments here assuming that it would not affect
       // much in calculation of mergeProgress.
-      segmentSizes.add(segments.get(i).getLength());
+      segmentSizes.add(segments.get(i).getRawDataLength());
     }
 
     // If includeFinalMerge is true, allow the following while loop iterate
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index 007897f17f0..fb2fb61cf7d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -89,7 +89,7 @@ public class MergeManagerImpl implements MergeManager {
     new TreeSet>(new MapOutputComparator());
   private final MergeThread, K,V> inMemoryMerger;
 
-  Set onDiskMapOutputs = new TreeSet();
+  Set onDiskMapOutputs = new TreeSet();
   private final OnDiskMerger onDiskMerger;
 
   private final long memoryLimit;
@@ -336,7 +336,7 @@ public class MergeManagerImpl implements MergeManager {
              inMemoryMergedMapOutputs.size());
   }
 
-  public synchronized void closeOnDiskFile(Path file) {
+  public synchronized void closeOnDiskFile(CompressAwarePath file) {
     onDiskMapOutputs.add(file);
 
     if (onDiskMapOutputs.size() >= (2 * ioSortFactor - 1)) {
@@ -356,7 +356,7 @@ public class MergeManagerImpl implements MergeManager {
     List> memory =
       new ArrayList>(inMemoryMergedMapOutputs);
     memory.addAll(inMemoryMapOutputs);
-    List disk = new ArrayList(onDiskMapOutputs);
+    List disk = new ArrayList(onDiskMapOutputs);
     return finalMerge(jobConf, rfs, memory, disk);
   }
 
@@ -456,6 +456,7 @@ public class MergeManagerImpl implements MergeManager {
                         codec, null);
 
     RawKeyValueIterator rIter = null;
+    CompressAwarePath compressAwarePath;
     try {
       LOG.info("Initiating in-memory merge with " + noInMemorySegments +
                " segments...");
@@ -474,6 +475,8 @@ public class MergeManagerImpl implements MergeManager {
         combineCollector.setWriter(writer);
         combineAndSpill(rIter, reduceCombineInputCounter);
       }
+      compressAwarePath = new CompressAwarePath(outputPath,
+          writer.getRawLength());
       writer.close();
 
       LOG.info(reduceId +
@@ -489,12 +492,12 @@ public class MergeManagerImpl implements MergeManager {
     }
 
     // Note the output of the merge
-    closeOnDiskFile(outputPath);
+    closeOnDiskFile(compressAwarePath);
   }
 
   }
 
-  private class OnDiskMerger extends MergeThread {
+  private class OnDiskMerger extends MergeThread {
 
     public OnDiskMerger(MergeManagerImpl manager) {
       super(manager, Integer.MAX_VALUE, exceptionReporter);
@@ -503,7 +506,7 @@ public class MergeManagerImpl implements MergeManager {
     }
 
     @Override
-    public void merge(List inputs) throws IOException {
+    public void merge(List inputs) throws IOException {
       // sanity check
       if (inputs == null || inputs.isEmpty()) {
         LOG.info("No ondisk files to merge...");
@@ -518,8 +521,8 @@ public class MergeManagerImpl implements MergeManager {
                " map outputs on disk. Triggering merge...");
 
       // 1. Prepare the list of files to be merged.
-      for (Path file : inputs) {
-        approxOutputSize += localFS.getFileStatus(file).getLen();
+      for (CompressAwarePath file : inputs) {
+        approxOutputSize += localFS.getFileStatus(file.getPath()).getLen();
       }
 
       // add the checksum length
@@ -536,6 +539,7 @@ public class MergeManagerImpl implements MergeManager {
                         (Class) jobConf.getMapOutputValueClass(),
                         codec, null);
       RawKeyValueIterator iter = null;
+      CompressAwarePath compressAwarePath;
       Path tmpDir = new Path(reduceId.toString());
       try {
         iter = Merger.merge(jobConf, rfs,
@@ -548,13 +552,15 @@ public class MergeManagerImpl implements MergeManager {
                             mergedMapOutputsCounter, null);
 
         Merger.writeFile(iter, writer, reporter, jobConf);
+        compressAwarePath = new CompressAwarePath(outputPath,
+            writer.getRawLength());
         writer.close();
       } catch (IOException e) {
         localFS.delete(outputPath, true);
         throw e;
       }
 
-      closeOnDiskFile(outputPath);
+      closeOnDiskFile(compressAwarePath);
 
       LOG.info(reduceId +
           " Finished merging " + inputs.size() +
@@ -653,7 +659,7 @@ public class MergeManagerImpl implements MergeManager {
   private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs,
                                        List> inMemoryMapOutputs,
-                                       List onDiskMapOutputs
+                                       List onDiskMapOutputs
                                        ) throws IOException {
     LOG.info("finalMerge called with " +
              inMemoryMapOutputs.size() + " in-memory map-outputs and " +
@@ -712,7 +718,8 @@ public class MergeManagerImpl implements MergeManager {
       try {
         Merger.writeFile(rIter, writer, reporter, job);
         // add to list of final disk outputs.
-        onDiskMapOutputs.add(outputPath);
+        onDiskMapOutputs.add(new CompressAwarePath(outputPath,
+            writer.getRawLength()));
       } catch (IOException e) {
         if (null != outputPath) {
           try {
@@ -742,15 +749,19 @@ public class MergeManagerImpl implements MergeManager {
     // segments on disk
     List> diskSegments = new ArrayList>();
     long onDiskBytes = inMemToDiskBytes;
-    Path[] onDisk = onDiskMapOutputs.toArray(new Path[onDiskMapOutputs.size()]);
-    for (Path file : onDisk) {
-      onDiskBytes += fs.getFileStatus(file).getLen();
-      LOG.debug("Disk file: " + file + " Length is " +
-          fs.getFileStatus(file).getLen());
-      diskSegments.add(new Segment(job, fs, file, codec, keepInputs,
+    long rawBytes = inMemToDiskBytes;
+    CompressAwarePath[] onDisk = onDiskMapOutputs.toArray(
+        new CompressAwarePath[onDiskMapOutputs.size()]);
+    for (CompressAwarePath file : onDisk) {
+      long fileLength = fs.getFileStatus(file.getPath()).getLen();
+      onDiskBytes += fileLength;
+      rawBytes += (file.getRawDataLength() > 0) ? file.getRawDataLength() : fileLength;
+
+      LOG.debug("Disk file: " + file + " Length is " + fileLength);
+      diskSegments.add(new Segment(job, fs, file.getPath(), codec, keepInputs,
                                          (file.toString().endsWith(
                                              Task.MERGED_OUTPUT_PREFIX) ?
-                                          null : mergedMapOutputsCounter)
+                                          null : mergedMapOutputsCounter), file.getRawDataLength()
                                         ));
     }
     LOG.info("Merging " + onDisk.length + " files, " +
@@ -786,7 +797,7 @@ public class MergeManagerImpl implements MergeManager {
         return diskMerge;
       }
       finalSegments.add(new Segment(
-            new RawKVIteratorReader(diskMerge, onDiskBytes), true));
+            new RawKVIteratorReader(diskMerge, onDiskBytes), true, rawBytes));
     }
     return Merger.merge(job, fs, keyClass, valueClass,
                  finalSegments, finalSegments.size(), tmpDir,
@@ -794,4 +805,24 @@ public class MergeManagerImpl implements MergeManager {
                  null);
   }
 
+
+  static class CompressAwarePath
+  {
+    private long rawDataLength;
+
+    private Path path;
+
+    public CompressAwarePath(Path path, long rawDataLength) {
+      this.path = path;
+      this.rawDataLength = rawDataLength;
+    }
+
+    public long getRawDataLength() {
+      return rawDataLength;
+    }
+
+    public Path getPath() {
+      return path;
+    }
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
index 2cb86449e5d..bf69798c124 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.MapOutputFile;
 
 import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl.CompressAwarePath;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -112,7 +113,9 @@ class OnDiskMapOutput extends MapOutput {
   @Override
   public void commit() throws IOException {
     localFS.rename(tmpOutputPath, outputPath);
-    merger.closeOnDiskFile(outputPath);
+    CompressAwarePath compressAwarePath = new CompressAwarePath(outputPath,
+        getSize());
+    merger.closeOnDiskFile(compressAwarePath);
   }
 
   @Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMerger.java
new file mode 100644
index 00000000000..9d9eef6d0b6
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestMerger.java
@@ -0,0 +1,157 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
 */
+package org.apache.hadoop.mapred;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.doAnswer;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.RawComparator;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.Counters.Counter;
+import org.apache.hadoop.mapred.IFile.Reader;
+import org.apache.hadoop.mapred.Merger.Segment;
+import org.apache.hadoop.util.Progress;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+public class TestMerger {
+
+  @Test
+  public void testCompressed() throws IOException {
+    testMergeShouldReturnProperProgress(getCompressedSegments());
+  }
+
+  @Test
+  public void testUncompressed() throws IOException {
+    testMergeShouldReturnProperProgress(getUncompressedSegments());
+  }
+
+  @SuppressWarnings( { "deprecation", "unchecked" })
+  public void testMergeShouldReturnProperProgress(
+      List> segments) throws IOException {
+    Configuration conf = new Configuration();
+    JobConf jobConf = new JobConf();
+    FileSystem fs = FileSystem.getLocal(conf);
+    Path tmpDir = new Path("localpath");
+    Class keyClass = (Class) jobConf.getMapOutputKeyClass();
+    Class valueClass = (Class) jobConf.getMapOutputValueClass();
+    RawComparator comparator = jobConf.getOutputKeyComparator();
+    Counter readsCounter = new Counter();
+    Counter writesCounter = new Counter();
+    Progress mergePhase = new Progress();
+    RawKeyValueIterator mergeQueue = Merger.merge(conf, fs, keyClass,
+        valueClass, segments, 2, tmpDir, comparator, getReporter(),
+        readsCounter, writesCounter, mergePhase);
+    Assert.assertEquals(1.0f, mergeQueue.getProgress().get());
+  }
+
+  private Progressable getReporter() {
+    Progressable reporter = new Progressable() {
+      @Override
+      public void progress() {
+      }
+    };
+    return reporter;
+  }
+
+  private List> getUncompressedSegments() throws IOException {
+    List> segments = new ArrayList>();
+    for (int i = 1; i < 1; i++) {
+      segments.add(getUncompressedSegment(i));
+      System.out.println("adding segment");
+    }
+    return segments;
+  }
+
+  private List> getCompressedSegments() throws IOException {
+    List> segments = new ArrayList>();
+    for (int i = 1; i < 1; i++) {
+      segments.add(getCompressedSegment(i));
+      System.out.println("adding segment");
+    }
+    return segments;
+  }
+
+  private Segment getUncompressedSegment(int i) throws IOException {
+    return new Segment(getReader(i), false);
+  }
+
+  private Segment getCompressedSegment(int i) throws IOException {
+    return new Segment(getReader(i), false, 3000l);
+  }
+
+  @SuppressWarnings("unchecked")
+  private Reader getReader(int i) throws IOException {
+    Reader readerMock = mock(Reader.class);
+    when(readerMock.getPosition()).thenReturn(0l).thenReturn(10l).thenReturn(
+        20l);
+    when(
+        readerMock.nextRawKey(any(DataInputBuffer.class)))
+        .thenAnswer(getKeyAnswer("Segment" + i));
+    doAnswer(getValueAnswer("Segment" + i)).when(readerMock).nextRawValue(
+        any(DataInputBuffer.class));
+
+    return readerMock;
+  }
+
+  private Answer getKeyAnswer(final String segmentName) {
+    return new Answer() {
+      int i = 0;
+
+      public Boolean answer(InvocationOnMock invocation)
{
+        Object[] args = invocation.getArguments();
+        DataInputBuffer key = (DataInputBuffer) args[0];
+        if (i++ == 2) {
+          return false;
+        }
+        key.reset(("Segement Key " + segmentName + i).getBytes(), 20);
+        return true;
+      }
+    };
+  }
+
+  private Answer getValueAnswer(final String segmentName) {
+    return new Answer() {
+      int i = 0;
+
+      public Void answer(InvocationOnMock invocation) {
+        Object[] args = invocation.getArguments();
+        DataInputBuffer key = (DataInputBuffer) args[0];
+        if (i++ == 2) {
+          return null;
+        }
+        key.reset(("Segement Value " + segmentName + i).getBytes(), 20);
+        return null;
+      }
+    };
+  }
+}
\ No newline at end of file
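
A note on the MAPREDUCE-2264 change above: Merger reports merge progress as totalBytesProcessed divided by totalBytes, and totalBytesProcessed is accumulated from the uncompressed key/value bytes read out of each segment. Before this patch, totalBytes was summed from Segment.getLength(), which for codec-compressed on-disk segments is the compressed file size, so the ratio could climb past 1.0 and the reported job status past 100%. The patch carries the raw (uncompressed) output length through CompressAwarePath and Segment.getRawDataLength() so numerator and denominator are in the same units. The standalone sketch below is illustrative only; it is not Hadoop code, and the class and field names are invented for the example. It just shows the arithmetic difference:

// Illustrative sketch, not part of the patch: why the progress denominator
// must be in raw (uncompressed) bytes once the numerator is.
public class MergeProgressSketch {

  /** Stand-in for an on-disk map output: compressed size vs. raw size. */
  static final class FakeSegment {
    final long compressedLength;  // what getLength() reported before the patch
    final long rawDataLength;     // what getRawDataLength() reports after it

    FakeSegment(long compressedLength, long rawDataLength) {
      this.compressedLength = compressedLength;
      this.rawDataLength = rawDataLength;
    }
  }

  public static void main(String[] args) {
    // Two segments whose data compresses roughly 3:1 on disk.
    FakeSegment[] segments = {
        new FakeSegment(1000L, 3000L),
        new FakeSegment(2000L, 6000L)
    };

    long compressedTotal = 0;
    long rawTotal = 0;
    for (FakeSegment s : segments) {
      compressedTotal += s.compressedLength;
      rawTotal += s.rawDataLength;
    }

    // The merge decompresses as it reads, so it processes raw bytes.
    long bytesProcessed = 9000L;  // all raw bytes have been merged

    // Pre-patch accounting: raw bytes over compressed total -> 3.00 (300%).
    System.out.printf("progress vs compressed total: %.2f%n",
        (double) bytesProcessed / compressedTotal);

    // Post-patch accounting: raw bytes over raw total -> 1.00 (100%).
    System.out.printf("progress vs raw total: %.2f%n",
        (double) bytesProcessed / rawTotal);
  }
}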
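
One detail worth flagging in the new TestMerger above: both getUncompressedSegments() and getCompressedSegments() build their lists with for (int i = 1; i < 1; i++), a loop that never executes, so testMergeShouldReturnProperProgress merges an empty segment list. A variant of the helper that actually populates the list is sketched below; this is a hypothetical adjustment for illustration, not part of the patch, and whether the 1.0f progress assertion still holds against the mocked readers is not established by the patch text:

  // Hypothetical variant of the helper in TestMerger above (drop-in within
  // that class): builds two mocked segments so the merge consumes data.
  private List<Segment<Text, Text>> getUncompressedSegments() throws IOException {
    List<Segment<Text, Text>> segments = new ArrayList<Segment<Text, Text>>();
    for (int i = 0; i < 2; i++) {  // the committed bound (i = 1; i < 1) never iterates
      segments.add(getUncompressedSegment(i));
    }
    return segments;
  }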