diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CompressedSplitLineReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CompressedSplitLineReader.java
index ef51f5cc678..9d0e949a10b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CompressedSplitLineReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CompressedSplitLineReader.java
@@ -165,4 +165,9 @@ public class CompressedSplitLineReader extends SplitLineReader {
   public boolean needAdditionalRecordAfterSplit() {
     return !finished && needAdditionalRecord;
   }
+
+  @Override
+  protected void unsetNeedAdditionalRecordAfterSplit() {
+    needAdditionalRecord = false;
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
index f50e1efb7ba..844250bf2b2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
@@ -654,4 +654,33 @@ public class TestLineRecordReader {
     assertFalse(reader.next(key, value));
     assertEquals(12, reader.getPos());
   }
+
+  @Test
+  public void testBzipWithMultibyteDelimiter() throws IOException {
+    String testFileName = "compressedMultibyteDelimiter.txt.bz2";
+    // firstSplitLength < (headers + blockMarker) will pass always since no
+    // records will be read (in the test file that is byte 0..9)
+    // firstSplitLength > (compressed file length - one compressed block
+    // size + 1) will also always pass since the second split will be empty
+    // (833 bytes is the last block start in the used data file)
+    int firstSplitLength = 100;
+    URL testFileUrl = getClass().getClassLoader().getResource(testFileName);
+    assertNotNull("Cannot find " + testFileName, testFileUrl);
+    File testFile = new File(testFileUrl.getFile());
+    long testFileSize = testFile.length();
+    Path testFilePath = new Path(testFile.getAbsolutePath());
+    assertTrue("Split size is smaller than header length",
+        firstSplitLength > 9);
+    assertTrue("Split size is larger than compressed file size " +
+        testFilePath, testFileSize > firstSplitLength);
+
+    Configuration conf = new Configuration();
+    conf.setInt(org.apache.hadoop.mapreduce.lib.input.
+        LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
+
+    String delimiter = "\r\r\n";
+    conf.set("textinputformat.record.delimiter", delimiter);
+    testSplitRecordsForFile(conf, firstSplitLength, testFileSize,
+        testFilePath);
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
index 6819af7e97b..716c4a6c081 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
@@ -617,4 +617,33 @@ public class TestLineRecordReader {
     // Key should be 12 right after "123456789\r\r\n"
     assertEquals(12, key.get());
   }
+
+  @Test
+  public void testBzipWithMultibyteDelimiter() throws IOException {
+    String testFileName = "compressedMultibyteDelimiter.txt.bz2";
+    // firstSplitLength < (headers + blockMarker) will pass always since no
+    // records will be read (in the test file that is byte 0..9)
+    // firstSplitLength > (compressed file length - one compressed block
+    // size + 1) will also always pass since the second split will be empty
+    // (833 bytes is the last block start in the used data file)
+    int firstSplitLength = 100;
+    URL testFileUrl = getClass().getClassLoader().getResource(testFileName);
+    assertNotNull("Cannot find " + testFileName, testFileUrl);
+    File testFile = new File(testFileUrl.getFile());
+    long testFileSize = testFile.length();
+    Path testFilePath = new Path(testFile.getAbsolutePath());
+    assertTrue("Split size is smaller than header length",
+        firstSplitLength > 9);
+    assertTrue("Split size is larger than compressed file size " +
+        testFilePath, testFileSize > firstSplitLength);
+
+    Configuration conf = new Configuration();
+    conf.setInt(org.apache.hadoop.mapreduce.lib.input.
+        LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
+
+    String delimiter = "\r\r\n";
+    conf.set("textinputformat.record.delimiter", delimiter);
+    testSplitRecordsForFile(conf, firstSplitLength, testFileSize,
+        testFilePath);
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/compressedMultibyteDelimiter.txt.bz2 b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/compressedMultibyteDelimiter.txt.bz2
new file mode 100644
index 00000000000..f8e178f08a8
Binary files /dev/null and b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/compressedMultibyteDelimiter.txt.bz2 differ