HDFS-4292. Sanity check not correct in RemoteBlockReader2.newBlockReader. Contributed by Binglin Chang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1419675 13f79535-47bb-0310-9956-ffa450edef68
Author: Todd Lipcon
Date:   2012-12-10 19:13:03 +00:00
Parent: 8114bcc6d3
Commit: cb9f516756
3 changed files with 5 additions and 2 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -591,6 +591,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4291. edit log unit tests leave stray test_edit_log_file around
     (Colin Patrick McCabe via todd)
 
+    HDFS-4292. Sanity check not correct in RemoteBlockReader2.newBlockReader
+    (Binglin Chang via todd)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java

@@ -404,7 +404,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
     long firstChunkOffset = checksumInfo.getChunkOffset();
 
     if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
-        firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
      throw new IOException("BlockReader: error in first chunk offset (" +
                            firstChunkOffset + ") startOffset is " +
                            startOffset + " for file " + file);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -413,7 +413,7 @@ public class RemoteBlockReader2 implements BlockReader {
    long firstChunkOffset = checksumInfo.getChunkOffset();
 
    if ( firstChunkOffset < 0 || firstChunkOffset > startOffset ||
-        firstChunkOffset >= (startOffset + checksum.getBytesPerChecksum())) {
+        firstChunkOffset <= (startOffset - checksum.getBytesPerChecksum())) {
      throw new IOException("BlockReader: error in first chunk offset (" +
                            firstChunkOffset + ") startOffset is " +
                            startOffset + " for file " + file);
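
The corrected condition in both readers rejects a first chunk offset that is negative, past the requested start offset, or more than one checksum chunk before it. The sketch below is a minimal, standalone restatement of that invariant (the class and method names are illustrative, not part of the patch); it also shows why the old comparison against startOffset + bytesPerChecksum could never fire, since any offset that large is already rejected by the firstChunkOffset > startOffset clause.

// Minimal sketch of the invariant the corrected check enforces; names here
// (FirstChunkOffsetCheck, isSane) are illustrative, not from the HDFS patch.
// The datanode aligns the read to a checksum-chunk boundary, so the first
// chunk offset must fall in (startOffset - bytesPerChecksum, startOffset].
public class FirstChunkOffsetCheck {

  /** True when firstChunkOffset is a sane chunk-aligned start for startOffset. */
  static boolean isSane(long firstChunkOffset, long startOffset, int bytesPerChecksum) {
    return firstChunkOffset >= 0
        && firstChunkOffset <= startOffset
        && firstChunkOffset > startOffset - bytesPerChecksum;
  }

  public static void main(String[] args) {
    int bytesPerChecksum = 512;   // a common dfs.bytes-per-checksum value
    long startOffset = 1000;      // client wants to read from this offset

    long aligned = (startOffset / bytesPerChecksum) * bytesPerChecksum;  // 512
    System.out.println(isSane(aligned, startOffset, bytesPerChecksum));  // true

    // One full chunk too early: the pre-patch check (firstChunkOffset >=
    // startOffset + bytesPerChecksum) could never catch this case.
    System.out.println(isSane(aligned - bytesPerChecksum, startOffset,
        bytesPerChecksum));  // false with the corrected check

    System.out.println(isSane(-1, startOffset, bytesPerChecksum));  // false
  }
}

The if condition in the patched readers is simply the negation of this predicate: when it holds, newBlockReader throws the IOException shown above.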