diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 390bfe55fd9..48f351821fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -299,6 +299,9 @@ Trunk (Unreleased)
 
     HDFS-4382. Fix typo MAX_NOT_CHANGED_INTERATIONS. (Ted Yu via suresh)
 
+    HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. (Chris Nauroth
+    via atm)
+
 Release 2.0.3-alpha - Unreleased
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 49eb7dc3acc..bbcb2dd2e1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -648,7 +648,7 @@ class BlockSender implements java.io.Closeable {
 
       ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);
 
-      while (endOffset > offset) {
+      while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
         manageOsCache();
         long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
             transferTo, throttler);
@@ -656,16 +656,19 @@
         totalRead += len + (numberOfChunks(len) * checksumSize);
         seqno++;
       }
-      try {
-        // send an empty packet to mark the end of the block
-        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
-            throttler);
-        out.flush();
-      } catch (IOException e) { //socket error
-        throw ioeToSocketException(e);
-      }
+      // If this thread was interrupted, then it did not send the full block.
+      if (!Thread.currentThread().isInterrupted()) {
+        try {
+          // send an empty packet to mark the end of the block
+          sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
+              throttler);
+          out.flush();
+        } catch (IOException e) { //socket error
+          throw ioeToSocketException(e);
+        }
 
-      sentEntireByteRange = true;
+        sentEntireByteRange = true;
+      }
     } finally {
       if (clientTraceFmt != null) {
         final long endTime = System.nanoTime();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
index a4f04a68d44..d00d4341b94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DataTransferThrottler.java
@@ -96,7 +96,12 @@ public class DataTransferThrottler {
         // Wait for next period so that curReserve can be increased.
         try {
           wait( curPeriodEnd - now );
-        } catch (InterruptedException ignored) {}
+        } catch (InterruptedException e) {
+          // Abort throttle and reset interrupted status to make sure other
+          // interrupt handling higher in the call stack executes.
+          Thread.currentThread().interrupt();
+          break;
+        }
       } else if ( now < (curPeriodStart + periodExtension)) {
         curPeriodStart = curPeriodEnd;
         curReserve += bytesPerPeriod;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
index 3ca1cf3ec26..9563361094c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
@@ -158,7 +158,7 @@ public class TestLargeBlock {
    * Test for block size of 2GB + 512B
    * @throws IOException in case of errors
    */
-  @Test
+  @Test(timeout = 120000)
   public void testLargeBlockSize() throws IOException {
     final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
     runTest(blockSize);
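
Note on the fix: it works in two halves. DataTransferThrottler#throttle now restores the thread's interrupt status instead of swallowing InterruptedException, and BlockSender's send loop checks Thread.currentThread().isInterrupted(), so an interrupted transfer terminates instead of looping until the test times out. The following standalone sketch (hypothetical names, not HDFS code) demonstrates that interplay; if the catch block dropped the interrupt flag, the worker loop below would never observe it.

// Hypothetical sketch, not HDFS code: shows why restoring the interrupt
// flag in the throttler lets interrupt handling higher in the call stack run.
public class InterruptAwareThrottleSketch {

  // Simplified stand-in for DataTransferThrottler#throttle(long).
  private synchronized void throttle(long waitMillis) {
    try {
      wait(waitMillis);
    } catch (InterruptedException e) {
      // Abort the throttle and reset interrupted status; swallowing the
      // exception here would leave the caller's loop condition always true.
      Thread.currentThread().interrupt();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    InterruptAwareThrottleSketch throttler = new InterruptAwareThrottleSketch();
    Thread sender = new Thread(() -> {
      long offset = 0;
      final long endOffset = Long.MAX_VALUE;
      // Mirrors the new loop condition in BlockSender: stop sending
      // packets once this thread has been interrupted.
      while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
        throttler.throttle(100);
        offset++;
      }
      System.out.println("sender exited cleanly at offset " + offset);
    });
    sender.start();
    Thread.sleep(300);
    sender.interrupt(); // e.g. the DataNode tearing down the transfer
    sender.join(5000);
  }
}

With the interrupt propagated correctly, the @Test(timeout = 120000) annotation on testLargeBlockSize is only a backstop; the test should finish well inside the two-minute budget.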