HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. Contributed by Chris Nauroth
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1432226 13f79535-47bb-0310-9956-ffa450edef68
parent 2f33ec0298
commit 0dadaa47c3
@@ -393,7 +393,11 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4306. PBHelper.convertLocatedBlock miss convert BlockToken. (Binglin
     Chang via atm)
 
     HDFS-4387. test_libhdfs_threaded SEGV on OpenJDK 7. (Colin McCabe via eli)
     HDFS-4384. test_libhdfs_threaded gets SEGV if JNIEnv cannot be
     initialized. (Colin McCabe via eli)
+
+    HDFS-4328. TestLargeBlock#testLargeBlockSize is timing out. (Chris Nauroth
+    via atm)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
@@ -648,7 +648,7 @@ class BlockSender implements java.io.Closeable {
 
       ByteBuffer pktBuf = ByteBuffer.allocate(pktBufSize);
 
-      while (endOffset > offset) {
+      while (endOffset > offset && !Thread.currentThread().isInterrupted()) {
        manageOsCache();
        long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
            transferTo, throttler);
@@ -656,16 +656,19 @@ class BlockSender implements java.io.Closeable {
        totalRead += len + (numberOfChunks(len) * checksumSize);
        seqno++;
      }
-      try {
-        // send an empty packet to mark the end of the block
-        sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
-            throttler);
-        out.flush();
-      } catch (IOException e) { //socket error
-        throw ioeToSocketException(e);
-      }
-
-      sentEntireByteRange = true;
+      // If this thread was interrupted, then it did not send the full block.
+      if (!Thread.currentThread().isInterrupted()) {
+        try {
+          // send an empty packet to mark the end of the block
+          sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks, transferTo,
+              throttler);
+          out.flush();
+        } catch (IOException e) { //socket error
+          throw ioeToSocketException(e);
+        }
+
+        sentEntireByteRange = true;
+      }
     } finally {
       if (clientTraceFmt != null) {
         final long endTime = System.nanoTime();
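The two BlockSender hunks above make the send loop stop as soon as its thread is interrupted and only report a complete transfer when it was not. Below is a minimal, self-contained sketch of that pattern, assuming a hypothetical sender over a plain OutputStream; it is not the BlockSender API, whose packet format and checksum handling are far more involved.

import java.io.IOException;
import java.io.OutputStream;

/** Sketch of an interrupt-aware send loop (hypothetical, not BlockSender). */
class InterruptAwareSender {
  private final OutputStream out;
  private final byte[] data;
  private final int packetSize;
  private boolean sentEntireByteRange = false;

  InterruptAwareSender(OutputStream out, byte[] data, int packetSize) {
    this.out = out;
    this.data = data;
    this.packetSize = packetSize;
  }

  void sendBlock() throws IOException {
    int offset = 0;
    // Stop looping as soon as the thread is interrupted instead of pushing
    // the remaining packets through a possibly stuck connection.
    while (offset < data.length && !Thread.currentThread().isInterrupted()) {
      int len = Math.min(packetSize, data.length - offset);
      out.write(data, offset, len); // stand-in for sendPacket(...)
      offset += len;
    }
    // An interrupted sender did not transfer the whole range, so only flush
    // and record success when the loop finished normally.
    if (!Thread.currentThread().isInterrupted()) {
      out.flush();
      sentEntireByteRange = true;
    }
  }

  boolean sentEntireByteRange() {
    return sentEntireByteRange;
  }
}

Checking the flag with isInterrupted() rather than the static Thread.interrupted() leaves the interrupted status set, so callers further up the stack can still see it, which is the same choice the hunk above makes.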
@@ -96,7 +96,12 @@ public class DataTransferThrottler {
        // Wait for next period so that curReserve can be increased.
        try {
          wait( curPeriodEnd - now );
-        } catch (InterruptedException ignored) {}
+        } catch (InterruptedException e) {
+          // Abort throttle and reset interrupted status to make sure other
+          // interrupt handling higher in the call stack executes.
+          Thread.currentThread().interrupt();
+          break;
+        }
      } else if ( now < (curPeriodStart + periodExtension)) {
        curPeriodStart = curPeriodEnd;
        curReserve += bytesPerPeriod;
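The throttler hunk applies the standard treatment for an InterruptedException caught inside a wait loop: restore the thread's interrupted status and stop throttling so the interrupt can be handled higher up. Here is a stand-alone sketch of that pattern, with illustrative field names rather than the actual DataTransferThrottler bookkeeping.

/** Sketch of an interrupt-friendly byte-rate limiter (illustrative only). */
class SimpleThrottler {
  private final long bytesPerPeriod;
  private final long periodMillis;
  private long periodEnd;
  private long reserve;

  SimpleThrottler(long bytesPerPeriod, long periodMillis) {
    this.bytesPerPeriod = bytesPerPeriod;
    this.periodMillis = periodMillis;
    this.reserve = bytesPerPeriod;
    this.periodEnd = System.currentTimeMillis() + periodMillis;
  }

  synchronized void throttle(long numBytes) {
    reserve -= numBytes;
    while (reserve <= 0) {
      long now = System.currentTimeMillis();
      if (now < periodEnd) {
        try {
          // Sleep until the current period ends and the budget is refilled.
          wait(periodEnd - now);
        } catch (InterruptedException e) {
          // Abort throttling, but re-set the interrupted status so interrupt
          // handling higher in the call stack still runs.
          Thread.currentThread().interrupt();
          return;
        }
      } else {
        // A new period has started; replenish the byte budget.
        periodEnd = now + periodMillis;
        reserve += bytesPerPeriod;
      }
    }
  }
}

Swallowing the exception, as the removed catch block did, leaves the interrupted flag cleared, so an interrupted caller such as BlockSender would keep looping as if nothing had happened.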
@@ -158,7 +158,7 @@ public class TestLargeBlock {
    * Test for block size of 2GB + 512B
    * @throws IOException in case of errors
    */
-  @Test
+  @Test(timeout = 120000)
   public void testLargeBlockSize() throws IOException {
     final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
     runTest(blockSize);
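The test hunk uses JUnit 4's per-test timeout: with @Test(timeout = 120000), JUnit runs the test in a separate thread and fails it if it has not finished within 120 seconds, instead of letting a hung transfer stall the whole test run. A minimal illustration with a placeholder test body (not TestLargeBlock):

import org.junit.Test;

public class TimeoutExample {
  /**
   * Fails with a timeout error if the body has not returned within the
   * given number of milliseconds.
   */
  @Test(timeout = 120000)
  public void finishesWithinTwoMinutes() throws Exception {
    Thread.sleep(1000); // placeholder work; must complete well under 120 s
  }
}

JUnit enforces the timeout by interrupting the test thread, which is presumably why the BlockSender and DataTransferThrottler changes above make the data transfer path respond to interruption.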