HDDS-1373. KeyOutputStream, close after write request fails after retries, runs into IllegalArgumentException.(#729)
parent 082f1e0437
commit d608be660f
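In short, the diff below adds an ExcludeList.clear() helper and, in KeyOutputStream, a cleanup()/markStreamClosed() pair that is invoked whenever a write or flush path fails, so that a later close() finds no stale buffers or stream entries and no longer fails with IllegalArgumentException. The following condensed sketch illustrates that pattern outside of Ozone; the class, fields, and helper method here are simplified stand-ins (only markStreamClosed() and cleanup() mirror names from the diff), not the actual KeyOutputStream implementation:

import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.List;

// Simplified stand-in for KeyOutputStream: once a write fails, release the
// per-key bookkeeping and flag the stream closed so close() cannot trip over
// stale entries.
public class FailFastKeyStream extends OutputStream {
  // stand-in for the list of BlockOutputStreamEntry objects
  private final List<byte[]> streamEntries = new ArrayList<>();
  private boolean closed = false;

  @Override
  public void write(int b) throws IOException {
    if (closed) {
      throw new IOException("Stream is closed");
    }
    try {
      writeToCurrentBlock(b); // may throw once retries are exhausted
    } catch (IOException e) {
      markStreamClosed();     // same pattern as the diff: cleanup, then mark closed
      throw e;
    }
  }

  private void writeToCurrentBlock(int b) throws IOException {
    streamEntries.add(new byte[] {(byte) b});
  }

  // Mirrors the markStreamClosed() added to KeyOutputStream in this commit.
  private void markStreamClosed() {
    cleanup();
    closed = true;
  }

  // Mirrors cleanup(): drop the per-key bookkeeping eagerly.
  private void cleanup() {
    streamEntries.clear();
  }

  @Override
  public void close() throws IOException {
    if (closed) {
      return; // already cleaned up after the failed write; nothing to commit
    }
    closed = true;
    cleanup();
  }
}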
@@ -102,4 +102,10 @@ public class ExcludeList {
     });
     return excludeList;
   }
+
+  public void clear() {
+    datanodes.clear();
+    containerIds.clear();
+    pipelineIds.clear();
+  }
 }
@@ -295,6 +295,7 @@ public class KeyOutputStream extends OutputStream {
       throws IOException {
     int succeededAllocates = 0;
     while (len > 0) {
+      try {
       if (streamEntries.size() <= currentStreamIndex) {
         Preconditions.checkNotNull(omClient);
         // allocate a new block, if a exception happens, log an error and
@@ -304,7 +305,8 @@ public class KeyOutputStream extends OutputStream {
           succeededAllocates += 1;
         } catch (IOException ioe) {
           LOG.error("Try to allocate more blocks for write failed, already "
-              + "allocated " + succeededAllocates + " blocks for this write.");
+              + "allocated " + succeededAllocates
+              + " blocks for this write.");
           throw ioe;
         }
       }
@@ -316,7 +318,7 @@ public class KeyOutputStream extends OutputStream {
       // length(len) will be in int range if the call is happening through
       // write API of blockOutputStream. Length can be in long range if it comes
       // via Exception path.
-      int writeLen = Math.min((int)len, (int) current.getRemaining());
+      int writeLen = Math.min((int) len, (int) current.getRemaining());
       long currentPos = current.getWrittenDataLength();
       try {
         if (retry) {
@@ -349,6 +351,10 @@ public class KeyOutputStream extends OutputStream {
       }
       len -= writeLen;
       off += writeLen;
+      } catch (Exception e) {
+        markStreamClosed();
+        throw e;
+      }
     }
   }

@@ -365,7 +371,7 @@ public class KeyOutputStream extends OutputStream {
     // pre allocated blocks available.

     // This will be called only to discard the next subsequent unused blocks
-    // in the sreamEntryList.
+    // in the streamEntryList.
     if (streamIndex < streamEntries.size()) {
       ListIterator<BlockOutputStreamEntry> streamEntryIterator =
           streamEntries.listIterator(streamIndex);
@@ -398,6 +404,20 @@ public class KeyOutputStream extends OutputStream {
       }
     }
   }
+
+  private void cleanup() {
+    if (excludeList != null) {
+      excludeList.clear();
+      excludeList = null;
+    }
+    if (bufferPool != null) {
+      bufferPool.clearBufferPool();
+    }
+
+    if (streamEntries != null) {
+      streamEntries.clear();
+    }
+  }
   /**
    * It performs following actions :
    * a. Updates the committed length at datanode for the current stream in
@@ -418,8 +438,7 @@ public class KeyOutputStream extends OutputStream {
       closedContainerException = checkIfContainerIsClosed(t);
     }
     PipelineID pipelineId = null;
-    long totalSuccessfulFlushedData =
-        streamEntry.getTotalAckDataLength();
+    long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength();
     //set the correct length for the current stream
     streamEntry.setCurrentPosition(totalSuccessfulFlushedData);
     long bufferedDataLen = computeBufferData();
@@ -450,8 +469,8 @@ public class KeyOutputStream extends OutputStream {
     if (closedContainerException) {
       // discard subsequent pre allocated blocks from the streamEntries list
       // from the closed container
-      discardPreallocatedBlocks(streamEntry.getBlockID().getContainerID(),
-          null, streamIndex + 1);
+      discardPreallocatedBlocks(streamEntry.getBlockID().getContainerID(), null,
+          streamIndex + 1);
     } else {
       // In case there is timeoutException or Watch for commit happening over
       // majority or the client connection failure to the leader in the
@@ -475,6 +494,11 @@ public class KeyOutputStream extends OutputStream {
     }
   }

+  private void markStreamClosed() {
+    cleanup();
+    closed = true;
+  }
+
   private void handleRetry(IOException exception, long len) throws IOException {
     RetryPolicy.RetryAction action;
     try {
@@ -586,13 +610,15 @@ public class KeyOutputStream extends OutputStream {
       return;
     }
     while (true) {
+      try {
       int size = streamEntries.size();
       int streamIndex =
          currentStreamIndex >= size ? size - 1 : currentStreamIndex;
      BlockOutputStreamEntry entry = streamEntries.get(streamIndex);
      if (entry != null) {
        try {
-          Collection<DatanodeDetails> failedServers = entry.getFailedServers();
+          Collection<DatanodeDetails> failedServers =
+              entry.getFailedServers();
          // failed servers can be null in case there is no data written in the
          // stream
          if (failedServers != null && !failedServers.isEmpty()) {
@@ -620,6 +646,10 @@ public class KeyOutputStream extends OutputStream {
        }
      }
      break;
+      } catch (Exception e) {
+        markStreamClosed();
+        throw e;
+      }
    }
  }
@@ -658,7 +688,7 @@ public class KeyOutputStream extends OutputStream {
    } catch (IOException ioe) {
      throw ioe;
    } finally {
-      bufferPool.clearBufferPool();
+      cleanup();
    }
  }

@@ -189,6 +189,7 @@ public class TestBlockOutputStream {
     // flush ensures watchForCommit updates the total length acknowledged
     Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());

+    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
     // now close the stream, It will update the ack length after watchForCommit
     key.close();

@@ -208,7 +209,7 @@ public class TestBlockOutputStream {
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     validateData(keyName, data1);
   }

@@ -263,6 +264,7 @@ public class TestBlockOutputStream {
     // Now do a flush. This will flush the data and update the flush length and
     // the map.
     key.flush();
+    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
     // flush is a sync call, all pending operations will complete
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
@@ -302,7 +304,7 @@ public class TestBlockOutputStream {
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
     Assert.assertEquals(totalOpCount + 3,
         metrics.getTotalOpCount());
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     validateData(keyName, data1);
   }

@@ -397,6 +399,7 @@ public class TestBlockOutputStream {
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
     Assert.assertEquals(totalOpCount + 3,
         metrics.getTotalOpCount());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     validateData(keyName, data1);
   }

@@ -454,6 +457,7 @@ public class TestBlockOutputStream {
         blockOutputStream.getCommitIndex2flushedDataMap().size());

     Assert.assertEquals(0, blockOutputStream.getTotalAckDataLength());
+    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
     key.close();
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
@@ -471,7 +475,7 @@ public class TestBlockOutputStream {
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     validateData(keyName, data1);
   }

@@ -536,6 +540,7 @@ public class TestBlockOutputStream {
     // Now do a flush. This will flush the data and update the flush length and
     // the map.
     key.flush();
+    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -570,7 +575,7 @@ public class TestBlockOutputStream {
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     validateData(keyName, data1);
   }

@@ -638,6 +643,7 @@ public class TestBlockOutputStream {
     // Now do a flush. This will flush the data and update the flush length and
     // the map.
     key.flush();
+    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -673,7 +679,7 @@ public class TestBlockOutputStream {
         metrics.getTotalOpCount());
     Assert.assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(1, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     validateData(keyName, data1);
   }

@@ -234,6 +234,7 @@ public class TestBlockOutputStreamWithFailures {
     // and one flush for partial chunk
     key.flush();

+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     Assert.assertTrue(keyOutputStream.checkForException(blockOutputStream
         .getIoException()) instanceof ContainerNotOpenException);

@@ -249,7 +250,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -372,6 +373,7 @@ public class TestBlockOutputStreamWithFailures {
     key.flush();
     Assert.assertEquals(2, raftClient.getCommitInfoMap().size());

+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     // now close the stream, It will update the ack length after watchForCommit
     key.close();
     Assert
@@ -382,7 +384,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -515,13 +517,14 @@ public class TestBlockOutputStreamWithFailures {
     // Make sure the retryCount is reset after the exception is handled
     Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
     // now close the stream, It will update the ack length after watchForCommit
+
+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     key.close();
     Assert
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert
         .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -538,7 +541,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     validateData(keyName, data1);
   }

@@ -637,6 +640,7 @@ public class TestBlockOutputStreamWithFailures {
     // and one flush for partial chunk
     key.flush();

+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     Assert.assertTrue(keyOutputStream.checkForException(blockOutputStream
         .getIoException()) instanceof ContainerNotOpenException);
     // Make sure the retryCount is reset after the exception is handled
@@ -652,7 +656,6 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -663,7 +666,7 @@ public class TestBlockOutputStreamWithFailures {
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
     Assert.assertEquals(totalOpCount + 9,
         metrics.getTotalOpCount());
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2);
+    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0);
     // Written the same data twice
     String dataString = new String(data1, UTF_8);
     validateData(keyName, dataString.concat(dataString).getBytes());
@@ -774,7 +777,6 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -785,7 +787,7 @@ public class TestBlockOutputStreamWithFailures {
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
     Assert.assertEquals(totalOpCount + 9,
         metrics.getTotalOpCount());
-    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2);
+    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0);
     // Written the same data twice
     String dataString = new String(data1, UTF_8);
     validateData(keyName, dataString.concat(dataString).getBytes());
@@ -911,6 +913,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
     // commitInfoMap will remain intact as there is no server failure
     Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     // now close the stream, It will update the ack length after watchForCommit
     key.close();
     // make sure the bufferPool is empty
@@ -919,7 +922,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(dataLength, blockOutputStream.getTotalAckDataLength());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -1046,6 +1049,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert.assertEquals(1, raftClient.getCommitInfoMap().size());
     // Make sure the retryCount is reset after the exception is handled
     Assert.assertTrue(keyOutputStream.getRetryCount() == 0);
+    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
     // now close the stream, It will update the ack length after watchForCommit
     key.close();
     Assert
@@ -1054,7 +1058,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -1071,6 +1075,7 @@ public class TestBlockOutputStreamWithFailures {
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock));
     Assert.assertEquals(totalOpCount + 22,
         metrics.getTotalOpCount());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     // Written the same data twice
     String dataString = new String(data1, UTF_8);
     cluster.restartHddsDatanode(pipeline.getNodes().get(0), true);
@@ -1198,7 +1203,7 @@ public class TestBlockOutputStreamWithFailures {
     Assert
         .assertEquals(0, blockOutputStream.getBufferPool().computeBufferData());
     Assert.assertNull(blockOutputStream.getCommitIndex2flushedDataMap());
-    Assert.assertEquals(2, keyOutputStream.getStreamEntries().size());
+    Assert.assertEquals(0, keyOutputStream.getStreamEntries().size());
     Assert.assertEquals(pendingWriteChunkCount,
         metrics.getContainerOpsMetrics(ContainerProtos.Type.WriteChunk));
     Assert.assertEquals(pendingPutBlockCount,
@@ -19,10 +19,12 @@ package org.apache.hadoop.ozone.client.rpc;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.storage.BlockOutputStream;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -66,6 +68,7 @@ public class TestOzoneClientRetriesOnException {
   private String volumeName;
   private String bucketName;
   private String keyString;
+  private XceiverClientManager xceiverClientManager;

   /**
    * Create a MiniDFSCluster for testing.
@@ -84,8 +87,6 @@ public class TestOzoneClientRetriesOnException {
     conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
     conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
     conf.set(OzoneConfigKeys.OZONE_CLIENT_CHECKSUM_TYPE, "NONE");
-    conf.setInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, 2);
-    conf.set(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY, "1s");
     conf.setInt(OzoneConfigKeys.OZONE_CLIENT_MAX_RETRIES, 3);
     conf.setQuietMode(false);
     cluster = MiniOzoneCluster.newBuilder(conf)
@@ -100,6 +101,7 @@ public class TestOzoneClientRetriesOnException {
     //the easiest way to create an open container is creating a key
     client = OzoneClientFactory.getClient(conf);
     objectStore = client.getObjectStore();
+    xceiverClientManager = new XceiverClientManager(conf);
     keyString = UUID.randomUUID().toString();
     volumeName = "testblockoutputstreamwithretries";
     bucketName = volumeName;
@@ -152,8 +154,9 @@ public class TestOzoneClientRetriesOnException {
         .getIoException()) instanceof GroupMismatchException);
     Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds()
         .contains(pipeline.getId()));
-    key.close();
     Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 2);
+    key.close();
+    Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0);
     validateData(keyName, data1);
   }

@@ -171,13 +174,8 @@ public class TestOzoneClientRetriesOnException {
     byte[] data1 =
         ContainerTestHelper.getFixedLengthString(keyString, dataLength)
             .getBytes(UTF_8);
-    key.write(data1);
-
-    OutputStream stream = entries.get(0).getOutputStream();
-    Assert.assertTrue(stream instanceof BlockOutputStream);
-    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
-    List<PipelineID> pipelineList = new ArrayList<>();
     long containerID;
+    List<Long> containerList = new ArrayList<>();
     for (BlockOutputStreamEntry entry : entries) {
       containerID = entry.getBlockID().getContainerID();
       ContainerInfo container =
@@ -186,18 +184,40 @@ public class TestOzoneClientRetriesOnException {
       Pipeline pipeline =
           cluster.getStorageContainerManager().getPipelineManager()
               .getPipeline(container.getPipelineID());
-      pipelineList.add(pipeline.getId());
+      XceiverClientSpi xceiverClient =
+          xceiverClientManager.acquireClient(pipeline);
+      if (!containerList.contains(containerID)) {
+        xceiverClient.sendCommand(ContainerTestHelper
+            .getCreateContainerRequest(containerID, pipeline));
+      }
-    ContainerTestHelper.waitForPipelineClose(key, cluster, false);
+      xceiverClientManager.releaseClient(xceiverClient, false);
+    }
+    key.write(data1);
+    OutputStream stream = entries.get(0).getOutputStream();
+    Assert.assertTrue(stream instanceof BlockOutputStream);
+    BlockOutputStream blockOutputStream = (BlockOutputStream) stream;
+    ContainerTestHelper.waitForContainerClose(key, cluster);
     try {
       key.write(data1);
+      Assert.fail("Expected exception not thrown");
     } catch (IOException ioe) {
       Assert.assertTrue(keyOutputStream.checkForException(blockOutputStream
-          .getIoException()) instanceof GroupMismatchException);
+          .getIoException()) instanceof ContainerNotOpenException);
       Assert.assertTrue(ioe.getMessage().contains(
           "Retry request failed. retries get failed due to exceeded maximum "
               + "allowed retries number: 3"));
     }
+    try {
+      key.flush();
+      Assert.fail("Expected exception not thrown");
+    } catch (IOException ioe) {
+      Assert.assertTrue(ioe.getMessage().contains("Stream is closed"));
+    }
+    try {
+      key.close();
+    } catch (IOException ioe) {
+      Assert.fail("Expected should not be thrown");
+    }
   }

   private OzoneOutputStream createKey(String keyName, ReplicationType type,
@@ -727,7 +727,9 @@ public final class ContainerTestHelper {
         keyOutputStream.getLocationInfoList();
     List<Long> containerIdList = new ArrayList<>();
     for (OmKeyLocationInfo info : locationInfoList) {
-      containerIdList.add(info.getContainerID());
+      long id = info.getContainerID();
+      if (!containerIdList.contains(id))
+        containerIdList.add(id);
     }
     Assert.assertTrue(!containerIdList.isEmpty());
     waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
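The new retry test above pins down the client-visible contract after this change: when a write fails because the retry limit is exceeded, the key stream behaves as closed, a later flush() fails with "Stream is closed", and close() returns without throwing. Below is a self-contained, hypothetical illustration of that contract; FailingStream is an invented stand-in, not an Ozone class, and the messages simply echo what the test asserts.

import java.io.IOException;
import java.io.OutputStream;

// Hypothetical, self-contained illustration of the behaviour asserted by the new
// retry test: write() fails once retries are exhausted, flush() then reports a
// closed stream, and close() completes without IllegalArgumentException.
public class CloseAfterFailureContract {
  static class FailingStream extends OutputStream {
    private boolean closed = false;

    @Override
    public void write(int b) throws IOException {
      closed = true; // simulates markStreamClosed() after retries are exhausted
      throw new IOException("Retry request failed. retries get failed due to "
          + "exceeded maximum allowed retries number: 3");
    }

    @Override
    public void flush() throws IOException {
      if (closed) {
        throw new IOException("Stream is closed");
      }
    }

    @Override
    public void close() {
      // no-op: per-key state was already cleaned up when the write failed
    }
  }

  public static void main(String[] args) throws Exception {
    FailingStream key = new FailingStream();
    try {
      key.write(1);
    } catch (IOException expected) {
      System.out.println("write failed as expected: " + expected.getMessage());
    }
    try {
      key.flush();
    } catch (IOException expected) {
      System.out.println("flush failed as expected: " + expected.getMessage());
    }
    key.close(); // must not throw
    System.out.println("close succeeded");
  }
}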