HDFS-14048. DFSOutputStream close() throws exception on subsequent call after DataNode restart. Contributed by Erik Krogen.

Inigo Goiri 2018-11-06 12:16:46 -08:00
parent 98075d9224
commit d9b3b58389
2 changed files with 5 additions and 2 deletions
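
The behavior being restored is the java.io.Closeable contract: closing a stream that is already closed must have no effect. A minimal, generic sketch of what a caller expects, using plain JDK classes rather than HDFS code:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class DoubleCloseExample {
  public static void main(String[] args) throws IOException {
    OutputStream out = new ByteArrayOutputStream();
    out.write(1);
    out.close();
    // Per java.io.Closeable, this second close() must be a no-op rather than
    // re-throwing an error recorded earlier in the stream's lifetime.
    out.close();
  }
}

Before this patch, DFSOutputStream broke that expectation when a DataNode restart had occurred during the write: the second close() surfaced the exception recorded during the temporary pipeline failure.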


@@ -290,7 +290,7 @@ private static void releaseBuffer(List<DFSPacket> packets, ByteArrayManager bam)
}
packets.clear();
}
static class LastExceptionInStreamer {
private IOException thrown;
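
LastExceptionInStreamer, an inner class of DataStreamer, holds the most recent IOException seen by the streamer thread; the next hunk adds a call to its clear() method. The class body is not shown here, so the following is only a rough sketch of a holder of that shape, with everything beyond the thrown field and clear() being assumed:

// Sketch only; the real class lives in DataStreamer.java, and this diff shows
// just its 'thrown' field and the new clear() call site.
static class LastExceptionInStreamer {
  private IOException thrown;

  synchronized void set(Throwable t) {
    // remember the most recent failure observed by the streamer (assumed setter)
    this.thrown = (t instanceof IOException) ? (IOException) t : new IOException(t);
  }

  synchronized void clear() {
    // forget the failure once the pipeline has been rebuilt successfully
    this.thrown = null;
  }

  synchronized void throwIfSet() throws IOException {
    // hypothetical accessor: rethrow the recorded failure, if any
    if (thrown != null) {
      throw thrown;
    }
  }
}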
@@ -1754,6 +1754,7 @@ private boolean createBlockOutputStream(DatanodeInfo[] nodes,
blockStream = out;
result = true; // success
errorState.reset();
lastException.clear();
// remove all restarting nodes from failed nodes list
failed.removeAll(restartingNodes);
restartingNodes.clear();
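
The added lastException.clear() is the heart of the fix: once createBlockOutputStream() succeeds again after a DataNode restart, the exception recorded during the failed attempt is stale, and leaving it in place makes a later close() on the already-closed stream re-throw it. A self-contained toy model of that failure mode (hypothetical ToyOutputStream, not DFSOutputStream internals):

import java.io.IOException;

// Toy model: a stream that records the last pipeline error and re-checks it
// when close() is invoked on an already-closed stream.
class ToyOutputStream {
  private IOException lastException;  // analogous to LastExceptionInStreamer
  private boolean closed;

  void onPipelineFailure(IOException e) {
    lastException = e;                // the restart is first seen as a failure
  }

  void onPipelineRecovered() {
    // Analogue of the added lastException.clear(): without it, the stale
    // error outlives a successful recovery.
    lastException = null;
  }

  void close() throws IOException {
    if (closed) {
      // Second close(): should be a no-op, but a stale lastException
      // would be re-thrown right here.
      if (lastException != null) {
        throw lastException;
      }
      return;
    }
    closed = true;
  }
}

In this model, calling onPipelineFailure(), then onPipelineRecovered(), then close() twice completes quietly; skip the clearing step in the recovery callback and the second close() throws, which mirrors the reported behavior.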
@@ -1835,7 +1836,7 @@ private boolean[] getPinnings(DatanodeInfo[] nodes) {
protected LocatedBlock locateFollowingBlock(DatanodeInfo[] excluded,
ExtendedBlock oldBlock) throws IOException {
final DfsClientConf conf = dfsClient.getConf();
int retries = conf.getNumBlockWriteLocateFollowingRetry();
long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
while (true) {
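
The surrounding context in locateFollowingBlock() reads its retry budget and initial delay from DfsClientConf (getNumBlockWriteLocateFollowingRetry() and getBlockWriteLocateFollowingInitialDelayMs()) before entering the retry loop; the loop body itself lies outside the hunk. A generic retry-with-backoff helper of that shape, purely illustrative and assuming the delay doubles between attempts:

import java.util.concurrent.Callable;

// Illustrative helper, not the actual locateFollowingBlock() body.
final class RetryWithBackoff {
  static <T> T run(Callable<T> attempt, int retries, long sleeptimeMs)
      throws Exception {
    while (true) {
      try {
        return attempt.call();   // one attempt, e.g. requesting the next block
      } catch (Exception e) {
        if (retries-- <= 0) {
          throw e;               // retry budget exhausted: surface the error
        }
        Thread.sleep(sleeptimeMs);
        sleeptimeMs *= 2;        // assumed exponential backoff between attempts
      }
    }
  }
}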


@@ -436,6 +436,8 @@ public Boolean get() {
0, out.getStreamer().getPipelineRecoveryCount());
out.write(1);
out.close();
// Ensure that subsequent closes are idempotent and do not throw errors
out.close();
} finally {
if (cluster != null) {
cluster.shutdown();
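
The two added test lines exercise exactly the idempotent-close expectation: after a write that has gone through pipeline recovery, the stream is closed and then closed again, and the second call must not throw. The same check could be phrased as a small reusable helper (a sketch, not part of the Hadoop test suite):

import java.io.Closeable;
import java.io.IOException;

// Hypothetical helper; the name is not from the Hadoop codebase.
final class CloseAssertions {
  static void assertCloseIsIdempotent(Closeable c) throws IOException {
    c.close();
    // A compliant java.io.Closeable treats this second call as a no-op.
    c.close();
  }
}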