diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index c1473dd268b..4c733bfa246 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -1781,6 +1781,7 @@ class DataStreamer extends Daemon {
         blockStream = out;
         result = true; // success
         errorState.resetInternalError();
+        lastException.clear();
         // remove all restarting nodes from failed nodes list
         failed.removeAll(restartingNodes);
         restartingNodes.clear();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
index 3f8c7f7796b..465a0838410 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
@@ -436,6 +436,8 @@ public class TestClientProtocolForPipelineRecovery {
           0, out.getStreamer().getPipelineRecoveryCount());
       out.write(1);
       out.close();
+      // Ensure that subsequent closes are idempotent and do not throw errors
+      out.close();
     } finally {
       if (cluster != null) {
         cluster.shutdown();
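
Note (not part of the patch): the added test lines exercise the client-visible effect of clearing lastException after a successful pipeline recovery, namely that calling close() again on an already-closed stream does not rethrow a stale pipeline error. Below is a minimal usage sketch of that behavior against a generic Hadoop FileSystem; the configuration, path, and class name are illustrative assumptions and do not come from this change.

    // Illustrative sketch only: double close() on an HDFS output stream.
    // Assumes fs.defaultFS points at a running HDFS cluster; the path and
    // class name here are made up for the example.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DoubleCloseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FSDataOutputStream out = fs.create(new Path("/tmp/double-close-demo"));
        out.write(1);
        out.close();
        // With the DataStreamer fix above, a second close() is expected to be
        // a no-op rather than rethrowing an exception left over from recovery.
        out.close();
        fs.close();
      }
    }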