diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index a4a35538baf..6c2404d5413 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -2073,6 +2073,9 @@ Release 0.23.7 - UNRELEASED HDFS-4288. NN accepts incremental BR as IBR in safemode (daryn via kihwal) + HDFS-4495. Allow client-side lease renewal to be retried beyond soft-limit + (kihwal) + Release 0.23.6 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index e6908598aaa..6352de81e19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -622,10 +622,10 @@ public class DFSClient implements java.io.Closeable { } catch (IOException e) { // Abort if the lease has already expired. final long elapsed = Time.now() - getLastLeaseRenewal(); - if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) { + if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) { LOG.warn("Failed to renew lease for " + clientName + " for " - + (elapsed/1000) + " seconds (>= soft-limit =" - + (HdfsConstants.LEASE_SOFTLIMIT_PERIOD/1000) + " seconds.) " + + (elapsed/1000) + " seconds (>= hard-limit =" + + (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) 
" + "Closing all files being written ...", e); closeAllFilesBeingWritten(true); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java index 1940b6dcd03..7470def9693 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.ipc.RemoteException; @@ -85,9 +86,26 @@ public class TestLease { // We don't need to wait the lease renewer thread to act. // call renewLease() manually. - // make it look like lease has already expired. + // make it look like the soft limit has been exceeded. LeaseRenewer originalRenewer = dfs.getLeaseRenewer(); - dfs.lastLeaseRenewal = Time.now() - 300000; + dfs.lastLeaseRenewal = Time.now() + - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000; + try { + dfs.renewLease(); + } catch (IOException e) {} + + // Things should continue to work it passes hard limit without + // renewing. + try { + d_out.write(buf, 0, 1024); + LOG.info("Write worked beyond the soft limit as expected."); + } catch (IOException e) { + Assert.fail("Write failed."); + } + + // make it look like the hard limit has been exceeded. + dfs.lastLeaseRenewal = Time.now() + - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000; dfs.renewLease(); // this should not work.