diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a88324336f3..86f193074b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -670,6 +670,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6563. NameNode cannot save fsimage in certain circumstances when
     snapshots are in use. (atm)
 
+    HDFS-3848. A Bug in recoverLeaseInternal method of FSNameSystem class
+    (Hooman Peiro Sajjad and Chen He via kihwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 3846add44ee..ad7b4d61285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2584,10 +2584,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       // We found the lease for this file. And surprisingly the original
       // holder is trying to recreate this file. This should never occur.
       //
+
       if (!force && lease != null) {
         Lease leaseFile = leaseManager.getLeaseByPath(src);
-        if ((leaseFile != null && leaseFile.equals(lease)) ||
-            lease.getHolder().equals(holder)) {
+        if (leaseFile != null && leaseFile.equals(lease)) {
           throw new AlreadyBeingCreatedException(
             "failed to create file " + src + " for " + holder +
             " for client " + clientMachine +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
index 04a0d2298e7..6d981fbfd18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
@@ -153,6 +153,15 @@ public class TestLeaseRecovery2 {
     verifyFile(dfs, filepath1, actual, size);
   }
 
+  @Test
+  public void testLeaseRecoverByAnotherUser() throws Exception {
+    byte [] actual = new byte[FILE_SIZE];
+    cluster.setLeasePeriod(SHORT_LEASE_PERIOD, LONG_LEASE_PERIOD);
+    Path filepath = createFile("/immediateRecoverLease-x", 0, true);
+    recoverLeaseUsingCreate2(filepath);
+    verifyFile(dfs, filepath, actual, 0);
+  }
+
   private Path createFile(final String filestr, final int size,
       final boolean triggerLeaseRenewerInterrupt)
       throws IOException, InterruptedException {
@@ -196,7 +205,7 @@ public class TestLeaseRecovery2 {
   }
 
   private void recoverLeaseUsingCreate(Path filepath)
-    throws IOException, InterruptedException {
+      throws IOException, InterruptedException {
     FileSystem dfs2 = getFSAsAnotherUser(conf);
     for(int i = 0; i < 10; i++) {
       AppendTestUtil.LOG.info("i=" + i);
@@ -216,6 +225,20 @@ public class TestLeaseRecovery2 {
     fail("recoverLeaseUsingCreate failed");
   }
 
+  private void recoverLeaseUsingCreate2(Path filepath)
+      throws Exception {
+    FileSystem dfs2 = getFSAsAnotherUser(conf);
+    int size = AppendTestUtil.nextInt(FILE_SIZE);
+    DistributedFileSystem dfsx = (DistributedFileSystem) dfs2;
+    //create file using dfsx
+    Path filepath2 = new Path("/immediateRecoverLease-x2");
+    FSDataOutputStream stm = dfsx.create(filepath2, true, BUF_SIZE,
+        REPLICATION_NUM, BLOCK_SIZE);
+    assertTrue(dfsx.dfs.exists("/immediateRecoverLease-x2"));
+    try {Thread.sleep(10000);} catch (InterruptedException e) {}
+    dfsx.append(filepath);
+  }
+
   private void verifyFile(FileSystem dfs, Path filepath, byte[] actual,
       int size) throws IOException {
     AppendTestUtil.LOG.info("Lease for file " + filepath + " is recovered. "