diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
index e1d4844b9a3..2ef2a3f9e92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ErasureCoding.java
@@ -140,7 +140,7 @@ public class ErasureCoding {
     rpcServer.checkOperation(OperationCategory.READ);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod remoteMethod = new RemoteMethod("getErasureCodingPolicy",
         new Class<?>[] {String.class}, new RemoteParam());
     ErasureCodingPolicy ret = rpcClient.invokeSequential(
@@ -153,7 +153,7 @@ public class ErasureCoding {
     rpcServer.checkOperation(OperationCategory.WRITE);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod remoteMethod = new RemoteMethod("setErasureCodingPolicy",
         new Class<?>[] {String.class, String.class},
         new RemoteParam(), ecPolicyName);
@@ -168,7 +168,7 @@ public class ErasureCoding {
     rpcServer.checkOperation(OperationCategory.WRITE);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod remoteMethod = new RemoteMethod("unsetErasureCodingPolicy",
         new Class<?>[] {String.class}, new RemoteParam());
     if (rpcServer.isInvokeConcurrent(src)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
index a6f5baba393..0ac64a11378 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Quota.java
@@ -213,13 +213,14 @@ public class Quota {
     if (manager != null) {
       Set<String> childrenPaths = manager.getPaths(path);
       for (String childPath : childrenPaths) {
-        locations.addAll(rpcServer.getLocationsForPath(childPath, true, false));
+        locations.addAll(
+            rpcServer.getLocationsForPath(childPath, false, false));
       }
     }
     if (locations.size() >= 1) {
       return locations;
     } else {
-      locations.addAll(rpcServer.getLocationsForPath(path, true, false));
+      locations.addAll(rpcServer.getLocationsForPath(path, false, false));
       return locations;
     }
   }
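Note: every call site touched by this patch flips failIfLocked from true to false, so quota, erasure-coding, ACL and similar metadata operations keep resolving paths that have mount points beneath them. A minimal sketch of the resulting convention; the delete/rename call sites are not part of this diff, and that they still pass true is an inference from the new tests further down:

    // Metadata operations tolerate mount points under the path:
    List<RemoteLocation> locations =
        rpcServer.getLocationsForPath(src, false, false);
    // Destructive operations (delete/rename) keep failIfLocked=true and,
    // with the new guard in RouterRpcServer below, now fail fast:
    List<RemoteLocation> srcLocations =
        rpcServer.getLocationsForPath(src, true, false);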
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
index 0a14b99f51e..550f5e7e90b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java
@@ -396,7 +396,7 @@ public class RouterClientProtocol implements ClientProtocol {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setPermission",
         new Class<?>[] {String.class, FsPermission.class},
         new RemoteParam(), permissions);
@@ -413,7 +413,7 @@ public class RouterClientProtocol implements ClientProtocol {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setOwner",
         new Class<?>[] {String.class, String.class, String.class},
         new RemoteParam(), username, groupname);
@@ -672,7 +672,7 @@ public class RouterClientProtocol implements ClientProtocol {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true);
+        rpcServer.getLocationsForPath(src, false);
     RemoteMethod method = new RemoteMethod("mkdirs",
         new Class<?>[] {String.class, FsPermission.class, boolean.class},
         new RemoteParam(), masked, createParent);
@@ -725,7 +725,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // Locate the dir and fetch the listing
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("getListing",
         new Class<?>[] {String.class, startAfter.getClass(), boolean.class},
         new RemoteParam(), startAfter, needLocation);
@@ -1182,7 +1182,7 @@ public class RouterClientProtocol implements ClientProtocol {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setTimes",
         new Class<?>[] {String.class, long.class, long.class},
         new RemoteParam(), mtime, atime);
@@ -1212,7 +1212,7 @@ public class RouterClientProtocol implements ClientProtocol {
     rpcServer.checkOperation(NameNode.OperationCategory.READ);
 
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(path, true, false);
+        rpcServer.getLocationsForPath(path, false, false);
     RemoteMethod method = new RemoteMethod("getLinkTarget",
         new Class<?>[] {String.class}, new RemoteParam());
     return rpcClient.invokeSequential(locations, method, String.class, null);
@@ -1310,7 +1310,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("modifyAclEntries",
         new Class<?>[] {String.class, List.class},
         new RemoteParam(), aclSpec);
@@ -1328,7 +1328,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeAclEntries",
         new Class<?>[] {String.class, List.class},
         new RemoteParam(), aclSpec);
@@ -1345,7 +1345,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeDefaultAcl",
         new Class<?>[] {String.class}, new RemoteParam());
     if (rpcServer.isInvokeConcurrent(src)) {
@@ -1361,7 +1361,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeAcl",
         new Class<?>[] {String.class}, new RemoteParam());
     if (rpcServer.isInvokeConcurrent(src)) {
@@ -1377,7 +1377,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod(
         "setAcl", new Class<?>[] {String.class, List.class},
         new RemoteParam(), aclSpec);
@@ -1407,7 +1407,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("createEncryptionZone",
         new Class<?>[] {String.class, String.class},
         new RemoteParam(), keyName);
@@ -1454,7 +1454,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setXAttr",
         new Class<?>[] {String.class, XAttr.class, EnumSet.class},
         new RemoteParam(), xAttr, flag);
@@ -1500,7 +1500,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("removeXAttr",
         new Class<?>[] {String.class, XAttr.class}, new RemoteParam(), xAttr);
     if (rpcServer.isInvokeConcurrent(src)) {
@@ -1516,7 +1516,7 @@ public class RouterClientProtocol implements ClientProtocol {
 
     // TODO handle virtual directories
     final List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(path, true, false);
+        rpcServer.getLocationsForPath(path, false, false);
     RemoteMethod method = new RemoteMethod("checkAccess",
         new Class<?>[] {String.class, FsAction.class},
         new RemoteParam(), mode);
@@ -1736,7 +1736,7 @@ public class RouterClientProtocol implements ClientProtocol {
       throws IOException {
 
     final List<RemoteLocation> dstLocations =
-        rpcServer.getLocationsForPath(dst, true, false);
+        rpcServer.getLocationsForPath(dst, false, false);
     final Map<RemoteLocation, RemoteLocation> dstMap = new HashMap<>();
 
     Iterator<RemoteLocation> iterator = srcLocations.iterator();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index e41ccc7c803..32f3e667698 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -1450,7 +1450,8 @@ public class RouterRpcServer extends AbstractService
    * Get the possible locations of a path in the federated cluster.
    *
    * @param path Path to check.
-   * @param failIfLocked Fail the request if locked (top mount point).
+   * @param failIfLocked Fail the request if there is any mount point under
+   *     the path.
    * @param needQuotaVerify If need to do the quota verification.
    * @return Prioritized list of locations in the federated cluster.
    * @throws IOException If the location for this path cannot be determined.
@@ -1458,6 +1459,27 @@ public class RouterRpcServer extends AbstractService
   protected List<RemoteLocation> getLocationsForPath(String path,
       boolean failIfLocked, boolean needQuotaVerify) throws IOException {
     try {
+      if (failIfLocked) {
+        // check if there is any mount point under the path
+        final List<String> mountPoints =
+            this.subclusterResolver.getMountPoints(path);
+        if (mountPoints != null) {
+          StringBuilder sb = new StringBuilder();
+          sb.append("The operation is not allowed because ");
+          if (mountPoints.isEmpty()) {
+            sb.append("the path: ")
+                .append(path)
+                .append(" is a mount point");
+          } else {
+            sb.append("there are mount points: ")
+                .append(String.join(",", mountPoints))
+                .append(" under the path: ")
+                .append(path);
+          }
+          throw new AccessControlException(sb.toString());
+        }
+      }
+
       // Check the location for this path
       final PathLocation location =
           this.subclusterResolver.getDestinationForPath(path);
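Note: the guard above leans on the FileSubclusterResolver.getMountPoints(path) contract: null means the path is not a mount point and has none below it, an empty list means the path itself is a mount point, and a non-empty list holds the names of the mount points directly under it. An illustrative sketch, assuming a mount table containing entries /a and /a/b:

    subclusterResolver.getMountPoints("/a");   // ["b"] -> "there are mount points: b under the path: /a"
    subclusterResolver.getMountPoints("/a/b"); // []    -> "the path: /a/b is a mount point"
    subclusterResolver.getMountPoints("/c");   // null  -> no mount point involved; the check passes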
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java
index 33203dcdb99..05f983c3972 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterStoragePolicy.java
@@ -46,7 +46,7 @@ public class RouterStoragePolicy {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE);
 
     List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("setStoragePolicy",
         new Class<?>[] {String.class, String.class},
         new RemoteParam(),
@@ -69,7 +69,7 @@ public class RouterStoragePolicy {
     rpcServer.checkOperation(NameNode.OperationCategory.WRITE, true);
 
     List<RemoteLocation> locations =
-        rpcServer.getLocationsForPath(src, true, false);
+        rpcServer.getLocationsForPath(src, false, false);
     RemoteMethod method = new RemoteMethod("unsetStoragePolicy",
         new Class<?>[] {String.class}, new RemoteParam());
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java
index cc603ddd7bb..21f5ee7bd75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java
@@ -61,4 +61,14 @@ public class TestRouterHDFSContractRootDirectory extends
   public void testRecursiveRootListing() throws IOException {
     // It doesn't apply because we still have the mount points here
   }
+
+  @Override
+  public void testRmRootRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
+
+  @Override
+  public void testRmEmptyRootDirRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
index 32ec1617d1f..faa08ba7861 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
@@ -60,4 +60,14 @@ public class TestRouterHDFSContractRootDirectorySecure
   public void testRecursiveRootListing() throws IOException {
     // It doesn't apply because we still have the mount points here
   }
+
+  @Override
+  public void testRmRootRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
+
+  @Override
+  public void testRmEmptyRootDirRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java
index 0128b3010d4..dd2bbff7d8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java
@@ -61,4 +61,14 @@ public class TestRouterWebHDFSContractRootDirectory extends
   public void testRecursiveRootListing() throws IOException {
     // It doesn't apply because we still have the mount points here
   }
+
+  @Override
+  public void testRmRootRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
+
+  @Override
+  public void testRmEmptyRootDirRecursive() {
+    // It doesn't apply because we still have the mount points here
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
index 24dfc3fd313..b745ecdcee5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -51,6 +52,7 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntr
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.After;
@@ -474,4 +476,86 @@ public class TestRouterMountTable {
       nnFs1.delete(new Path("/testlist/tmp1"), true);
     }
   }
+
+  /**
+   * Regression test for HDFS-13255.
+   * Verify that delete fails if the path is a mount point or
+   * there is any mount point under the path.
+   */
+  @Test
+  public void testDeleteMountPoint() throws Exception {
+    try {
+      MountTable addEntry = MountTable.newInstance("/testdelete/subdir",
+          Collections.singletonMap("ns0", "/testdelete/subdir"));
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/testdelete/subdir"));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because there are mount points: "
+              + "subdir under the path: /testdelete",
+          () -> routerFs.delete(new Path("/testdelete"), true));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because there are mount points: "
+              + "subdir under the path: /testdelete",
+          () -> routerFs.delete(new Path("/testdelete"), false));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because the path: "
+              + "/testdelete/subdir is a mount point",
+          () -> routerFs.delete(new Path("/testdelete/subdir"), true));
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because the path: "
+              + "/testdelete/subdir is a mount point",
+          () -> routerFs.delete(new Path("/testdelete/subdir"), false));
+    } finally {
+      nnFs0.delete(new Path("/testdelete"), true);
+    }
+  }
+
+  /**
+   * Regression test for HDFS-13255.
+   * Verify that rename fails if the src path is a mount point or
+   * there is any mount point under it.
+   */
+  @Test
+  public void testRenameMountPoint() throws Exception {
+    try {
+      MountTable addEntry = MountTable.newInstance("/testrename1/sub",
+          Collections.singletonMap("ns0", "/testrename1/sub"));
+      assertTrue(addMountTable(addEntry));
+      addEntry = MountTable.newInstance("/testrename2/sub",
+          Collections.singletonMap("ns0", "/testrename2/sub"));
+      assertTrue(addMountTable(addEntry));
+      nnFs0.mkdirs(new Path("/testrename1/sub/sub"));
+      nnFs0.mkdirs(new Path("/testrename2"));
+
+      // Success: rename a directory to a mount point
+      assertTrue(nnFs0.exists(new Path("/testrename1/sub/sub")));
+      assertFalse(nnFs0.exists(new Path("/testrename2/sub")));
+      assertTrue(routerFs.rename(new Path("/testrename1/sub/sub"),
+          new Path("/testrename2")));
+      assertFalse(nnFs0.exists(new Path("/testrename1/sub/sub")));
+      assertTrue(nnFs0.exists(new Path("/testrename2/sub")));
+
+      // Fail: the target already exists
+      nnFs0.mkdirs(new Path("/testrename1/sub/sub"));
+      assertFalse(routerFs.rename(new Path("/testrename1/sub/sub"),
+          new Path("/testrename2")));
+
+      // Fail: the src is a mount point
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because the path: "
+              + "/testrename1/sub is a mount point",
+          () -> routerFs.rename(new Path("/testrename1/sub"),
+              new Path("/testrename2/sub")));
+
+      // Fail: there is a mount point under the src
+      LambdaTestUtils.intercept(AccessControlException.class,
+          "The operation is not allowed because there are mount points: "
+              + "sub under the path: /testrename1",
+          () -> routerFs.rename(new Path("/testrename1"),
+              new Path("/testrename2/sub")));
+    } finally {
+      nnFs0.delete(new Path("/testrename1"), true);
+      nnFs0.delete(new Path("/testrename2"), true);
+    }
+  }
 }
\ No newline at end of file
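Note: from a client, the new behavior surfaces as an AccessControlException thrown by the FileSystem call itself, as the tests above exercise through routerFs. A minimal sketch, assuming a router file system and a mount entry /mnt/sub (URI and paths are illustrative, not from the patch):

    FileSystem fs = FileSystem.get(routerUri, conf);
    try {
      fs.delete(new Path("/mnt/sub"), true);
    } catch (AccessControlException e) {
      // "The operation is not allowed because the path: /mnt/sub is a mount point"
    }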
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
index 81d5023cfc0..6dc98b85ac6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
@@ -19,12 +19,14 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
@@ -141,29 +143,35 @@
     addMountTable(mountTable2);
 
     final FileSystem routerFs = routerContext.getFileSystem();
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-
-      @Override
-      public Boolean get() {
-        boolean isNsQuotaViolated = false;
-        try {
-          // create new directory to trigger NSQuotaExceededException
-          routerFs.mkdirs(new Path("/nsquota/" + UUID.randomUUID()));
-          routerFs.mkdirs(new Path("/nsquota/subdir/" + UUID.randomUUID()));
-        } catch (NSQuotaExceededException e) {
-          isNsQuotaViolated = true;
-        } catch (IOException ignored) {
-        }
-        return isNsQuotaViolated;
+    final List<Path> created = new ArrayList<>();
+    GenericTestUtils.waitFor(() -> {
+      boolean isNsQuotaViolated = false;
+      try {
+        // create new directory to trigger NSQuotaExceededException
+        Path p = new Path("/nsquota/" + UUID.randomUUID());
+        routerFs.mkdirs(p);
+        created.add(p);
+        p = new Path("/nsquota/subdir/" + UUID.randomUUID());
+        routerFs.mkdirs(p);
+        created.add(p);
+      } catch (NSQuotaExceededException e) {
+        isNsQuotaViolated = true;
+      } catch (IOException ignored) {
       }
+      return isNsQuotaViolated;
     }, 5000, 60000);
+
     // mkdir in real FileSystem should be okay
     nnFs1.mkdirs(new Path("/testdir1/" + UUID.randomUUID()));
     nnFs2.mkdirs(new Path("/testdir2/" + UUID.randomUUID()));
 
-    // delete/rename call should be still okay
-    routerFs.delete(new Path("/nsquota"), true);
-    routerFs.rename(new Path("/nsquota/subdir"), new Path("/nsquota/subdir"));
+    // rename/delete calls should still be okay
+    assertFalse(created.isEmpty());
+    for (Path src : created) {
+      final Path dst = new Path(src.toString() + "-renamed");
+      routerFs.rename(src, dst);
+      routerFs.delete(dst, true);
+    }
   }
@@ -376,7 +384,7 @@
 
   /**
    * Remove a mount table entry to the mount table through the admin API.
-   * @param entry Mount table entry to remove.
+   * @param path Path of the mount table entry to remove.
    * @return If it was successfully removed.
    * @throws IOException Problems removing entries.
    */
+ routerFs.delete(new Path("/setdir1/file1"), true); // Create file routerClient.create("/setdir2/file3", true).close(); @@ -699,9 +707,9 @@ public class TestRouterQuota { updatedMountTable = getMountTable("/setdir2"); mountQuota2 = updatedMountTable.getQuota(); - // If destination is not present the quota usage should be reset to 0 - assertEquals(0, cacheQuota1.getFileAndDirectoryCount()); - assertEquals(0, mountQuota1.getFileAndDirectoryCount()); + // The quota usage should be reset. + assertEquals(1, cacheQuota1.getFileAndDirectoryCount()); + assertEquals(1, mountQuota1.getFileAndDirectoryCount()); assertEquals(0, cacheQuota1.getSpaceConsumed()); assertEquals(0, mountQuota1.getSpaceConsumed());