diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
index 21ee3ceb0a7..328ce799ef1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -55,7 +55,7 @@ class FSDirDeleteOp {
     FSNamesystem fsn = fsd.getFSNamesystem();
     fsd.writeLock();
     try {
-      if (deleteAllowed(iip, iip.getPath()) ) {
+      if (deleteAllowed(iip)) {
         List<INodeDirectory> snapshottableDirs = new ArrayList<>();
         FSDirSnapshotOp.checkSnapshot(fsd, iip, snapshottableDirs);
         ReclaimContext context = new ReclaimContext(
@@ -98,20 +98,24 @@ class FSDirDeleteOp {
     FSDirectory fsd = fsn.getFSDirectory();
     FSPermissionChecker pc = fsd.getPermissionChecker();
 
-    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
-    src = iip.getPath();
-    if (!recursive && fsd.isNonEmptyDirectory(iip)) {
-      throw new PathIsNotEmptyDirectoryException(src + " is non empty");
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
     }
+
+    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
     if (fsd.isPermissionEnabled()) {
       fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
                           FsAction.ALL, true);
     }
-    if (recursive && fsd.isNonEmptyDirectory(iip)) {
-      checkProtectedDescendants(fsd, src);
+    if (fsd.isNonEmptyDirectory(iip)) {
+      if (!recursive) {
+        throw new PathIsNotEmptyDirectoryException(
+            iip.getPath() + " is non empty");
+      }
+      checkProtectedDescendants(fsd, iip);
     }
 
-    return deleteInternal(fsn, src, iip, logRetryCache);
+    return deleteInternal(fsn, iip, logRetryCache);
   }
 
   /**
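[Note: the two hunks above reorder delete()'s validation. Condensed, the
post-patch flow reads as follows; this is a reading aid extracted from the
added lines above, not a compilable excerpt:

    if (FSDirectory.isExactReservedName(src)) {
      throw new InvalidPathException(src);    // fail fast, before resolving src
    }
    final INodesInPath iip = fsd.resolvePathForWrite(pc, src, false);
    // ... permission checks, unchanged ...
    if (fsd.isNonEmptyDirectory(iip)) {       // single emptiness probe
      if (!recursive) {
        throw new PathIsNotEmptyDirectoryException(
            iip.getPath() + " is non empty");
      }
      checkProtectedDescendants(fsd, iip);    // recursive deletes only
    }
    return deleteInternal(fsn, iip, logRetryCache);

The reserved-name check, previously buried in deleteInternal(), now rejects
reserved paths before any resolution work is done.]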

@@ -126,17 +130,14 @@ class FSDirDeleteOp {
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    */
-  static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
+  static void deleteForEditLog(FSDirectory fsd, INodesInPath iip, long mtime)
       throws IOException {
     assert fsd.hasWriteLock();
     FSNamesystem fsn = fsd.getFSNamesystem();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();
     List<Long> removedUCFiles = new ChunkedArrayList<>();
-
-    final INodesInPath iip = fsd.getINodesInPath4Write(
-        FSDirectory.normalizePath(src), false);
-    if (!deleteAllowed(iip, src)) {
+    if (!deleteAllowed(iip)) {
       return;
     }
     List<INodeDirectory> snapshottableDirs = new ArrayList<>();
@@ -162,7 +163,6 @@ class FSDirDeleteOp {
    * <p>
    * For small directory or file the deletion is done in one shot.
    * @param fsn namespace
-   * @param src path name to be deleted
    * @param iip the INodesInPath instance containing all the INodes for the path
    * @param logRetryCache whether to record RPC ids in editlog for retry cache
    *          rebuilding
@@ -170,15 +170,11 @@ class FSDirDeleteOp {
    * @throws IOException
    */
   static BlocksMapUpdateInfo deleteInternal(
-      FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
+      FSNamesystem fsn, INodesInPath iip, boolean logRetryCache)
       throws IOException {
     assert fsn.hasWriteLock();
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-    }
-
-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
+      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + iip.getPath());
     }
 
     FSDirectory fsd = fsn.getFSDirectory();
@@ -193,14 +189,14 @@ class FSDirDeleteOp {
     if (filesRemoved < 0) {
       return null;
     }
-    fsd.getEditLog().logDelete(src, mtime, logRetryCache);
+    fsd.getEditLog().logDelete(iip.getPath(), mtime, logRetryCache);
     incrDeletedFileCount(filesRemoved);
 
     fsn.removeLeasesAndINodes(removedUCFiles, removedINodes, true);
 
     if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
-          + src +" is removed");
+      NameNode.stateChangeLog.debug(
+          "DIR* Namesystem.delete: " + iip.getPath() +" is removed");
     }
     return collectedBlocks;
   }
@@ -209,19 +205,18 @@ class FSDirDeleteOp {
     NameNode.getNameNodeMetrics().incrFilesDeleted(count);
   }
 
-  private static boolean deleteAllowed(final INodesInPath iip,
-      final String src) {
+  private static boolean deleteAllowed(final INodesInPath iip) {
     if (iip.length() < 1 || iip.getLastINode() == null) {
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
             "DIR* FSDirectory.unprotectedDelete: failed to remove "
-                + src + " because it does not exist");
+                + iip.getPath() + " because it does not exist");
       }
       return false;
     } else if (iip.length() == 1) { // src is the root
       NameNode.stateChangeLog.warn(
-          "DIR* FSDirectory.unprotectedDelete: failed to remove " + src +
-          " because the root is not allowed to be deleted");
+          "DIR* FSDirectory.unprotectedDelete: failed to remove "
+              + iip.getPath() + " because the root is not allowed to be deleted");
       return false;
     }
     return true;
@@ -278,15 +273,19 @@ class FSDirDeleteOp {
    * Throw if the given directory has any non-empty protected descendants
    * (including itself).
    *
-   * @param src directory whose descendants are to be checked. The caller
-   *            must ensure src is not terminated with {@link Path#SEPARATOR}.
+   * @param iip directory whose descendants are to be checked.
    * @throws AccessControlException if a non-empty protected descendant
    *           was found.
    */
-  private static void checkProtectedDescendants(FSDirectory fsd, String src)
-      throws AccessControlException, UnresolvedLinkException {
+  private static void checkProtectedDescendants(
+      FSDirectory fsd, INodesInPath iip)
+      throws AccessControlException, UnresolvedLinkException {
     final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
+    if (protectedDirs.isEmpty()) {
+      return;
+    }
+
+    String src = iip.getPath();
     // Is src protected? Caller has already checked it is non-empty.
     if (protectedDirs.contains(src)) {
       throw new AccessControlException(
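[Note: the last hunk above adds an early return so that a recursive delete of
a non-empty directory pays for set lookups and the iip.getPath() string
construction only when fs.protected.directories is actually configured. For
readers unfamiliar with the check, here is a minimal self-contained sketch of
how a sorted set answers "is this path, or any descendant of it, protected?".
The class and method names are illustrative only, not the HDFS implementation:

    import java.util.SortedSet;
    import java.util.TreeSet;

    class ProtectedDirsSketch {
      // True if src itself, or any path strictly below src, is in the set.
      static boolean protectedSelfOrDescendant(
          SortedSet<String> protectedDirs, String src) {
        if (protectedDirs.isEmpty()) {
          return false;                 // cheap short-circuit, as in the patch
        }
        if (protectedDirs.contains(src)) {
          return true;                  // src itself is protected
        }
        // Every descendant starts with "src/". In a sorted set those entries
        // form one contiguous range, so the half-open view ["src/", "src0")
        // covers exactly the descendants ('0' follows '/' in ASCII).
        return !protectedDirs.subSet(src + "/", src + "0").isEmpty();
      }

      public static void main(String[] args) {
        SortedSet<String> dirs = new TreeSet<>();
        dirs.add("/data/protected");
        System.out.println(protectedSelfOrDescendant(dirs, "/data")); // true
        System.out.println(protectedSelfOrDescendant(dirs, "/tmp"));  // false
      }
    }
]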
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 21c94f8565f..8015a291897 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -352,7 +352,7 @@ public class FSEditLogLoader {
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
-        FSDirDeleteOp.deleteForEditLog(fsDir, path, addCloseOp.mtime);
+        FSDirDeleteOp.deleteForEditLog(fsDir, iip, addCloseOp.mtime);
         iip = INodesInPath.replace(iip, iip.length() - 1, null);
         oldFile = null;
       }
@@ -552,10 +552,11 @@ public class FSEditLogLoader {
     }
     case OP_DELETE: {
       DeleteOp deleteOp = (DeleteOp)op;
-      FSDirDeleteOp.deleteForEditLog(
-          fsDir, renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
-          deleteOp.timestamp);
-
+      final String src = renameReservedPathsOnUpgrade(
+          deleteOp.path, logVersion);
+      final INodesInPath iip = fsDir.getINodesInPath4Write(src, false);
+      FSDirDeleteOp.deleteForEditLog(fsDir, iip, deleteOp.timestamp);
+
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 4f2548a25bf..1ff17482b15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3784,7 +3784,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
           LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
           BlocksMapUpdateInfo toRemoveBlocks =
               FSDirDeleteOp.deleteInternal(
-                  FSNamesystem.this, bc.getName(),
+                  FSNamesystem.this,
                   INodesInPath.fromINode((INodeFile) bc), false);
           changed |= toRemoveBlocks != null;
           if (toRemoveBlocks != null) {
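[Note: the two caller-side diffs mirror the signature changes. Edit-log
replay now resolves the INodesInPath itself before delegating (condensed from
the OP_DELETE hunk above):

    final String src = renameReservedPathsOnUpgrade(deleteOp.path, logVersion);
    final INodesInPath iip = fsDir.getINodesInPath4Write(src, false);
    FSDirDeleteOp.deleteForEditLog(fsDir, iip, deleteOp.timestamp);

while the lazyPersist cleanup in FSNamesystem, which starts from an inode
rather than a path string, builds one directly via
INodesInPath.fromINode((INodeFile) bc). In both cases the path string is
recovered only where it is still needed (logging and the edit log), via
iip.getPath().]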