HDFS-7573. Consolidate the implementation of delete() into a single class. Contributed by Haohui Mai.

Haohui Mai 2014-12-27 07:21:52 +08:00
parent 2908fe4ec5
commit 24315e7d37
6 changed files with 287 additions and 243 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -522,6 +522,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7189. Add trace spans for DFSClient metadata operations. (Colin P.
     McCabe via yliu)
 
+    HDFS-7573. Consolidate the implementation of delete() into a single class.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java (new file)

@@ -0,0 +1,246 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.util.ChunkedArrayList;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
import static org.apache.hadoop.util.Time.now;

class FSDirDeleteOp {
/**
* Delete the target directory and collect the blocks under it
*
* @param iip the INodesInPath instance containing all the INodes for the path
* @param collectedBlocks Blocks under the deleted directory
* @param removedINodes INodes that should be removed from inodeMap
* @return the number of files that have been removed
*/
static long delete(
FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
List<INode> removedINodes, long mtime) throws IOException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
}
final long filesRemoved;
fsd.writeLock();
try {
if (!deleteAllowed(iip, iip.getPath()) ) {
filesRemoved = -1;
} else {
List<INodeDirectory> snapshottableDirs = new ArrayList<>();
FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
filesRemoved = unprotectedDelete(fsd, iip, collectedBlocks,
removedINodes, mtime);
fsd.getFSNamesystem().removeSnapshottableDirs(snapshottableDirs);
}
} finally {
fsd.writeUnlock();
}
return filesRemoved;
}
/**
* Remove a file/directory from the namespace.
* <p>
* For large directories, deletion is incremental. The blocks under
* the directory are collected and deleted a small number at a time holding
* the {@link FSNamesystem} lock.
* <p>
* For small directory or file the deletion is done in one shot.
*
*/
static BlocksMapUpdateInfo delete(
FSNamesystem fsn, String src, boolean recursive, boolean logRetryCache)
throws IOException {
FSDirectory fsd = fsn.getFSDirectory();
FSPermissionChecker pc = fsd.getPermissionChecker();
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
src = fsd.resolvePath(pc, src, pathComponents);
final INodesInPath iip = fsd.getINodesInPath4Write(src, false);
if (!recursive && fsd.isNonEmptyDirectory(iip)) {
throw new PathIsNotEmptyDirectoryException(src + " is non empty");
}
if (fsd.isPermissionEnabled()) {
fsd.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
FsAction.ALL, true);
}
return deleteInternal(fsn, src, iip, logRetryCache);
}
/**
* Delete a path from the name space
* Update the count at each ancestor directory with quota
* <br>
* Note: This is to be used by
* {@link org.apache.hadoop.hdfs.server.namenode.FSEditLog} only.
* <br>
* @param src a string representation of a path to an inode
* @param mtime the time the inode is removed
*/
static void deleteForEditLog(FSDirectory fsd, String src, long mtime)
throws IOException {
assert fsd.hasWriteLock();
FSNamesystem fsn = fsd.getFSNamesystem();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<>();
final INodesInPath iip = fsd.getINodesInPath4Write(
FSDirectory.normalizePath(src), false);
if (!deleteAllowed(iip, src)) {
return;
}
List<INodeDirectory> snapshottableDirs = new ArrayList<>();
FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
long filesRemoved = unprotectedDelete(
fsd, iip, collectedBlocks, removedINodes, mtime);
fsn.removeSnapshottableDirs(snapshottableDirs);
if (filesRemoved >= 0) {
fsn.removeLeasesAndINodes(src, removedINodes, false);
fsn.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
}
}
/**
* Remove a file/directory from the namespace.
* <p>
* For large directories, deletion is incremental. The blocks under
* the directory are collected and deleted a small number at a time holding
* the {@link org.apache.hadoop.hdfs.server.namenode.FSNamesystem} lock.
* <p>
* For small directory or file the deletion is done in one shot.
*/
static BlocksMapUpdateInfo deleteInternal(
FSNamesystem fsn, String src, INodesInPath iip, boolean logRetryCache)
throws IOException {
assert fsn.hasWriteLock();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
}
FSDirectory fsd = fsn.getFSDirectory();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<>();
long mtime = now();
// Unlink the target directory from directory tree
long filesRemoved = delete(
fsd, iip, collectedBlocks, removedINodes, mtime);
if (filesRemoved < 0) {
return null;
}
fsd.getEditLog().logDelete(src, mtime, logRetryCache);
incrDeletedFileCount(filesRemoved);
fsn.removeLeasesAndINodes(src, removedINodes, true);
fsd.getEditLog().logSync();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
+ src +" is removed");
}
return collectedBlocks;
}
static void incrDeletedFileCount(long count) {
NameNode.getNameNodeMetrics().incrFilesDeleted(count);
}
private static boolean deleteAllowed(final INodesInPath iip,
final String src) {
if (iip.length() < 1 || iip.getLastINode() == null) {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
"DIR* FSDirectory.unprotectedDelete: failed to remove "
+ src + " because it does not exist");
}
return false;
} else if (iip.length() == 1) { // src is the root
NameNode.stateChangeLog.warn(
"DIR* FSDirectory.unprotectedDelete: failed to remove " + src +
" because the root is not allowed to be deleted");
return false;
}
return true;
}
/**
* Delete a path from the name space
* Update the count at each ancestor directory with quota
* @param iip the inodes resolved from the path
* @param collectedBlocks blocks collected from the deleted path
* @param removedINodes inodes that should be removed from inodeMap
* @param mtime the time the inode is removed
* @return the number of inodes deleted; 0 if no inodes are deleted.
*/
private static long unprotectedDelete(
FSDirectory fsd, INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
List<INode> removedINodes, long mtime) throws QuotaExceededException {
assert fsd.hasWriteLock();
// check if target node exists
INode targetNode = iip.getLastINode();
if (targetNode == null) {
return -1;
}
// record modification
final int latestSnapshot = iip.getLatestSnapshotId();
targetNode.recordModification(latestSnapshot);
// Remove the node from the namespace
long removed = fsd.removeLastINode(iip);
if (removed == -1) {
return -1;
}
// set the parent's modification time
final INodeDirectory parent = targetNode.getParent();
parent.updateModificationTime(mtime, latestSnapshot);
if (removed == 0) {
return 0;
}
// collect block
if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
} else {
Quota.Counts counts = targetNode.cleanSubtree(CURRENT_STATE_ID,
latestSnapshot, collectedBlocks, removedINodes, true);
parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-counts.get(Quota.DISKSPACE), true);
removed = counts.get(Quota.NAMESPACE);
}
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+ iip.getPath() + " is removed");
}
return removed;
}
}
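A note on the shape of the class above: FSDirDeleteOp keeps the long-standing two-phase delete. The subtree is unlinked and its blocks are collected while the write lock is held and the edit is logged; the collected blocks are then removed a small batch at a time, re-acquiring the lock per batch (see FSNamesystem.removeBlocks further down), so the lock is never held for the whole deletion. The sketch below is an illustrative, self-contained rendering of that pattern only; the class name, the Long block ids, and the batch-size constant are hypothetical stand-ins, not HDFS types or values.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Illustrative sketch only: the two-phase shape the delete path above follows.
// Phase 1 collects everything under one write-lock hold; phase 2 deletes the
// collected blocks in small batches, re-acquiring the lock per batch so other
// operations can interleave. All names here are hypothetical stand-ins.
public class TwoPhaseDeleteSketch {
  private static final int BLOCK_DELETION_INCREMENT = 1000;
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  // Phase 1: unlink the subtree and return the block ids found under it.
  List<Long> unlinkAndCollect(List<Long> blocksUnderSubtree) {
    lock.writeLock().lock();
    try {
      // In HDFS this is where the inodes are removed from the namespace and
      // the edit is logged; here we only hand back the collected blocks.
      return new ArrayList<>(blocksUnderSubtree);
    } finally {
      lock.writeLock().unlock();
    }
  }

  // Phase 2: incremental removal, analogous in spirit to FSNamesystem.removeBlocks.
  void removeBlocksIncrementally(List<Long> collected) {
    int start = 0;
    while (start < collected.size()) {
      int end = Math.min(start + BLOCK_DELETION_INCREMENT, collected.size());
      lock.writeLock().lock();
      try {
        for (Long blockId : collected.subList(start, end)) {
          // stand-in for blockManager.removeBlock(blockId)
        }
      } finally {
        lock.writeLock().unlock();
      }
      start = end;
    }
  }
}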

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java

@@ -281,7 +281,7 @@ class FSDirRenameOp
     try {
       if (unprotectedRenameTo(fsd, src, dst, srcIIP, dstIIP, mtime,
           collectedBlocks, options)) {
-        fsd.getFSNamesystem().incrDeletedFileCount(1);
+        FSDirDeleteOp.incrDeletedFileCount(1);
       }
     } finally {
       fsd.writeUnlock();
@@ -731,8 +731,7 @@ class FSDirRenameOp
           dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
           true).get(Quota.NAMESPACE) >= 0;
     }
-    fsd.getFSNamesystem().removePathAndBlocks(src, null, removedINodes,
-        false);
+    fsd.getFSNamesystem().removeLeasesAndINodes(src, removedINodes, false);
     return filesDeleted;
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -58,9 +58,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ByteArray;
-import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
@@ -537,54 +537,6 @@ public class FSDirectory implements Closeable {
    return resolvePath(path, pathComponents, this);
  }
/**
* Delete the target directory and collect the blocks under it
*
* @param iip the INodesInPath instance containing all the INodes for the path
* @param collectedBlocks Blocks under the deleted directory
* @param removedINodes INodes that should be removed from {@link #inodeMap}
* @return the number of files that have been removed
*/
long delete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
List<INode> removedINodes, long mtime) throws IOException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + iip.getPath());
}
final long filesRemoved;
writeLock();
try {
if (!deleteAllowed(iip, iip.getPath()) ) {
filesRemoved = -1;
} else {
List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
FSDirSnapshotOp.checkSnapshot(iip.getLastINode(), snapshottableDirs);
filesRemoved = unprotectedDelete(iip, collectedBlocks,
removedINodes, mtime);
namesystem.removeSnapshottableDirs(snapshottableDirs);
}
} finally {
writeUnlock();
}
return filesRemoved;
}
private static boolean deleteAllowed(final INodesInPath iip,
final String src) {
if (iip.length() < 1 || iip.getLastINode() == null) {
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+ "failed to remove " + src + " because it does not exist");
}
return false;
} else if (iip.length() == 1) { // src is the root
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
+ "failed to remove " + src
+ " because the root is not allowed to be deleted");
return false;
}
return true;
}
/**
 * @return true if the path is a non-empty directory; otherwise, return false.
 */
@@ -603,92 +553,6 @@ public class FSDirectory implements Closeable {
    }
  }
/**
* Delete a path from the name space
* Update the count at each ancestor directory with quota
* <br>
* Note: This is to be used by {@link FSEditLog} only.
* <br>
* @param src a string representation of a path to an inode
* @param mtime the time the inode is removed
* @throws SnapshotAccessControlException if path is in RO snapshot
*/
void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
QuotaExceededException, SnapshotAccessControlException, IOException {
assert hasWriteLock();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<INode>();
final INodesInPath inodesInPath = getINodesInPath4Write(
normalizePath(src), false);
long filesRemoved = -1;
if (deleteAllowed(inodesInPath, src)) {
List<INodeDirectory> snapshottableDirs = new ArrayList<INodeDirectory>();
FSDirSnapshotOp.checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
removedINodes, mtime);
namesystem.removeSnapshottableDirs(snapshottableDirs);
}
if (filesRemoved >= 0) {
getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
removedINodes, false);
}
}
/**
* Delete a path from the name space
* Update the count at each ancestor directory with quota
* @param iip the inodes resolved from the path
* @param collectedBlocks blocks collected from the deleted path
* @param removedINodes inodes that should be removed from {@link #inodeMap}
* @param mtime the time the inode is removed
* @return the number of inodes deleted; 0 if no inodes are deleted.
*/
long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
List<INode> removedINodes, long mtime) throws QuotaExceededException {
assert hasWriteLock();
// check if target node exists
INode targetNode = iip.getLastINode();
if (targetNode == null) {
return -1;
}
// record modification
final int latestSnapshot = iip.getLatestSnapshotId();
targetNode.recordModification(latestSnapshot);
// Remove the node from the namespace
long removed = removeLastINode(iip);
if (removed == -1) {
return -1;
}
// set the parent's modification time
final INodeDirectory parent = targetNode.getParent();
parent.updateModificationTime(mtime, latestSnapshot);
if (removed == 0) {
return 0;
}
// collect block
if (!targetNode.isInLatestSnapshot(latestSnapshot)) {
targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes);
} else {
Quota.Counts counts = targetNode.cleanSubtree(CURRENT_STATE_ID,
latestSnapshot, collectedBlocks, removedINodes, true);
parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE),
-counts.get(Quota.DISKSPACE), true);
removed = counts.get(Quota.NAMESPACE);
}
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+ iip.getPath() + " is removed");
}
return removed;
}
/**
 * Check whether the filepath could be created
 * @throws SnapshotAccessControlException if path is in RO snapshot

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -347,7 +347,7 @@ public class FSEditLogLoader {
       INodeFile oldFile = INodeFile.valueOf(iip.getLastINode(), path, true);
       if (oldFile != null && addCloseOp.overwrite) {
         // This is OP_ADD with overwrite
-        fsDir.unprotectedDelete(path, addCloseOp.mtime);
+        FSDirDeleteOp.deleteForEditLog(fsDir, path, addCloseOp.mtime);
         iip = INodesInPath.replace(iip, iip.length() - 1, null);
         oldFile = null;
       }
@@ -520,8 +520,8 @@
     }
     case OP_DELETE: {
       DeleteOp deleteOp = (DeleteOp)op;
-      fsDir.unprotectedDelete(
-          renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
+      FSDirDeleteOp.deleteForEditLog(
+          fsDir, renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
           deleteOp.timestamp);
       if (toAddRetryCache) {
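A note on the replay path above: OP_DELETE replay (and OP_ADD with overwrite) now goes through FSDirDeleteOp.deleteForEditLog, which applies the recorded deletion without permission checks and without writing a new edit, since both already happened when the original client call was executed. The toy sketch below only illustrates that idea; ToyNamespace and its fields are hypothetical stand-ins, not HDFS classes.

import java.util.ArrayList;
import java.util.List;

// Toy sketch only: why replay has its own entry point. A recorded OP_DELETE is
// applied as-is, mirroring the role of FSDirDeleteOp.deleteForEditLog above.
public class ReplaySketch {
  static class ToyNamespace {
    final List<String> paths = new ArrayList<>(List.of("/a", "/a/b", "/c"));

    void deleteForEditLog(String path, long mtime) {
      // apply the already-validated, already-logged delete verbatim
      paths.removeIf(p -> p.equals(path) || p.startsWith(path + "/"));
    }
  }

  public static void main(String[] args) {
    ToyNamespace ns = new ToyNamespace();
    ns.deleteForEditLog("/a", 1419630112000L);   // replay a recorded edit
    System.out.println(ns.paths);                // prints [/c]
  }
}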

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -150,7 +150,6 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -2474,11 +2473,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (overwrite) {
         toRemoveBlocks = new BlocksMapUpdateInfo();
         List<INode> toRemoveINodes = new ChunkedArrayList<INode>();
-        long ret = dir.delete(iip, toRemoveBlocks, toRemoveINodes, now());
+        long ret = FSDirDeleteOp.delete(dir, iip, toRemoveBlocks,
+                                        toRemoveINodes, now());
         if (ret >= 0) {
           iip = INodesInPath.replace(iip, iip.length() - 1, null);
-          incrDeletedFileCount(ret);
-          removePathAndBlocks(src, null, toRemoveINodes, true);
+          FSDirDeleteOp.incrDeletedFileCount(ret);
+          removeLeasesAndINodes(src, toRemoveINodes, true);
         }
       } else {
         // If lease soft limit time is expired, recover the lease
@@ -3531,30 +3531,29 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * description of exceptions
    */
   boolean delete(String src, boolean recursive, boolean logRetryCache)
-      throws AccessControlException, SafeModeException,
-      UnresolvedLinkException, IOException {
+      throws IOException {
+    waitForLoadingFSImage();
+    checkOperation(OperationCategory.WRITE);
+    BlocksMapUpdateInfo toRemovedBlocks = null;
+    writeLock();
     boolean ret = false;
     try {
-      ret = deleteInt(src, recursive, logRetryCache);
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot delete " + src);
+      toRemovedBlocks = FSDirDeleteOp.delete(
+          this, src, recursive, logRetryCache);
+      ret = toRemovedBlocks != null;
     } catch (AccessControlException e) {
       logAuditEvent(false, "delete", src);
       throw e;
+    } finally {
+      writeUnlock();
     }
-    return ret;
-  }
-
-  private boolean deleteInt(String src, boolean recursive, boolean logRetryCache)
-      throws AccessControlException, SafeModeException,
-      UnresolvedLinkException, IOException {
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
-    }
-    boolean status = deleteInternal(src, recursive, true, logRetryCache);
-    if (status) {
-      logAuditEvent(true, "delete", src);
-    }
-    return status;
-  }
+    if (toRemovedBlocks != null) {
+      removeBlocks(toRemovedBlocks); // Incremental deletion of blocks
+    }
+    logAuditEvent(true, "delete", src);
+    return ret;
+  }
 
   FSPermissionChecker getPermissionChecker()
@@ -3562,69 +3561,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    return dir.getPermissionChecker();
  }
/**
* Remove a file/directory from the namespace.
* <p>
* For large directories, deletion is incremental. The blocks under
* the directory are collected and deleted a small number at a time holding
* the {@link FSNamesystem} lock.
* <p>
* For small directory or file the deletion is done in one shot.
*
* @see ClientProtocol#delete(String, boolean) for description of exceptions
*/
private boolean deleteInternal(String src, boolean recursive,
boolean enforcePermission, boolean logRetryCache)
throws AccessControlException, SafeModeException, UnresolvedLinkException,
IOException {
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
List<INode> removedINodes = new ChunkedArrayList<INode>();
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
boolean ret = false;
waitForLoadingFSImage();
writeLock();
try {
checkOperation(OperationCategory.WRITE);
checkNameNodeSafeMode("Cannot delete " + src);
src = dir.resolvePath(pc, src, pathComponents);
final INodesInPath iip = dir.getINodesInPath4Write(src, false);
if (!recursive && dir.isNonEmptyDirectory(iip)) {
throw new PathIsNotEmptyDirectoryException(src + " is non empty");
}
if (enforcePermission && isPermissionEnabled) {
dir.checkPermission(pc, iip, false, null, FsAction.WRITE, null,
FsAction.ALL, true);
}
long mtime = now();
// Unlink the target directory from directory tree
long filesRemoved = dir.delete(iip, collectedBlocks, removedINodes,
mtime);
if (filesRemoved < 0) {
return false;
}
getEditLog().logDelete(src, mtime, logRetryCache);
incrDeletedFileCount(filesRemoved);
// Blocks/INodes will be handled later
removePathAndBlocks(src, null, removedINodes, true);
ret = true;
} finally {
writeUnlock();
}
getEditLog().logSync();
removeBlocks(collectedBlocks); // Incremental deletion of blocks
collectedBlocks.clear();
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* Namesystem.delete: "
+ src +" is removed");
}
return ret;
}
/**
 * From the given list, incrementally remove the blocks from blockManager
 * Writelock is dropped and reacquired every BLOCK_DELETION_INCREMENT to
@@ -3650,15 +3586,14 @@
   }
 
   /**
-   * Remove leases, inodes and blocks related to a given path
+   * Remove leases and inodes related to a given path
    * @param src The given path
-   * @param blocks Containing the list of blocks to be deleted from blocksMap
    * @param removedINodes Containing the list of inodes to be removed from
    *                      inodesMap
    * @param acquireINodeMapLock Whether to acquire the lock for inode removal
    */
-  void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks,
-      List<INode> removedINodes, final boolean acquireINodeMapLock) {
+  void removeLeasesAndINodes(String src, List<INode> removedINodes,
+      final boolean acquireINodeMapLock) {
     assert hasWriteLock();
     leaseManager.removeLeaseWithPrefixPath(src);
     // remove inodes from inodesMap
@@ -3675,11 +3610,6 @@
       }
       removedINodes.clear();
     }
-    if (blocks == null) {
-      return;
-    }
-    removeBlocksAndUpdateSafemodeTotal(blocks);
   }
 
   /**
@@ -4500,10 +4430,6 @@
     }
   }
 
-  void incrDeletedFileCount(long count) {
-    NameNode.getNameNodeMetrics().incrFilesDeleted(count);
-  }
-
   /**
    * Close file.
    * @param path
@@ -4638,7 +4564,13 @@
         for (BlockCollection bc : filesToDelete) {
           LOG.warn("Removing lazyPersist file " + bc.getName() + " with no replicas.");
-          deleteInternal(bc.getName(), false, false, false);
+          BlocksMapUpdateInfo toRemoveBlocks =
+              FSDirDeleteOp.deleteInternal(
+                  FSNamesystem.this, bc.getName(),
+                  INodesInPath.fromINode((INodeFile) bc), false);
+          if (toRemoveBlocks != null) {
+            removeBlocks(toRemoveBlocks); // Incremental deletion of blocks
+          }
         }
       } finally {
         writeUnlock();
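Client-visible semantics are unchanged by this consolidation: a non-recursive delete of a non-empty directory still fails with PathIsNotEmptyDirectoryException, and block reclamation still happens incrementally after the namespace change has been logged and synced. A minimal client-side usage, for reference (the directory path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal client-side usage; the refactoring above does not change these
// semantics. The directory path is illustrative.
public class DeleteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();          // picks up fs.defaultFS
    try (FileSystem fs = FileSystem.get(conf)) {
      boolean deleted = fs.delete(new Path("/tmp/example-dir"), true); // recursive
      System.out.println("deleted = " + deleted);
    }
  }
}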