HDFS-4152. Merge r1406326 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1471508 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2013-04-24 16:22:55 +00:00
parent c5227bd362
commit b2b151139c
7 changed files with 89 additions and 31 deletions

CHANGES.txt View File

@@ -59,6 +59,9 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4129. Add utility methods to dump NameNode in memory tree for
testing. (szetszwo via suresh)
HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
OPTIMIZATIONS
BUG FIXES

FSDirectory.java View File

@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
import org.apache.hadoop.hdfs.util.ByteArray;
@@ -765,7 +766,7 @@ public class FSDirectory implements Closeable {
if (removedDst != null) {
INode rmdst = removedDst;
removedDst = null;
List<Block> collectedBlocks = new ArrayList<Block>();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
}
@@ -1002,7 +1003,7 @@ public class FSDirectory implements Closeable {
* @param collectedBlocks Blocks under the deleted directory
* @return true on successful deletion; else false
*/
boolean delete(String src, List<Block>collectedBlocks)
boolean delete(String src, BlocksMapUpdateInfo collectedBlocks)
throws UnresolvedLinkException {
if (NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
@@ -1055,7 +1056,7 @@ public class FSDirectory implements Closeable {
void unprotectedDelete(String src, long mtime)
throws UnresolvedLinkException {
assert hasWriteLock();
List<Block> collectedBlocks = new ArrayList<Block>();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
int filesRemoved = unprotectedDelete(src, collectedBlocks, mtime);
if (filesRemoved > 0) {
getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
@@ -1070,7 +1071,7 @@ public class FSDirectory implements Closeable {
* @param mtime the time the inode is removed
* @return the number of inodes deleted; 0 if no inodes are deleted.
*/
int unprotectedDelete(String src, List<Block> collectedBlocks,
int unprotectedDelete(String src, BlocksMapUpdateInfo collectedBlocks,
long mtime) throws UnresolvedLinkException {
assert hasWriteLock();
src = normalizePath(src);

FSNamesystem.java View File

@@ -17,20 +17,20 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
@@ -166,6 +166,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
@@ -2749,7 +2750,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
boolean enforcePermission)
throws AccessControlException, SafeModeException, UnresolvedLinkException,
IOException {
ArrayList<Block> collectedBlocks = new ArrayList<Block>();
BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);
writeLock();
@@ -2781,21 +2782,26 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return true;
}
/**
/**
* From the given list, incrementally remove the blocks from blockManager
* Writelock is dropped and reacquired every BLOCK_DELETION_INCREMENT to
* ensure that other waiters on the lock can get in. See HDFS-2938
*
* @param blocks
* An instance of {@link BlocksMapUpdateInfo} which contains a list
* of blocks that need to be removed from blocksMap
*/
private void removeBlocks(List<Block> blocks) {
private void removeBlocks(BlocksMapUpdateInfo blocks) {
int start = 0;
int end = 0;
while (start < blocks.size()) {
List<Block> toDeleteList = blocks.getToDeleteList();
while (start < toDeleteList.size()) {
end = BLOCK_DELETION_INCREMENT + start;
end = end > blocks.size() ? blocks.size() : end;
end = end > toDeleteList.size() ? toDeleteList.size() : end;
writeLock();
try {
for (int i = start; i < end; i++) {
blockManager.removeBlock(blocks.get(i));
blockManager.removeBlock(toDeleteList.get(i));
}
} finally {
writeUnlock();
@@ -2804,7 +2810,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
void removePathAndBlocks(String src, List<Block> blocks) {
/**
* Remove leases and blocks related to a given path
* @param src The given path
* @param blocks Containing the list of blocks to be deleted from blocksMap
*/
void removePathAndBlocks(String src, BlocksMapUpdateInfo blocks) {
assert hasWriteLock();
leaseManager.removeLeaseWithPrefixPath(src);
if (blocks == null) {
@@ -2817,7 +2828,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
boolean trackBlockCounts = isSafeModeTrackingBlocks();
int numRemovedComplete = 0, numRemovedSafe = 0;
for (Block b : blocks) {
for (Block b : blocks.getToDeleteList()) {
if (trackBlockCounts) {
BlockInfo bi = blockManager.getStoredBlock(b);
if (bi.isComplete()) {

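Note on the FSNamesystem change above: removeBlocks() keeps the HDFS-2938 behavior of releasing and reacquiring the write lock between batches of BLOCK_DELETION_INCREMENT blocks; the patch only swaps the raw List&lt;Block&gt; for the new BlocksMapUpdateInfo container. A minimal standalone sketch of that batching pattern follows. The lock object, the batch value of 1000, and removeOne() are illustrative stand-ins, not code from this patch.

import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Sketch of the batching pattern in FSNamesystem.removeBlocks(): hold the write
 * lock only long enough to delete one batch, then release it so other waiters
 * can make progress between batches (HDFS-2938).
 */
public class IncrementalRemovalSketch {
  // Illustrative batch size; the real constant lives in FSNamesystem.
  private static final int BLOCK_DELETION_INCREMENT = 1000;
  private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock();

  void removeAll(List<Long> toDelete) {
    int start = 0;
    while (start < toDelete.size()) {
      int end = Math.min(start + BLOCK_DELETION_INCREMENT, toDelete.size());
      fsLock.writeLock().lock();
      try {
        for (int i = start; i < end; i++) {
          removeOne(toDelete.get(i));   // stands in for blockManager.removeBlock(...)
        }
      } finally {
        fsLock.writeLock().unlock();    // yield the lock between batches
      }
      start = end;
    }
  }

  private void removeOne(Long blockId) {
    // placeholder for the real block removal
  }
}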
INode.java View File

@@ -188,11 +188,15 @@ abstract class INode implements Comparable<byte[]> {
}
/**
* Collect all the blocks in all children of this INode.
* Count and return the number of files in the sub tree.
* Also clears references since this INode is deleted.
* Collect all the blocks in all children of this INode. Count and return the
* number of files in the sub tree. Also clears references since this INode is
* deleted.
*
* @param info
* Containing all the blocks collected from the children of this
* INode. These blocks later should be removed from the blocksMap.
*/
abstract int collectSubtreeBlocksAndClear(List<Block> v);
abstract int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info);
/** Compute {@link ContentSummary}. */
public final ContentSummary computeContentSummary() {
@@ -492,4 +496,48 @@ abstract class INode implements Comparable<byte[]> {
out.print(s.substring(s.lastIndexOf(getClass().getSimpleName())));
out.println(")");
}
/**
* Information used for updating the blocksMap when deleting files.
*/
public static class BlocksMapUpdateInfo {
/**
* The list of blocks that need to be removed from blocksMap
*/
private List<Block> toDeleteList;
public BlocksMapUpdateInfo(List<Block> toDeleteList) {
this.toDeleteList = toDeleteList == null ? new ArrayList<Block>()
: toDeleteList;
}
public BlocksMapUpdateInfo() {
toDeleteList = new ArrayList<Block>();
}
/**
* @return The list of blocks that need to be removed from blocksMap
*/
public List<Block> getToDeleteList() {
return toDeleteList;
}
/**
* Add a to-be-deleted block into the
* {@link BlocksMapUpdateInfo#toDeleteList}
* @param toDelete the to-be-deleted block
*/
public void addDeleteBlock(Block toDelete) {
if (toDelete != null) {
toDeleteList.add(toDelete);
}
}
/**
* Clear {@link BlocksMapUpdateInfo#toDeleteList}
*/
public void clear() {
toDeleteList.clear();
}
}
}
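For reference, here is a self-contained sketch of how the container added above is meant to be driven: deletion code fills it via addDeleteBlock() and the caller later drains getToDeleteList(). The Block stand-in and the main() driver are hypothetical; only the BlocksMapUpdateInfo shape mirrors the patch.

import java.util.ArrayList;
import java.util.List;

/** Hypothetical stand-in for org.apache.hadoop.hdfs.protocol.Block. */
class Block {
  private final long blockId;
  Block(long blockId) { this.blockId = blockId; }
  long getBlockId() { return blockId; }
}

/** Mirrors the shape of the BlocksMapUpdateInfo class introduced in INode.java. */
class BlocksMapUpdateInfo {
  private final List<Block> toDeleteList = new ArrayList<Block>();

  List<Block> getToDeleteList() { return toDeleteList; }

  void addDeleteBlock(Block toDelete) {
    if (toDelete != null) {     // null blocks are silently ignored, as in the patch
      toDeleteList.add(toDelete);
    }
  }

  void clear() { toDeleteList.clear(); }
}

public class BlocksMapUpdateInfoDemo {
  public static void main(String[] args) {
    // A delete operation collects the doomed blocks into one container...
    BlocksMapUpdateInfo collected = new BlocksMapUpdateInfo();
    collected.addDeleteBlock(new Block(1L));
    collected.addDeleteBlock(new Block(2L));
    collected.addDeleteBlock(null);

    // ...and the caller later walks the list when updating the blocks map.
    for (Block b : collected.getToDeleteList()) {
      System.out.println("would remove block " + b.getBlockId());
    }
    collected.clear();
  }
}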

INodeDirectory.java View File

@@ -27,7 +27,6 @@ import java.util.List;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import com.google.common.annotations.VisibleForTesting;
@@ -431,13 +430,13 @@ class INodeDirectory extends INode {
}
@Override
int collectSubtreeBlocksAndClear(List<Block> v) {
int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
int total = 1;
if (children == null) {
return total;
}
for (INode child : children) {
total += child.collectSubtreeBlocksAndClear(v);
total += child.collectSubtreeBlocksAndClear(info);
}
parent = null;
children = null;

INodeFile.java View File

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsAction;
@@ -152,11 +151,11 @@ class INodeFile extends INode implements BlockCollection {
}
@Override
int collectSubtreeBlocksAndClear(List<Block> v) {
int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
parent = null;
if(blocks != null && v != null) {
if(blocks != null && info != null) {
for (BlockInfo blk : blocks) {
v.add(blk);
info.addDeleteBlock(blk);
blk.setBlockCollection(null);
}
}

INodeSymlink.java View File

@@ -17,12 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
/**
* An INode representing a symbolic link.
@@ -64,7 +61,7 @@ public class INodeSymlink extends INode {
}
@Override
int collectSubtreeBlocksAndClear(List<Block> v) {
int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
return 1;
}