MAPREDUCE-4231. Update RAID to use the new BlockCollection interface.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1335661 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-05-08 17:53:15 +00:00
parent 0caac704fb
commit f6d6218348
4 changed files with 18 additions and 15 deletions
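
The diffs below replace the namenode-private FSInodeInfo type with the BlockCollection interface from the blockmanagement package. For orientation, here is a minimal sketch of what such an interface exposes, inferred from the call sites in this commit; only getName() actually appears in the hunks below, and the remaining members are assumptions, not a verbatim copy of the Hadoop source:

package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.hdfs.protocol.Block;

// Sketch only: getName() is the one member exercised by this commit's diffs.
// getBlocks() and getReplication() are assumed members, shown to suggest why
// block placement code can work against this interface instead of an inode.
public interface BlockCollection {
  /** @return the full path name of the collection, replacing
   *      FSInodeInfo.getFullPathName() in the code below. */
  String getName();

  /** @return the blocks of the collection (assumed member). */
  Block[] getBlocks();

  /** @return the replication factor of the collection (assumed member). */
  short getReplication();
}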


@@ -284,6 +284,9 @@ Release 2.0.0 - UNRELEASED
     MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState
     (Bikas Saha via bobby)
+
+    MAPREDUCE-4231. Update RAID to use the new BlockCollection interface.
+    (szetszwo)
 Release 0.23.3 - UNRELEASED
   INCOMPATIBLE CHANGES


@@ -144,7 +144,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
   /** {@inheritDoc} */
   @Override
-  public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+  public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
       Block block, short replicationFactor,
       Collection<DatanodeDescriptor> first,
       Collection<DatanodeDescriptor> second) {
@@ -425,7 +425,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
   }
   /**
-   * Cache results for FSInodeInfo.getFullPathName()
+   * Cache results for getFullPathName()
    */
   static class CachedFullPathNames {
     FSNamesystem namesystem;
@@ -446,8 +446,8 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
     };
     static private class INodeWithHashCode {
-      FSInodeInfo inode;
-      INodeWithHashCode(FSInodeInfo inode) {
+      BlockCollection inode;
+      INodeWithHashCode(BlockCollection inode) {
         this.inode = inode;
       }
       @Override
@@ -459,11 +459,11 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
         return System.identityHashCode(inode);
       }
       String getFullPathName() {
-        return inode.getFullPathName();
+        return inode.getName();
       }
     }
-    public String get(FSInodeInfo inode) throws IOException {
+    public String get(BlockCollection inode) throws IOException {
       return cacheInternal.get(new INodeWithHashCode(inode));
     }
   }
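
Aside on the hunks above: INodeWithHashCode keys the path-name cache by object identity, so the cache does not depend on whatever equals()/hashCode() the underlying inode defines. Below is a self-contained reconstruction of the wrapper after this change; the equals() body does not appear in the hunks and is an assumption based on the identityHashCode() pairing:

    // Identity-keyed cache wrapper, reconstructed from the diff above.
    static private class INodeWithHashCode {
      final BlockCollection inode;

      INodeWithHashCode(BlockCollection inode) {
        this.inode = inode;
      }

      @Override
      public boolean equals(Object obj) {
        // Reference equality (assumed), matching the identity-based hashCode().
        return obj instanceof INodeWithHashCode
            && inode == ((INodeWithHashCode) obj).inode;
      }

      @Override
      public int hashCode() {
        return System.identityHashCode(inode);
      }

      String getFullPathName() {
        // BlockCollection.getName() stands in for FSInodeInfo.getFullPathName().
        return inode.getName();
      }
    }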


@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedFullPathNames;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.CachedLocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRaid.FileType;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRaidTestUtil;
@@ -241,7 +240,7 @@ public class TestBlockPlacementPolicyRaid {
     // test full path cache
     CachedFullPathNames cachedFullPathNames =
         new CachedFullPathNames(namesystem);
-    final FSInodeInfo[] inodes = NameNodeRaidTestUtil.getFSInodeInfo(
+    final BlockCollection[] inodes = NameNodeRaidTestUtil.getBlockCollections(
         namesystem, file1, file2);
     verifyCachedFullPathNameResult(cachedFullPathNames, inodes[0]);
@@ -477,14 +476,14 @@ public class TestBlockPlacementPolicyRaid {
   }
   private void verifyCachedFullPathNameResult(
-      CachedFullPathNames cachedFullPathNames, FSInodeInfo inode)
+      CachedFullPathNames cachedFullPathNames, BlockCollection inode)
       throws IOException {
-    String res1 = inode.getFullPathName();
+    String res1 = inode.getName();
     String res2 = cachedFullPathNames.get(inode);
     LOG.info("Actual path name: " + res1);
     LOG.info("Cached path name: " + res2);
     Assert.assertEquals(cachedFullPathNames.get(inode),
-        inode.getFullPathName());
+        inode.getName());
   }
   private void verifyCachedBlocksResult(CachedLocatedBlocks cachedBlocks,
@@ -503,7 +502,7 @@ public class TestBlockPlacementPolicyRaid {
   private Collection<LocatedBlock> getCompanionBlocks(
       FSNamesystem namesystem, BlockPlacementPolicyRaid policy,
       ExtendedBlock block) throws IOException {
-    INodeFile inode = blockManager.blocksMap.getINode(block
+    INodeFile inode = (INodeFile)blockManager.blocksMap.getINode(block
         .getLocalBlock());
     FileType type = policy.getFileType(inode.getFullPathName());
     return policy.getCompanionBlocks(inode.getFullPathName(), type,
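
Note on the cast introduced above: it suggests that blocksMap.getINode(..) now returns the BlockCollection interface rather than INodeFile, so callers that need inode-specific methods must downcast. An illustration of the inferred pattern follows; the return type is deduced from the cast, not quoted from the Hadoop source:

    // Inferred calling pattern after this commit (sketch, not verbatim):
    BlockCollection bc = blockManager.blocksMap.getINode(block.getLocalBlock());
    INodeFile inode = (INodeFile) bc;  // valid only when the collection is a file inode
    FileType type = policy.getFileType(inode.getFullPathName());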


@@ -18,16 +18,17 @@
 package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 public class NameNodeRaidTestUtil {
-  public static FSInodeInfo[] getFSInodeInfo(final FSNamesystem namesystem,
+  public static BlockCollection[] getBlockCollections(final FSNamesystem namesystem,
       final String... files) throws UnresolvedLinkException {
-    final FSInodeInfo[] inodes = new FSInodeInfo[files.length];
+    final BlockCollection[] inodes = new BlockCollection[files.length];
     final FSDirectory dir = namesystem.dir;
     dir.readLock();
     try {
       for(int i = 0; i < files.length; i++) {
-        inodes[i] = dir.rootDir.getNode(files[i], true);
+        inodes[i] = (BlockCollection)dir.rootDir.getNode(files[i], true);
       }
       return inodes;
     } finally {