svn merge -c 1332876 from trunk for HDFS-3339. Change INode to package private.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1332877 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Tsz-wo Sze 2012-05-01 23:03:36 +00:00
parent 7c45bfee33
commit 06770ff55b
3 changed files with 8 additions and 5 deletions

View File

@ -275,6 +275,8 @@ Release 2.0.0 - UNRELEASED
HDFS-3322. Use HdfsDataInputStream and HdfsDataOutputStream in Hdfs.
(szetszwo)
HDFS-3339. Change INode to package private. (John George via szetszwo)
OPTIMIZATIONS
HDFS-2477. Optimize computing the diff between a block report and the

View File

@ -57,7 +57,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@ -2382,7 +2381,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
// necessary. In that case, put block on a possibly-will-
// be-replicated list.
//
INode fileINode = blocksMap.getINode(block); INodeFile fileINode = blocksMap.getINode(block);
if (fileINode != null) {
namesystem.decrementSafeBlockCount(block);
updateNeededReplications(block, -1, 0);
@ -2614,7 +2613,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
NumberReplicas num) {
int curReplicas = num.liveReplicas();
int curExpectedReplicas = getReplication(block);
INode fileINode = blocksMap.getINode(block); INodeFile fileINode = blocksMap.getINode(block);
Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
StringBuilder nodeList = new StringBuilder();
while (nodeIter.hasNext()) {
@ -2665,7 +2664,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
final Iterator<? extends Block> it = srcNode.getBlockIterator();
while(it.hasNext()) {
final Block block = it.next();
INode fileINode = blocksMap.getINode(block); INodeFile fileINode = blocksMap.getINode(block);
if (fileINode != null) {
NumberReplicas num = countNodes(block);

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
@ -34,7 +35,8 @@ import org.apache.hadoop.util.StringUtils;
* This is a base INode class containing common fields for file and
* directory inodes.
*/
public abstract class INode implements Comparable<byte[]>, FSInodeInfo { @InterfaceAudience.Private
abstract class INode implements Comparable<byte[]>, FSInodeInfo {
/*
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.