svn merge -c 1348998 from trunk for HDFS-3052. Change INodeFile and INodeFileUnderConstruction to package private.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1349089 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-06-12 01:57:38 +00:00
parent 2846979f16
commit 97ab65f3ab
8 changed files with 40 additions and 49 deletions
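HDFS-3052 shrinks the namenode's exported API: INodeFile and INodeFileUnderConstruction drop their public modifier, so only code in org.apache.hadoop.hdfs.server.namenode can reference them, and the classes are tagged @InterfaceAudience.Private to document that intent. As a reminder of the semantics, a minimal sketch with hypothetical names (not HDFS code):

    // Foo.java -- a package-private class: no access modifier on the declaration.
    package com.example.internal;

    import org.apache.hadoop.classification.InterfaceAudience;

    // The annotation is documentation only; the compiler does not enforce it.
    // Omitting `public` is what actually hides the class from other packages.
    @InterfaceAudience.Private
    class Foo {
      static String greet() { return "visible only within com.example.internal"; }
    }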

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -65,6 +65,9 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-1013. Miscellaneous improvements to HTML markup for web UIs
     (Eugene Koontz via todd)
 
+    HDFS-3052. Change INodeFile and INodeFileUnderConstruction to package
+    private. (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.util.LightWeightGSet;
@@ -27,7 +28,10 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet;
  * the {@link BlockCollection} it is part of and datanodes where the replicas of
  * the block are stored.
  */
+@InterfaceAudience.Private
 public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
+  public static final BlockInfo[] EMPTY_ARRAY = {};
+
   private BlockCollection bc;
 
   /** For implementing {@link LightWeightGSet.LinkedElement} interface */
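The new EMPTY_ARRAY constant replaces per-call allocations of an empty BlockInfo[] (previously hidden inside an INodeFile convenience constructor, deleted below). Zero-length arrays are immutable, so one shared instance is safe to hand out everywhere. A self-contained sketch of the idiom with generic names:

    class EmptyArrayIdiom {
      // One shared zero-length array; immutable, so sharing is harmless.
      static final String[] EMPTY = {};

      static String[] namesOrEmpty(String[] names) {
        // Reuse the constant instead of allocating `new String[0]` per call.
        return (names == null) ? EMPTY : names;
      }

      public static void main(String[] args) {
        assert namesOrEmpty(null).length == 0;
      }
    }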

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -277,7 +277,7 @@ public class FSDirectory implements Closeable {
           preferredBlockSize, modificationTime, clientName,
           clientMachine, null);
     } else {
-      newNode = new INodeFile(permissions, 0, replication,
+      newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
                               modificationTime, atime, preferredBlockSize);
     }
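The new call is behaviorally identical to the old one: the int overload simply built `new BlockInfo[nrBlocks]`, and this call site always passed 0. Spelling out BlockInfo.EMPTY_ARRAY makes the empty block list explicit at the call site and lets the delegating constructor be deleted (see the INodeFile.java hunk below). Side by side, as a comment sketch:

    // before: magic 0 routed through a telescoping constructor
    //   newNode = new INodeFile(permissions, 0, replication,
    //                           modificationTime, atime, preferredBlockSize);
    // after: the empty block list is stated directly
    //   newNode = new INodeFile(permissions, BlockInfo.EMPTY_ARRAY, replication,
    //                           modificationTime, atime, preferredBlockSize);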

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1590,7 +1590,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
 
     try {
-      INode myFile = dir.getFileINode(src);
+      INodeFile myFile = dir.getFileINode(src);
       recoverLeaseInternal(myFile, src, holder, clientMachine, false);
 
       try {
@@ -1666,22 +1666,20 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * @throws UnresolvedLinkException
    * @throws IOException
    */
-  public LocatedBlock prepareFileForWrite(String src, INode file,
+  LocatedBlock prepareFileForWrite(String src, INodeFile file,
       String leaseHolder, String clientMachine, DatanodeDescriptor clientNode,
-      boolean writeToEditLog)
-      throws UnresolvedLinkException, IOException {
-    INodeFile node = (INodeFile) file;
+      boolean writeToEditLog) throws IOException {
     INodeFileUnderConstruction cons = new INodeFileUnderConstruction(
-                                    node.getLocalNameBytes(),
-                                    node.getReplication(),
-                                    node.getModificationTime(),
-                                    node.getPreferredBlockSize(),
-                                    node.getBlocks(),
-                                    node.getPermissionStatus(),
+                                    file.getLocalNameBytes(),
+                                    file.getReplication(),
+                                    file.getModificationTime(),
+                                    file.getPreferredBlockSize(),
+                                    file.getBlocks(),
+                                    file.getPermissionStatus(),
                                     leaseHolder,
                                     clientMachine,
                                     clientNode);
-    dir.replaceNode(src, node, cons);
+    dir.replaceNode(src, file, cons);
     leaseManager.addLease(cons.getClientName(), src);
 
     LocatedBlock ret = blockManager.convertLastBlockToUnderConstruction(cons);
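Narrowing the parameter from INode to INodeFile moves a type error from runtime to compile time: the old body downcast with `(INodeFile) file`, which could throw ClassCastException, while the new signature states the precondition directly and lets the unneeded cast and the UnresolvedLinkException clause go. The general shape of the refactoring, with hypothetical types for illustration:

    class Narrowing {
      static class Node {}
      static class FileNode extends Node { long size() { return 0; } }

      // before: accepts too general a type, then casts.
      static long sizeOfOld(Node n) {
        FileNode f = (FileNode) n;   // may fail at runtime
        return f.size();
      }

      // after: the signature carries the precondition; no cast needed.
      static long sizeOf(FileNode f) {
        return f.size();
      }
    }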

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -25,13 +25,13 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 
 /** I-node for closed file. */
 @InterfaceAudience.Private
-public class INodeFile extends INode implements BlockCollection {
+class INodeFile extends INode implements BlockCollection {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
 
   //Number of bits for Block size
@@ -45,13 +45,6 @@ public class INodeFile extends INode implements BlockCollection {
 
   BlockInfo blocks[] = null;
 
-  INodeFile(PermissionStatus permissions,
-            int nrBlocks, short replication, long modificationTime,
-            long atime, long preferredBlockSize) {
-    this(permissions, new BlockInfo[nrBlocks], replication,
-         modificationTime, atime, preferredBlockSize);
-  }
-
   INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
             short replication, long modificationTime,
             long atime, long preferredBlockSize) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -32,8 +33,8 @@ import com.google.common.base.Joiner;
 /**
  * I-node for file being written.
  */
-public class INodeFileUnderConstruction extends INodeFile
-                                        implements MutableBlockCollection {
+@InterfaceAudience.Private
+class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollection {
   private String clientName;         // lease holder
   private final String clientMachine;
   private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@@ -45,7 +46,7 @@ public class INodeFileUnderConstruction extends INodeFile
                              String clientName,
                              String clientMachine,
                              DatanodeDescriptor clientNode) {
-    super(permissions.applyUMask(UMASK), 0, replication,
+    super(permissions.applyUMask(UMASK), BlockInfo.EMPTY_ARRAY, replication,
           modTime, modTime, preferredBlockSize);
     this.clientName = clientName;
     this.clientMachine = clientMachine;

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java

@@ -18,39 +18,34 @@
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collection;
-import java.util.List;
-import java.util.Random;
-
-import static org.junit.Assert.*;
 import org.junit.Test;
-
-import com.google.common.collect.Lists;
 
 /**
  * A JUnit test for checking if restarting DFS preserves the
 * blocks that are part of an unclosed file.
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -17,7 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,14 +31,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.net.NetworkTopology;
 import org.junit.Before;
 import org.junit.Test;
@@ -381,11 +378,11 @@ public class TestBlockManager {
   }
 
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
-    INodeFile iNode = Mockito.mock(INodeFile.class);
-    Mockito.doReturn((short)3).when(iNode).getReplication();
+    BlockCollection bc = Mockito.mock(BlockCollection.class);
+    Mockito.doReturn((short)3).when(bc).getReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
-    bm.blocksMap.addBlockCollection(blockInfo, iNode);
+    bm.blocksMap.addBlockCollection(blockInfo, bc);
     return blockInfo;
   }
 
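Mocking BlockCollection rather than INodeFile serves two purposes: the test (in ...blockmanagement) can no longer see the package-private INodeFile (in ...namenode), and the block-management code only ever needed the BlockCollection interface, so the test now depends on exactly what the code under test consumes. A standalone Mockito sketch of the same pattern, with generic names rather than the HDFS types:

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    public class MockNarrowInterface {
      // The narrowest contract the code under test needs.
      interface Replicated { short getReplication(); }

      public static void main(String[] args) {
        Replicated bc = mock(Replicated.class);          // mock the interface, not a concrete class
        doReturn((short) 3).when(bc).getReplication();   // stub without invoking a real method
        System.out.println(bc.getReplication());         // prints 3
      }
    }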