HDFS-4346. Add SequentialNumber as a base class for INodeId and GenerationStamp.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1428167 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-01-03 05:12:40 +00:00
parent 6f0eb6f5b1
commit 36c5fe9961
9 changed files with 36 additions and 143 deletions

View File

@@ -174,6 +174,9 @@ Trunk (Unreleased)
     HDFS-4334. Add a unique id to INode. (Brandon Li via szetszwo)
 
+    HDFS-4346. Add SequentialNumber as a base class for INodeId and
+    GenerationStamp. (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

View File

@@ -17,19 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.SequentialNumber;
 
 /****************************************************************
  * A GenerationStamp is a Hadoop FS primitive, identified by a long.
  ****************************************************************/
 @InterfaceAudience.Private
-public class GenerationStamp implements Comparable<GenerationStamp> {
+public class GenerationStamp extends SequentialNumber {
   /**
-   * The first valid generation stamp.
+   * The last reserved generation stamp.
    */
-  public static final long FIRST_VALID_STAMP = 1000L;
+  public static final long LAST_RESERVED_STAMP = 1000L;
 
   /**
    * Generation stamp of blocks that pre-date the introduction
@@ -37,62 +36,10 @@ public class GenerationStamp implements Comparable<GenerationStamp> {
    */
   public static final long GRANDFATHER_GENERATION_STAMP = 0;
 
-  private AtomicLong genstamp = new AtomicLong();
-
   /**
-   * Create a new instance, initialized to FIRST_VALID_STAMP.
+   * Create a new instance, initialized to {@link #LAST_RESERVED_STAMP}.
    */
   public GenerationStamp() {
-    this(GenerationStamp.FIRST_VALID_STAMP);
-  }
-
-  /**
-   * Create a new instance, initialized to the specified value.
-   */
-  GenerationStamp(long stamp) {
-    genstamp.set(stamp);
-  }
-
-  /**
-   * Returns the current generation stamp
-   */
-  public long getStamp() {
-    return genstamp.get();
-  }
-
-  /**
-   * Sets the current generation stamp
-   */
-  public void setStamp(long stamp) {
-    genstamp.set(stamp);
-  }
-
-  /**
-   * First increments the counter and then returns the stamp
-   */
-  public long nextStamp() {
-    return genstamp.incrementAndGet();
-  }
-
-  @Override // Comparable
-  public int compareTo(GenerationStamp that) {
-    long stamp1 = this.genstamp.get();
-    long stamp2 = that.genstamp.get();
-    return stamp1 < stamp2 ? -1 :
-           stamp1 > stamp2 ? 1 : 0;
-  }
-
-  @Override // Object
-  public boolean equals(Object o) {
-    if (!(o instanceof GenerationStamp)) {
-      return false;
-    }
-    return compareTo((GenerationStamp)o) == 0;
-  }
-
-  @Override // Object
-  public int hashCode() {
-    long stamp = genstamp.get();
-    return (int) (stamp^(stamp>>>32));
+    super(LAST_RESERVED_STAMP);
   }
 }
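
Note: the new base class, org.apache.hadoop.util.SequentialNumber, is referenced by the import above but its source is not part of this diff. Below is a minimal sketch of what it presumably provides, inferred only from the calls made in the changed files (getCurrentValue, setCurrentValue, nextValue, and a skipTo that throws IllegalStateException when asked to move the counter backwards); the actual class in hadoop-common may differ in detail.

package org.apache.hadoop.util;

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * Sketch (assumption): a thread-safe, monotonically increasing counter
 * shared by GenerationStamp and INodeId.
 */
@InterfaceAudience.Private
public abstract class SequentialNumber {
  private final AtomicLong currentValue;

  /** Create a new instance with the given initial value. */
  protected SequentialNumber(final long initialValue) {
    currentValue = new AtomicLong(initialValue);
  }

  /** @return the current value. */
  public long getCurrentValue() {
    return currentValue.get();
  }

  /** Set the current value unconditionally (used by tests and clear()). */
  public void setCurrentValue(final long value) {
    currentValue.set(value);
  }

  /** Increment and then return the new value. */
  public long nextValue() {
    return currentValue.incrementAndGet();
  }

  /** Skip to the given value; refuses to move the counter backwards. */
  public void skipTo(final long newValue) throws IllegalStateException {
    if (newValue < getCurrentValue()) {
      throw new IllegalStateException(
          "Cannot skip to a value less than the current value (="
          + getCurrentValue() + "), newValue=" + newValue);
    }
    currentValue.set(newValue);
  }

  /** Value-based equality, replacing the equals/hashCode removed from the subclasses. */
  @Override
  public boolean equals(final Object that) {
    if (that == null || this.getClass() != that.getClass()) {
      return false;
    }
    return getCurrentValue() == ((SequentialNumber) that).getCurrentValue();
  }

  @Override
  public int hashCode() {
    final long v = getCurrentValue();
    return (int) v ^ (int) (v >>> 32);
  }
}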

View File

@@ -378,25 +378,29 @@ private void logAuditEvent(boolean succeeded,
   private INodeId inodeId;
 
   /**
-   * Set the last allocated inode id when fsimage is loaded or editlog is
-   * applied.
-   * @throws IOException
+   * Set the last allocated inode id when fsimage or editlog is loaded.
    */
   public void resetLastInodeId(long newValue) throws IOException {
-    inodeId.resetLastInodeId(newValue);
+    try {
+      inodeId.skipTo(newValue);
+    } catch(IllegalStateException ise) {
+      throw new IOException(ise);
+    }
   }
 
   /** Should only be used for tests to reset to any value */
   void resetLastInodeIdWithoutChecking(long newValue) {
-    inodeId.resetLastInodeIdWithoutChecking(newValue);
+    inodeId.setCurrentValue(newValue);
   }
 
+  /** @return the last inode ID. */
   public long getLastInodeId() {
-    return inodeId.getLastInodeId();
+    return inodeId.getCurrentValue();
   }
 
+  /** Allocate a new inode ID. */
   public long allocateNewInodeId() {
-    return inodeId.allocateNewInodeId();
+    return inodeId.nextValue();
   }
 
   /**
@@ -405,9 +409,9 @@ public long allocateNewInodeId() {
   void clear() {
     dir.reset();
     dtSecretManager.reset();
-    generationStamp.setStamp(GenerationStamp.FIRST_VALID_STAMP);
+    generationStamp.setCurrentValue(GenerationStamp.LAST_RESERVED_STAMP);
     leaseManager.removeAllLeases();
-    inodeId.resetLastInodeIdWithoutChecking(INodeId.LAST_RESERVED_ID);
+    inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
   }
 
   @VisibleForTesting
@@ -2537,8 +2541,7 @@ private Block allocateBlock(String src, INodesInPath inodesInPath,
       b.setBlockId(DFSUtil.getRandom().nextLong());
     }
     // Increment the generation stamp for every new block.
-    nextGenerationStamp();
-    b.setGenerationStamp(getGenerationStamp());
+    b.setGenerationStamp(nextGenerationStamp());
     b = dir.addBlock(src, inodesInPath, b, targets);
     NameNode.stateChangeLog.info("BLOCK* allocateBlock: " + src + ". "
         + blockPoolId + " " + b);
@@ -4762,14 +4765,14 @@ public int getNumStaleDataNodes() {
    * Sets the generation stamp for this filesystem
    */
   void setGenerationStamp(long stamp) {
-    generationStamp.setStamp(stamp);
+    generationStamp.setCurrentValue(stamp);
   }
 
   /**
    * Gets the generation stamp for this filesystem
    */
   long getGenerationStamp() {
-    return generationStamp.getStamp();
+    return generationStamp.getCurrentValue();
   }
 
   /**
@@ -4781,7 +4784,7 @@ private long nextGenerationStamp() throws SafeModeException {
       throw new SafeModeException(
           "Cannot get next generation stamp", safeMode);
     }
-    long gs = generationStamp.nextStamp();
+    final long gs = generationStamp.nextValue();
     getEditLog().logGenerationStamp(gs);
     // NB: callers sync the log
     return gs;
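
Note: to summarize the renames applied throughout FSNamesystem, here is an illustration-only snippet that maps the removed per-class methods onto the shared SequentialNumber API. GenerationStamp is used because it is public; the class name SequentialNumberMigrationExample is invented for this example and is not part of the patch.

import org.apache.hadoop.hdfs.server.common.GenerationStamp;

public class SequentialNumberMigrationExample {
  public static void main(String[] args) {
    // Starts at LAST_RESERVED_STAMP (1000), as set by the new constructor.
    GenerationStamp generationStamp = new GenerationStamp();

    long current = generationStamp.getCurrentValue(); // was getStamp()
    generationStamp.setCurrentValue(current);         // was setStamp(long)
    long next = generationStamp.nextValue();          // was nextStamp()

    // skipTo replaces the checked reset (INodeId.resetLastInodeId); it throws
    // IllegalStateException on an attempt to move backwards, which
    // FSNamesystem#resetLastInodeId now wraps in an IOException.
    generationStamp.skipTo(next + 10);
    System.out.println("generation stamp is now " + generationStamp.getCurrentValue());
  }
}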

View File

@@ -17,16 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicLong;
-
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.SequentialNumber;
 
 /**
  * An id which uniquely identifies an inode
  */
 @InterfaceAudience.Private
-class INodeId implements Comparable<INodeId> {
+class INodeId extends SequentialNumber {
   /**
    * The last reserved inode id. Reserve id 1 to 1000 for potential future
    * usage. The id won't be recycled and is not expected to wrap around in a
@@ -40,66 +38,7 @@ class INodeId implements Comparable<INodeId> {
    */
   public static final long GRANDFATHER_INODE_ID = 0;
 
-  private AtomicLong lastInodeId = new AtomicLong();
-
-  /**
-   * Create a new instance, initialized to LAST_RESERVED_ID.
-   */
   INodeId() {
-    lastInodeId.set(INodeId.LAST_RESERVED_ID);
-  }
-
-  /**
-   * Set the last allocated inode id when fsimage is loaded or editlog is
-   * applied.
-   * @throws IOException
-   */
-  void resetLastInodeId(long newValue) throws IOException {
-    if (newValue < getLastInodeId()) {
-      throw new IOException(
-          "Can't reset lastInodeId to be less than its current value "
-          + getLastInodeId() + ", newValue=" + newValue);
-    }
-
-    lastInodeId.set(newValue);
-  }
-
-  void resetLastInodeIdWithoutChecking(long newValue) {
-    lastInodeId.set(newValue);
-  }
-
-  long getLastInodeId() {
-    return lastInodeId.get();
-  }
-
-  /**
-   * First increment the counter and then get the id.
-   */
-  long allocateNewInodeId() {
-    return lastInodeId.incrementAndGet();
-  }
-
-  @Override
-  // Comparable
-  public int compareTo(INodeId that) {
-    long id1 = this.getLastInodeId();
-    long id2 = that.getLastInodeId();
-    return id1 < id2 ? -1 : id1 > id2 ? 1 : 0;
-  }
-
-  @Override
-  // Object
-  public boolean equals(Object o) {
-    if (!(o instanceof INodeId)) {
-      return false;
-    }
-    return compareTo((INodeId) o) == 0;
-  }
-
-  @Override
-  // Object
-  public int hashCode() {
-    long id = getLastInodeId();
-    return (int) (id ^ (id >>> 32));
+    super(LAST_RESERVED_ID);
   }
 }

View File

@@ -56,7 +56,7 @@ public void testBlockListMoveToHead() throws Exception {
     LOG.info("Building block list...");
     for (int i = 0; i < MAX_BLOCKS; i++) {
-      blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+      blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
       blockInfoList.add(new BlockInfo(blockList.get(i), 3));
       dd.addBlock(blockInfoList.get(i));

View File

@@ -54,7 +54,7 @@ public void testCompInvalidate() throws Exception {
     for (int i=0; i<nodes.length; i++) {
       for(int j=0; j<3*blockInvalidateLimit+1; j++) {
         Block block = new Block(i*(blockInvalidateLimit+1)+j, 0,
-            GenerationStamp.FIRST_VALID_STAMP);
+            GenerationStamp.LAST_RESERVED_STAMP);
         bm.addToInvalidates(block, nodes[i]);
       }
     }

View File

@@ -44,7 +44,7 @@ public void testGetInvalidateBlocks() throws Exception {
     DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();
     ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
     for (int i=0; i<MAX_BLOCKS; i++) {
-      blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+      blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
     }
     dd.addBlocksToBeInvalidated(blockList);
     Block[] bc = dd.getInvalidateBlocks(MAX_LIMIT);

View File

@@ -75,7 +75,8 @@ public void testHeartbeat() throws Exception {
       synchronized(hm) {
         for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
           dd.addBlockToBeReplicated(
-              new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
+              new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP),
+              ONE_TARGET);
         }
         DatanodeCommand[] cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd,
             namesystem).getCommands();
@@ -85,7 +86,7 @@ public void testHeartbeat() throws Exception {
         ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
         for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
-          blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+          blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
         }
         dd.addBlocksToBeInvalidated(blockList);
         cmds = NameNodeAdapter.sendHeartBeat(nodeReg, dd, namesystem)

View File

@@ -54,7 +54,7 @@ public class CreateEditsLog {
   static final String EDITS_DIR = "/tmp/EditsLogOut";
   static String edits_dir = EDITS_DIR;
   static final public long BLOCK_GENERATION_STAMP =
-      GenerationStamp.FIRST_VALID_STAMP;
+      GenerationStamp.LAST_RESERVED_STAMP;
 
   static void addFiles(FSEditLog editLog, int numFiles, short replication,
                        int blocksPerFile, long startingBlockId,