HDFS-4206. Merge change r1410996 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1471522 13f79535-47bb-0310-9956-ffa450edef68
Author: Suresh Srinivas
Date:   2013-04-24 16:38:53 +00:00
Parent: b2b151139c
Commit: 876c1c9098

10 changed files with 169 additions and 137 deletions


@@ -62,6 +62,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
     INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)
 
+    HDFS-4206. Change the fields in INode and its subclasses to private.
+    (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES


@@ -305,14 +305,14 @@ public class FSDirectory implements Closeable {
     return newNode;
   }
 
-  INodeDirectory addToParent(byte[] src, INodeDirectory parentINode,
+  INodeDirectory addToParent(INodeDirectory parentINode,
       INode newNode, boolean propagateModTime) {
     // NOTE: This does not update space counts for parents
     INodeDirectory newParent = null;
     writeLock();
     try {
       try {
-        newParent = rootDir.addToParent(src, newNode, parentINode,
+        newParent = rootDir.addToParent(newNode, parentINode,
             propagateModTime);
         cacheName(newNode);
       } catch (FileNotFoundException e) {
@@ -538,7 +538,7 @@ public class FSDirectory implements Closeable {
       return true;
     }
     if (srcInode.isSymlink() &&
-        dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
+        dst.equals(((INodeSymlink)srcInode).getSymlinkString())) {
       throw new FileAlreadyExistsException(
           "Cannot rename symlink "+src+" to its target "+dst);
     }
@@ -662,7 +662,7 @@ public class FSDirectory implements Closeable {
           "The source "+src+" and destination "+dst+" are the same");
     }
     if (srcInode.isSymlink() &&
-        dst.equals(((INodeSymlink)srcInode).getLinkValue())) {
+        dst.equals(((INodeSymlink)srcInode).getSymlinkString())) {
       throw new FileAlreadyExistsException(
           "Cannot rename symlink "+src+" to its target "+dst);
     }
@@ -701,16 +701,17 @@ public class FSDirectory implements Closeable {
             + error);
         throw new FileAlreadyExistsException(error);
       }
-      List<INode> children = dstInode.isDirectory() ?
-          ((INodeDirectory) dstInode).getChildren() : null;
-      if (children != null && children.size() != 0) {
-        error = "rename cannot overwrite non empty destination directory "
-            + dst;
-        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-            + error);
-        throw new IOException(error);
-      }
+      if (dstInode.isDirectory()) {
+        final List<INode> children = ((INodeDirectory) dstInode
+            ).getChildrenList();
+        if (!children.isEmpty()) {
+          error = "rename destination directory is not empty: " + dst;
+          NameNode.stateChangeLog.warn(
+              "DIR* FSDirectory.unprotectedRenameTo: " + error);
+          throw new IOException(error);
+        }
+      }
     }
     if (dstInodes[dstInodes.length - 2] == null) {
       error = "rename destination parent " + dst + " not found.";
       NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
@@ -1172,7 +1173,7 @@ public class FSDirectory implements Closeable {
     HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
     for (int i=0; i<numOfListing; i++) {
       INode cur = contents.get(startChild+i);
-      listing[i] = createFileStatus(cur.name, cur, needLocation);
+      listing[i] = createFileStatus(cur.getLocalNameBytes(), cur, needLocation);
     }
     return new DirectoryListing(
         listing, totalNumChildren-startChild-numOfListing);
@@ -1360,7 +1361,7 @@ public class FSDirectory implements Closeable {
     for(int i = 0; i < numOfINodes; i++) {
       if (inodes[i].isQuotaSet()) { // a directory with quota
         INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
-        node.updateNumItemsInTree(nsDelta, dsDelta);
+        node.addSpaceConsumed(nsDelta, dsDelta);
       }
     }
   }
@@ -1393,7 +1394,7 @@ public class FSDirectory implements Closeable {
     for(int i=0; i < numOfINodes; i++) {
      if (inodes[i].isQuotaSet()) { // a directory with quota
         INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i];
-        node.unprotectedUpdateNumItemsInTree(nsDelta, dsDelta);
+        node.addSpaceConsumed(nsDelta, dsDelta);
       }
     }
   }
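Note on the rename check above: the old code called INodeDirectory.getChildren(), which could return null, while the new code calls getChildrenList(), which never does (it falls back to an immutable empty list; see the INodeDirectory diff below). A minimal stand-alone sketch of that pattern, with a hypothetical class and field names, not code from this patch:

import java.util.Collections;
import java.util.List;

class ChildrenListDemo {
  private List<String> children = null;   // lazily allocated; may stay null

  /** Never returns null; mirrors the contract of getChildrenList(). */
  List<String> getChildrenList() {
    return children == null ? Collections.<String>emptyList() : children;
  }

  public static void main(String[] args) {
    ChildrenListDemo dir = new ChildrenListDemo();
    // Old style needed a null guard: if (c != null && c.size() != 0) ...
    // New style:
    if (!dir.getChildrenList().isEmpty()) {
      throw new IllegalStateException("rename destination directory is not empty");
    }
    System.out.println("destination directory is empty; rename may proceed");
  }
}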


@@ -307,7 +307,8 @@ class FSImageFormat {
       INode newNode = loadINode(in); // read rest of inode
 
       // add to parent
-      namesystem.dir.addToParent(localName, parent, newNode, false);
+      newNode.setLocalName(localName);
+      namesystem.dir.addToParent(parent, newNode, false);
     }
     return numChildren;
   }
@@ -341,8 +342,8 @@ class FSImageFormat {
       }
 
       // add new inode
-      parentINode = fsDir.addToParent(pathComponents[pathComponents.length-1],
-          parentINode, newNode, false);
+      newNode.setLocalName(pathComponents[pathComponents.length-1]);
+      parentINode = fsDir.addToParent(parentINode, newNode, false);
     }
   }
@@ -580,8 +581,8 @@ class FSImageFormat {
   private void saveImage(ByteBuffer currentDirName,
                          INodeDirectory current,
                          DataOutputStream out) throws IOException {
-    List<INode> children = current.getChildren();
-    if (children == null || children.isEmpty())
+    final List<INode> children = current.getChildrenList();
+    if (children.isEmpty())
       return;
     // print prefix (parent directory name)
     int prefixLen = currentDirName.position();


@@ -168,7 +168,7 @@ public class FSImageSerialization {
     out.writeLong(0);   // access time
     out.writeLong(0);   // preferred block size
     out.writeInt(-2);   // # of blocks
-    Text.writeString(out, ((INodeSymlink)node).getLinkValue());
+    Text.writeString(out, ((INodeSymlink)node).getSymlinkString());
     filePerm.fromShort(node.getFsPermissionShort());
     PermissionStatus.write(out, node.getUserName(),
                            node.getGroupName(),


@@ -45,23 +45,12 @@ import com.google.common.primitives.SignedBytes;
 @InterfaceAudience.Private
 abstract class INode implements Comparable<byte[]> {
   static final List<INode> EMPTY_LIST = Collections.unmodifiableList(new ArrayList<INode>());
-  /**
-   * The inode name is in java UTF8 encoding;
-   * The name in HdfsFileStatus should keep the same encoding as this.
-   * if this encoding is changed, implicitly getFileInfo and listStatus in
-   * clientProtocol are changed; The decoding at the client
-   * side should change accordingly.
-   */
-  protected byte[] name;
-  protected INodeDirectory parent;
-  protected long modificationTime;
-  protected long accessTime;
 
-  /** Simple wrapper for two counters :
-   *  nsCount (namespace consumed) and dsCount (diskspace consumed).
-   */
+  /** Wrapper of two counters for namespace consumed and diskspace consumed. */
   static class DirCounts {
-    /** namespace count */
     long nsCount = 0;
-    /** diskspace count */
     long dsCount = 0;
 
     /** returns namespace count */
@@ -74,10 +63,6 @@ abstract class INode implements Comparable<byte[]> {
     }
   }
 
-  //Only updated by updatePermissionStatus(...).
-  //Other codes should not modify it.
-  private long permission;
-
   private static enum PermissionStatusFormat {
     MODE(0, 16),
     GROUP(MODE.OFFSET + MODE.LENGTH, 25),
@@ -100,31 +85,67 @@ abstract class INode implements Comparable<byte[]> {
     long combine(long bits, long record) {
       return (record & ~MASK) | (bits << OFFSET);
     }
+
+    /** Set the {@link PermissionStatus} */
+    static long toLong(PermissionStatus ps) {
+      long permission = 0L;
+      final int user = SerialNumberManager.INSTANCE.getUserSerialNumber(
+          ps.getUserName());
+      permission = PermissionStatusFormat.USER.combine(user, permission);
+      final int group = SerialNumberManager.INSTANCE.getGroupSerialNumber(
+          ps.getGroupName());
+      permission = PermissionStatusFormat.GROUP.combine(group, permission);
+      final int mode = ps.getPermission().toShort();
+      permission = PermissionStatusFormat.MODE.combine(mode, permission);
+      return permission;
+    }
   }
 
-  INode(PermissionStatus permissions, long mTime, long atime) {
-    this.name = null;
-    this.parent = null;
-    this.modificationTime = mTime;
-    setAccessTime(atime);
-    setPermissionStatus(permissions);
+  /**
+   * The inode name is in java UTF8 encoding;
+   * The name in HdfsFileStatus should keep the same encoding as this.
+   * if this encoding is changed, implicitly getFileInfo and listStatus in
+   * clientProtocol are changed; The decoding at the client
+   * side should change accordingly.
+   */
+  private byte[] name = null;
+  /**
+   * Permission encoded using PermissionStatusFormat.
+   * Codes other than {@link #updatePermissionStatus(PermissionStatusFormat, long)}.
+   * should not modify it.
+   */
+  private long permission = 0L;
+  protected INodeDirectory parent = null;
+  protected long modificationTime = 0L;
+  protected long accessTime = 0L;
+
+  private INode(byte[] name, long permission, INodeDirectory parent,
+      long modificationTime, long accessTime) {
+    this.name = name;
+    this.permission = permission;
+    this.parent = parent;
+    this.modificationTime = modificationTime;
+    this.accessTime = accessTime;
+  }
+
+  INode(byte[] name, PermissionStatus permissions, INodeDirectory parent,
+      long modificationTime, long accessTime) {
+    this(name, PermissionStatusFormat.toLong(permissions), parent,
+        modificationTime, accessTime);
+  }
+
+  INode(PermissionStatus permissions, long mtime, long atime) {
+    this(null, permissions, null, mtime, atime);
   }
 
   protected INode(String name, PermissionStatus permissions) {
-    this(permissions, 0L, 0L);
-    setLocalName(name);
+    this(DFSUtil.string2Bytes(name), permissions, null, 0L, 0L);
   }
 
-  /** copy constructor
-   *
-   * @param other Other node to be copied
-   */
+  /** @param other Other node to be copied */
   INode(INode other) {
-    setLocalName(other.getLocalName());
-    this.parent = other.getParent();
-    setPermissionStatus(other.getPermissionStatus());
-    setModificationTime(other.getModificationTime());
-    setAccessTime(other.getAccessTime());
+    this(other.getLocalNameBytes(), other.permission, other.getParent(),
+        other.getModificationTime(), other.getAccessTime());
   }
 
   /**
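For readers new to this encoding: the added toLong() packs the POSIX mode plus the user and group serial numbers into the single private "permission" long, using the bit fields declared by the enum (MODE at bit 0 with width 16, GROUP next with width 25, and USER in the remaining 23 high bits by elimination). The following self-contained sketch reproduces the same pack/unpack arithmetic outside Hadoop; the class and method names are made up for illustration and are not part of the patch:

public class PermissionPackingDemo {
  // Mirrors PermissionStatusFormat.combine(bits, record): clear the field,
  // then OR in the new bits at the field's offset.
  static long combine(long bits, int offset, int length, long record) {
    final long mask = (-1L >>> (64 - length)) << offset;
    return (record & ~mask) | (bits << offset);
  }

  static long extract(long record, int offset, int length) {
    final long mask = (-1L >>> (64 - length)) << offset;
    return (record & mask) >>> offset;
  }

  public static void main(String[] args) {
    long permission = 0L;
    permission = combine(0755, 0, 16, permission);  // mode bits (rwxr-xr-x)
    permission = combine(42, 16, 25, permission);   // group serial number
    permission = combine(7, 41, 23, permission);    // user serial number

    System.out.println(Long.toOctalString(extract(permission, 0, 16)));  // 755
    System.out.println(extract(permission, 16, 25));                     // 42
    System.out.println(extract(permission, 41, 23));                     // 7
  }
}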


@@ -50,22 +50,19 @@ class INodeDirectory extends INode {
   protected static final int DEFAULT_FILES_PER_DIRECTORY = 5;
   final static String ROOT_NAME = "";
 
-  private List<INode> children;
+  private List<INode> children = null;
 
   INodeDirectory(String name, PermissionStatus permissions) {
     super(name, permissions);
-    this.children = null;
   }
 
   public INodeDirectory(PermissionStatus permissions, long mTime) {
     super(permissions, mTime, 0);
-    this.children = null;
   }
 
   /** constructor */
-  INodeDirectory(byte[] localName, PermissionStatus permissions, long mTime) {
-    this(permissions, mTime);
-    this.name = localName;
+  INodeDirectory(byte[] name, PermissionStatus permissions, long mtime) {
+    super(name, permissions, null, mtime, 0L);
   }
 
   /** copy constructor
@@ -74,7 +71,7 @@ class INodeDirectory extends INode {
    */
   INodeDirectory(INodeDirectory other) {
     super(other);
-    this.children = other.getChildren();
+    this.children = other.children;
   }
 
   /** @return true unconditionally. */
@@ -83,25 +80,30 @@ class INodeDirectory extends INode {
     return true;
   }
 
-  INode removeChild(INode node) {
-    assert children != null;
-    int low = Collections.binarySearch(children, node.name);
-    if (low >= 0) {
-      return children.remove(low);
-    } else {
-      return null;
+  private void assertChildrenNonNull() {
+    if (children == null) {
+      throw new AssertionError("children is null: " + this);
     }
   }
 
+  private int searchChildren(INode inode) {
+    return Collections.binarySearch(children, inode.getLocalNameBytes());
+  }
+
+  INode removeChild(INode node) {
+    assertChildrenNonNull();
+    final int i = searchChildren(node);
+    return i >= 0? children.remove(i): null;
+  }
+
   /** Replace a child that has the same name as newChild by newChild.
    *
    * @param newChild Child node to be added
    */
   void replaceChild(INode newChild) {
-    if ( children == null ) {
-      throw new IllegalArgumentException("The directory is empty");
-    }
-    int low = Collections.binarySearch(children, newChild.name);
+    assertChildrenNonNull();
+    final int low = searchChildren(newChild);
     if (low>=0) { // an old child exists so replace by the newChild
       children.set(low, newChild);
     } else {
@@ -210,7 +212,7 @@ class INodeDirectory extends INode {
           final String remainder =
             constructPath(components, count + 1, components.length);
           final String link = DFSUtil.bytes2String(components[count]);
-          final String target = ((INodeSymlink)curNode).getLinkValue();
+          final String target = ((INodeSymlink)curNode).getSymlinkString();
           if (NameNode.stateChangeLog.isDebugEnabled()) {
             NameNode.stateChangeLog.debug("UnresolvedPathException " +
               " path: " + path + " preceding: " + preceding +
@@ -284,7 +286,7 @@ class INodeDirectory extends INode {
     if (children == null) {
       children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
     }
-    int low = Collections.binarySearch(children, node.name);
+    final int low = searchChildren(node);
     if(low >= 0)
       return null;
     node.parent = this;
@@ -324,13 +326,9 @@ class INodeDirectory extends INode {
    * @throws FileNotFoundException if parent does not exist or
    *         is not a directory.
    */
-  INodeDirectory addToParent( byte[] localname,
-                              INode newNode,
-                              INodeDirectory parent,
-                              boolean propagateModTime
-                              ) throws FileNotFoundException {
+  INodeDirectory addToParent(INode newNode, INodeDirectory parent,
+      boolean propagateModTime) throws FileNotFoundException {
     // insert into the parent children list
-    newNode.name = localname;
     if(parent.addChild(newNode, propagateModTime) == null)
       return null;
     return parent;
@@ -368,7 +366,7 @@ class INodeDirectory extends INode {
     if (pathComponents.length < 2) { // add root
      return null;
     }
-    newNode.name = pathComponents[pathComponents.length - 1];
+    newNode.setLocalName(pathComponents[pathComponents.length - 1]);
     // insert into the parent children list
     INodeDirectory parent = getParent(pathComponents);
     return parent.addChild(newNode, propagateModTime) == null? null: parent;
@@ -424,10 +422,6 @@ class INodeDirectory extends INode {
   public List<INode> getChildrenList() {
     return children==null ? EMPTY_LIST : children;
   }
-  /** @return the children list which is possibly null. */
-  public List<INode> getChildren() {
-    return children;
-  }
 
   @Override
   int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
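The new searchChildren()/removeChild() helpers rely on the same invariant as before: "children" is kept sorted by each child's name bytes, and INode implements Comparable<byte[]>, so Collections.binarySearch(children, nameBytes) either finds a child or yields its insertion point. A self-contained sketch of that pattern follows; the class and names are hypothetical, and the byte comparison is simplified (the real INode delegates to Guava's SignedBytes comparator):

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SortedChildrenDemo {
  static class Node implements Comparable<byte[]> {
    final byte[] name;
    Node(String n) { name = n.getBytes(StandardCharsets.UTF_8); }

    @Override
    public int compareTo(byte[] other) {
      // Lexicographic comparison of signed bytes, element by element.
      int len = Math.min(name.length, other.length);
      for (int i = 0; i < len; i++) {
        if (name[i] != other[i]) {
          return name[i] - other[i];
        }
      }
      return name.length - other.length;
    }
  }

  public static void main(String[] args) {
    List<Node> children = new ArrayList<Node>();
    for (String n : new String[] {"a.txt", "b.txt", "data"}) {
      Node node = new Node(n);
      int i = Collections.binarySearch(children, node.name);
      if (i < 0) {
        children.add(-i - 1, node);   // insertion point keeps the list sorted
      }
    }
    int found = Collections.binarySearch(
        children, "b.txt".getBytes(StandardCharsets.UTF_8));
    System.out.println(found);   // prints 1
  }
}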


@@ -27,9 +27,9 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
  */
 class INodeDirectoryWithQuota extends INodeDirectory {
   private long nsQuota; /// NameSpace quota
-  private long nsCount;
+  private long nsCount = 1L;
   private long dsQuota; /// disk space quota
-  private long diskspace;
+  private long diskspace = 0L;
 
   /** Convert an existing directory inode to one with the given quota
    *
@@ -44,7 +44,8 @@ class INodeDirectoryWithQuota extends INodeDirectory {
     other.spaceConsumedInTree(counts);
     this.nsCount = counts.getNsCount();
     this.diskspace = counts.getDsCount();
-    setQuota(nsQuota, dsQuota);
+    this.nsQuota = nsQuota;
+    this.dsQuota = dsQuota;
   }
 
   /** constructor with no quota verification */
@@ -53,7 +54,6 @@ class INodeDirectoryWithQuota extends INodeDirectory {
     super(permissions, modificationTime);
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
-    this.nsCount = 1;
   }
 
   /** constructor with no quota verification */
@@ -62,7 +62,6 @@ class INodeDirectoryWithQuota extends INodeDirectory {
     super(name, permissions);
     this.nsQuota = nsQuota;
     this.dsQuota = dsQuota;
-    this.nsCount = 1;
   }
 
   /** Get this directory's namespace quota
@@ -116,19 +115,8 @@ class INodeDirectoryWithQuota extends INodeDirectory {
    * @param nsDelta the change of the tree size
    * @param dsDelta change to disk space occupied
    */
-  void updateNumItemsInTree(long nsDelta, long dsDelta) {
-    nsCount += nsDelta;
-    diskspace += dsDelta;
-  }
-
-  /** Update the size of the tree
-   *
-   * @param nsDelta the change of the tree size
-   * @param dsDelta change to disk space occupied
-   **/
-  void unprotectedUpdateNumItemsInTree(long nsDelta, long dsDelta) {
-    nsCount = nsCount + nsDelta;
-    diskspace = diskspace + dsDelta;
+  void addSpaceConsumed(long nsDelta, long dsDelta) {
+    setSpaceConsumed(nsCount + nsDelta, diskspace + dsDelta);
   }
 
   /**
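Both removed methods applied the same deltas directly to the counters; the replacement addSpaceConsumed() converts the deltas into absolute totals and routes them through setSpaceConsumed(), so a single code path writes nsCount and diskspace. A stand-alone sketch of that relationship, using a stand-in class rather than the real INodeDirectoryWithQuota:

public class QuotaCountersDemo {
  private long nsCount = 1L;    // the directory itself counts as one namespace item
  private long diskspace = 0L;

  void addSpaceConsumed(long nsDelta, long dsDelta) {
    // Same shape as the patched method: deltas become absolute totals.
    setSpaceConsumed(nsCount + nsDelta, diskspace + dsDelta);
  }

  void setSpaceConsumed(long namespace, long diskspace) {
    this.nsCount = namespace;
    this.diskspace = diskspace;
  }

  public static void main(String[] args) {
    QuotaCountersDemo dir = new QuotaCountersDemo();
    dir.addSpaceConsumed(3, 384L * 1024 * 1024);   // e.g. 3 new inodes, 384 MB
    System.out.println(dir.nsCount + " " + dir.diskspace);   // prints: 4 402653184
  }
}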


@@ -45,14 +45,43 @@ class INodeFile extends INode implements BlockCollection {
   static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
 
-  //Number of bits for Block size
-  static final short BLOCKBITS = 48;
-
-  //Header mask 64-bit representation
-  //Format: [16 bits for replication][48 bits for PreferredBlockSize]
-  static final long HEADERMASK = 0xffffL << BLOCKBITS;
-
-  private long header;
+  /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
+  private static class HeaderFormat {
+    /** Number of bits for Block size */
+    static final int BLOCKBITS = 48;
+    /** Header mask 64-bit representation */
+    static final long HEADERMASK = 0xffffL << BLOCKBITS;
+    static final long MAX_BLOCK_SIZE = ~HEADERMASK;
+
+    static short getReplication(long header) {
+      return (short) ((header & HEADERMASK) >> BLOCKBITS);
+    }
+
+    static long combineReplication(long header, short replication) {
+      if (replication <= 0) {
+        throw new IllegalArgumentException(
+            "Unexpected value for the replication: " + replication);
+      }
+      return ((long)replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
+    }
+
+    static long getPreferredBlockSize(long header) {
+      return header & MAX_BLOCK_SIZE;
+    }
+
+    static long combinePreferredBlockSize(long header, long blockSize) {
+      if (blockSize < 0) {
+        throw new IllegalArgumentException("Block size < 0: " + blockSize);
+      } else if (blockSize > MAX_BLOCK_SIZE) {
+        throw new IllegalArgumentException("Block size = " + blockSize
+            + " > MAX_BLOCK_SIZE = " + MAX_BLOCK_SIZE);
+      }
+      return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
+    }
+  }
+
+  private long header = 0L;
 
   private BlockInfo[] blocks;
@@ -60,8 +89,8 @@ class INodeFile extends INode implements BlockCollection {
                    short replication, long modificationTime,
                    long atime, long preferredBlockSize) {
     super(permissions, modificationTime, atime);
-    this.setReplication(replication);
-    this.setPreferredBlockSize(preferredBlockSize);
+    header = HeaderFormat.combineReplication(header, replication);
+    header = HeaderFormat.combinePreferredBlockSize(header, preferredBlockSize);
     this.blocks = blklist;
   }
@@ -78,25 +107,17 @@ class INodeFile extends INode implements BlockCollection {
   /** @return the replication factor of the file. */
   @Override
   public short getBlockReplication() {
-    return (short) ((header & HEADERMASK) >> BLOCKBITS);
+    return HeaderFormat.getReplication(header);
   }
 
   void setReplication(short replication) {
-    if(replication <= 0)
-      throw new IllegalArgumentException("Unexpected value for the replication");
-    header = ((long)replication << BLOCKBITS) | (header & ~HEADERMASK);
+    header = HeaderFormat.combineReplication(header, replication);
   }
 
   /** @return preferred block size (in bytes) of the file. */
   @Override
   public long getPreferredBlockSize() {
-    return header & ~HEADERMASK;
-  }
-
-  private void setPreferredBlockSize(long preferredBlkSize) {
-    if((preferredBlkSize < 0) || (preferredBlkSize > ~HEADERMASK ))
-      throw new IllegalArgumentException("Unexpected value for the block size");
-    header = (header & HEADERMASK) | (preferredBlkSize & ~HEADERMASK);
+    return HeaderFormat.getPreferredBlockSize(header);
   }
 
   /** @return the blocks of the file. */
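The new HeaderFormat helper keeps the existing 64-bit layout, replication in the top 16 bits and preferred block size in the low 48, but concentrates the masking in one place. A self-contained round trip through the same arithmetic; the constants are copied from the patch, while the demo class and main method are only for illustration:

public class HeaderFormatDemo {
  static final int BLOCKBITS = 48;
  static final long HEADERMASK = 0xffffL << BLOCKBITS;
  static final long MAX_BLOCK_SIZE = ~HEADERMASK;

  static long combineReplication(long header, short replication) {
    return ((long) replication << BLOCKBITS) | (header & MAX_BLOCK_SIZE);
  }

  static long combinePreferredBlockSize(long header, long blockSize) {
    return (header & HEADERMASK) | (blockSize & MAX_BLOCK_SIZE);
  }

  static short getReplication(long header) {
    return (short) ((header & HEADERMASK) >> BLOCKBITS);
  }

  static long getPreferredBlockSize(long header) {
    return header & MAX_BLOCK_SIZE;
  }

  public static void main(String[] args) {
    long header = 0L;
    header = combineReplication(header, (short) 3);
    header = combinePreferredBlockSize(header, 128L * 1024 * 1024);
    System.out.println(getReplication(header));          // 3
    System.out.println(getPreferredBlockSize(header));   // 134217728
  }
}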


@@ -22,19 +22,16 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 
 /**
- * An INode representing a symbolic link.
+ * An {@link INode} representing a symbolic link.
  */
 @InterfaceAudience.Private
 public class INodeSymlink extends INode {
-  private byte[] symlink; // The target URI
+  private final byte[] symlink; // The target URI
 
-  INodeSymlink(String value, long modTime, long atime,
+  INodeSymlink(String value, long mtime, long atime,
                PermissionStatus permissions) {
-    super(permissions, modTime, atime);
-    assert value != null;
-    setLinkValue(value);
-    setModificationTimeForce(modTime);
-    setAccessTime(atime);
+    super(permissions, mtime, atime);
+    this.symlink = DFSUtil.string2Bytes(value);
   }
 
   @Override
@@ -42,11 +39,7 @@ public class INodeSymlink extends INode {
     return true;
   }
 
-  void setLinkValue(String value) {
-    this.symlink = DFSUtil.string2Bytes(value);
-  }
-
-  public String getLinkValue() {
+  public String getSymlinkString() {
     return DFSUtil.bytes2String(symlink);
   }


@@ -130,6 +130,16 @@ public class TestFSDirectory {
     Assert.assertTrue(diff.contains(file4.getName()));
   }
 
+  @Test
+  public void testReset() throws Exception {
+    fsdir.reset();
+    Assert.assertFalse(fsdir.isReady());
+    final INodeDirectory root = (INodeDirectory) fsdir.getINode("/");
+    Assert.assertTrue(root.getChildrenList().isEmpty());
+    fsdir.imageLoadComplete();
+    Assert.assertTrue(fsdir.isReady());
+  }
+
   static void checkClassName(String line) {
     int i = line.lastIndexOf('(');
     int j = line.lastIndexOf('@');