HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and initialize root directory as snapshottable.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1405648 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-11-04 22:00:13 +00:00
parent 554fb4d2b2
commit deaf979d41
4 changed files with 24 additions and 17 deletions


@@ -47,3 +47,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4141. Support directory diff - the difference between the current state
and a previous snapshot of an INodeDirectory. (szetszwo)
+HDFS-4146. Use getter and setter in INodeFileWithLink to access blocks and
+initialize root directory as snapshottable. (szetszwo)


@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
import org.apache.hadoop.hdfs.util.ByteArray;
import com.google.common.base.Preconditions;
@@ -120,9 +121,10 @@ public class FSDirectory implements Closeable {
FSDirectory(FSImage fsImage, FSNamesystem ns, Configuration conf) {
this.dirLock = new ReentrantReadWriteLock(true); // fair
this.cond = dirLock.writeLock().newCondition();
-rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
-ns.createFsOwnerPermissions(new FsPermission((short)0755)),
-Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+this.namesystem = ns;
+reset();
this.fsImage = fsImage;
int configuredLimit = conf.getInt(
DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
@@ -143,7 +145,6 @@
NameNode.LOG.info("Caching file names occuring more than " + threshold
+ " times");
nameCache = new NameCache<ByteArray>(threshold);
-namesystem = ns;
}
private FSNamesystem getFSNamesystem() {
@@ -2030,9 +2031,11 @@ public class FSDirectory implements Closeable {
* Reset the entire namespace tree.
*/
void reset() {
-rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
+final INodeDirectoryWithQuota r = new INodeDirectoryWithQuota(
+INodeDirectory.ROOT_NAME,
getFSNamesystem().createFsOwnerPermissions(new FsPermission((short)0755)),
-Integer.MAX_VALUE, -1);
+Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
+rootDir = INodeDirectorySnapshottable.newInstance(r, 0);
}
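Note that the constructor above no longer builds the root inode inline: it sets namesystem and then calls reset(), and reset() now wraps the quota-checking root directory via INodeDirectorySnapshottable.newInstance(r, 0). The namespace root is therefore snapshottable from the moment the FSDirectory is constructed.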
/**


@@ -55,7 +55,7 @@ public class INodeFile extends INode implements BlockCollection {
private long header;
-protected BlockInfo[] blocks;
+private BlockInfo[] blocks;
INodeFile(PermissionStatus permissions, BlockInfo[] blklist,
short replication, long modificationTime,
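
Narrowing blocks to private means subclasses such as INodeFileWithLink can no longer touch the field and must go through accessors. Those accessors are not part of this diff; the method names getBlocks() and setBlocks() are taken from the INodeFileWithLink hunk below, while the exact signatures and comments in this sketch are assumptions, not the actual INodeFile code:

// Sketch only: assumed accessors on INodeFile. Only the method names are
// confirmed by the INodeFileWithLink hunk below; visibility and javadoc are guesses.
public class INodeFile extends INode implements BlockCollection {
  private BlockInfo[] blocks;

  /** @return the blocks of the file, possibly null. */
  public BlockInfo[] getBlocks() {
    return blocks;
  }

  /** Replace the block list, e.g. with a trimmed copy, or null to clear it. */
  void setBlocks(BlockInfo[] blocks) {
    this.blocks = blocks;
  }
}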


@@ -107,35 +107,36 @@ public class INodeFileWithLink extends INodeFile {
}
private void collectBlocksBeyondMaxAndClear(final long max, final List<Block> v) {
-if (blocks != null) {
+final BlockInfo[] oldBlocks = getBlocks();
+if (oldBlocks != null) {
//find the minimum n such that the size of the first n blocks > max
int n = 0;
-for(long size = 0; n < blocks.length && max > size; n++) {
-size += blocks[n].getNumBytes();
+for(long size = 0; n < oldBlocks.length && max > size; n++) {
+size += oldBlocks[n].getNumBytes();
}
-//starting from block[n], the data is beyond max.
-if (n < blocks.length) {
+//starting from block n, the data is beyond max.
+if (n < oldBlocks.length) {
//resize the array.
final BlockInfo[] newBlocks;
if (n == 0) {
newBlocks = null;
} else {
newBlocks = new BlockInfo[n];
-System.arraycopy(blocks, 0, newBlocks, 0, n);
+System.arraycopy(oldBlocks, 0, newBlocks, 0, n);
}
for(INodeFileWithLink i = next; i != this; i = i.getNext()) {
-i.blocks = newBlocks;
+i.setBlocks(newBlocks);
}
//collect the blocks beyond max.
if (v != null) {
-for(; n < blocks.length; n++) {
-v.add(blocks[n]);
+for(; n < oldBlocks.length; n++) {
+v.add(oldBlocks[n]);
}
}
}
-blocks = null;
+setBlocks(null);
}
}
}
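
After this change INodeFileWithLink never touches the blocks field directly: the original array is read once through getBlocks(), the trimmed array is pushed to every other inode in the circular linked list via i.setBlocks(newBlocks), and the current inode then clears its own list with setBlocks(null), matching the field's new private visibility in INodeFile.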