HDFS-4772. Add number of children in HdfsFileStatus. Contributed by Brandon Li
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1495253 13f79535-47bb-0310-9956-ffa450edef68
parent 1a06175440
commit 6ecf78a99b
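The change threads a childrenNum field through HdfsFileStatus and every path that builds one (protobuf, WebHDFS JSON, FSDirectory), so a single getFileInfo round trip now tells a client how many direct children a directory has. A minimal sketch of the intended use, assuming a running cluster reachable through the default Configuration (the path /tmp and the class name are illustrative only, not part of this commit):

    // Illustrative sketch: DFSClient is HDFS's internal client class;
    // getChildrenNum() is the accessor this commit introduces.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class ChildrenNumExample {
      public static void main(String[] args) throws Exception {
        DFSClient client = new DFSClient(new Configuration());
        HdfsFileStatus status = client.getFileInfo("/tmp"); // null if absent
        if (status != null) {
          // Directories report their direct-child count ("." and ".." excluded);
          // regular files always report 0.
          System.out.println("childrenNum = " + status.getChildrenNum());
        }
        client.close();
      }
    }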
CHANGES.txt
@@ -122,6 +122,8 @@ Trunk (Unreleased)
 
     HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth)
 
+    HDFS-4772. Add number of children in HdfsFileStatus. (brandonli)
+
   OPTIMIZATIONS
 
   BUG FIXES
HdfsFileStatus.java
@@ -42,6 +42,9 @@ public class HdfsFileStatus {
   private String group;
   private long fileId;
 
+  // Used by dir, not including dot and dotdot. Always zero for a regular file.
+  private int childrenNum;
+
   public static final byte[] EMPTY_NAME = new byte[0];
 
   /**
@@ -61,7 +64,7 @@ public class HdfsFileStatus {
   public HdfsFileStatus(long length, boolean isdir, int block_replication,
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId) {
+      byte[] symlink, byte[] path, long fileId, int childrenNum) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -78,6 +81,7 @@ public class HdfsFileStatus {
     this.symlink = symlink;
     this.path = path;
     this.fileId = fileId;
+    this.childrenNum = childrenNum;
   }
 
   /**
@@ -230,4 +234,8 @@ public class HdfsFileStatus {
   final public long getFileId() {
     return fileId;
   }
+
+  final public int getChildrenNum() {
+    return childrenNum;
+  }
 }
HdfsLocatedFileStatus.java
@@ -50,9 +50,11 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
   public HdfsLocatedFileStatus(long length, boolean isdir,
       int block_replication, long blocksize, long modification_time,
       long access_time, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations) {
+      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
+      int childrenNum) {
     super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, owner, group, symlink, path, fileId);
+        access_time, permission, owner, group, symlink, path, fileId,
+        childrenNum);
     this.locations = locations;
   }
 
SnapshottableDirectoryStatus.java
@@ -57,10 +57,11 @@ public class SnapshottableDirectoryStatus {
 
   public SnapshottableDirectoryStatus(long modification_time, long access_time,
       FsPermission permission, String owner, String group, byte[] localName,
-      long inodeId,
+      long inodeId, int childrenNum,
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
     this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
-        access_time, permission, owner, group, null, localName, inodeId);
+        access_time, permission, owner, group, null, localName, inodeId,
+        childrenNum);
     this.snapshotNumber = snapshotNumber;
     this.snapshotQuota = snapshotQuota;
     this.parentFullPath = parentFullPath;
PBHelper.java
@@ -1055,7 +1055,8 @@ public class PBHelper {
         fs.getSymlink().toByteArray() : null,
         fs.getPath().toByteArray(),
         fs.hasFileId()? fs.getFileId(): INodeId.GRANDFATHER_INODE_ID,
-        fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null);
+        fs.hasLocations() ? PBHelper.convert(fs.getLocations()) : null,
+        fs.hasChildrenNum() ? fs.getChildrenNum() : 0);
   }
 
   public static SnapshottableDirectoryStatus convert(
@@ -1072,6 +1073,7 @@ public class PBHelper {
         status.getGroup(),
         status.getPath().toByteArray(),
         status.getFileId(),
+        status.getChildrenNum(),
         sdirStatusProto.getSnapshotNumber(),
         sdirStatusProto.getSnapshotQuota(),
         sdirStatusProto.getParentFullpath().toByteArray());
@@ -1099,6 +1101,7 @@ public class PBHelper {
       setOwner(fs.getOwner()).
       setGroup(fs.getGroup()).
       setFileId(fs.getFileId()).
+      setChildrenNum(fs.getChildrenNum()).
       setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
     if (fs.isSymlink()) {
       builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
FSDirectory.java
@@ -1582,6 +1582,13 @@ public class FSDirectory implements Closeable {
     }
   }
 
+  /**
+   * Currently we only support "ls /xxx/.snapshot" which will return all the
+   * snapshots of a directory. The FSCommand Ls will first call getFileInfo to
+   * make sure the file/directory exists (before the real getListing call).
+   * Since we do not have a real INode for ".snapshot", we return an empty
+   * non-null HdfsFileStatus here.
+   */
   private HdfsFileStatus getFileInfo4DotSnapshot(String src)
       throws UnresolvedLinkException {
     Preconditions.checkArgument(
@@ -1596,7 +1603,7 @@ public class FSDirectory implements Closeable {
         && node.isDirectory()
         && node.asDirectory() instanceof INodeDirectorySnapshottable) {
       return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
-          HdfsFileStatus.EMPTY_NAME, -1L);
+          HdfsFileStatus.EMPTY_NAME, -1L, 0);
     }
     return null;
   }
@@ -2521,6 +2528,9 @@ public class FSDirectory implements Closeable {
       replication = fileNode.getFileReplication(snapshot);
       blocksize = fileNode.getPreferredBlockSize();
     }
+    int childrenNum = node.isDirectory() ?
+        node.asDirectory().getChildrenNum(snapshot) : 0;
+
     return new HdfsFileStatus(
         size,
         node.isDirectory(),
@@ -2533,7 +2543,8 @@ public class FSDirectory implements Closeable {
         node.getGroupName(snapshot),
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
         path,
-        node.getId());
+        node.getId(),
+        childrenNum);
   }
 
   /**
@@ -2563,12 +2574,15 @@ public class FSDirectory implements Closeable {
         loc = new LocatedBlocks();
       }
     }
+    int childrenNum = node.isDirectory() ?
+        node.asDirectory().getChildrenNum(snapshot) : 0;
+
     return new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
         blocksize, node.getModificationTime(snapshot),
         node.getAccessTime(snapshot), node.getFsPermission(snapshot),
         node.getUserName(snapshot), node.getGroupName(snapshot),
         node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
-        node.getId(), loc);
+        node.getId(), loc, childrenNum);
   }
 
 
INodeDirectory.java
@@ -655,4 +655,8 @@ public class INodeDirectory extends INodeWithAdditionalFields
       this(snapshot, snapshot.getRoot());
     }
   }
+
+  public final int getChildrenNum(final Snapshot snapshot) {
+    return getChildrenList(snapshot).size();
+  }
 }
INodeWithAdditionalFields.java
@@ -130,6 +130,7 @@ public abstract class INodeWithAdditionalFields extends INode
   }
 
   /** Get inode id */
+  @Override
   public final long getId() {
     return this.id;
   }
@@ -226,6 +227,7 @@ public abstract class INodeWithAdditionalFields extends INode
 
 
   /** Update modification time if it is larger than the current value. */
+  @Override
   public final INode updateModificationTime(long mtime, Snapshot latest,
       final INodeMap inodeMap) throws QuotaExceededException {
     Preconditions.checkState(isDirectory());
@@ -256,6 +258,7 @@ public abstract class INodeWithAdditionalFields extends INode
   /**
    * Set last access time of inode.
    */
+  @Override
   public final void setAccessTime(long accessTime) {
     this.accessTime = accessTime;
   }
SnapshotManager.java
@@ -325,7 +325,8 @@ public class SnapshotManager implements SnapshotStats {
       SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
           dir.getModificationTime(), dir.getAccessTime(),
           dir.getFsPermission(), dir.getUserName(), dir.getGroupName(),
-          dir.getLocalNameBytes(), dir.getId(), dir.getNumSnapshots(),
+          dir.getLocalNameBytes(), dir.getId(), dir.getChildrenNum(null),
+          dir.getNumSnapshots(),
           dir.getSnapshotQuota(), dir.getParent() == null ?
               DFSUtil.EMPTY_BYTES :
               DFSUtil.string2Bytes(dir.getParent().getFullPathName()));
JsonUtil.java
@@ -221,6 +221,7 @@ public class JsonUtil {
     m.put("blockSize", status.getBlockSize());
     m.put("replication", status.getReplication());
     m.put("fileId", status.getFileId());
+    m.put("childrenNum", status.getChildrenNum());
     return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
   }
 
@@ -247,9 +248,10 @@ public class JsonUtil {
     final short replication = (short) (long) (Long) m.get("replication");
     final long fileId = m.containsKey("fileId") ? (Long) m.get("fileId")
         : INodeId.GRANDFATHER_INODE_ID;
+    final int childrenNum = (int) (long) (Long) m.get("childrenNum");
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
         blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId);
+        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum);
   }
 
   /** Convert an ExtendedBlock to a Json map. */
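One asymmetry worth noting in the hunk above: the fileId read is guarded with containsKey, but the new childrenNum read is not, so the double unboxing cast throws a NullPointerException if an older server's JSON omits the key. A null-safe variant in the style of the fileId guard, purely as a sketch (m is the parsed JSON map from the surrounding method; not part of the commit):

    // Defensive read: fall back to 0 when the key is missing.
    final int childrenNum = m.containsKey("childrenNum")
        ? (int) (long) (Long) m.get("childrenNum") : 0;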
hdfs.proto
@@ -173,6 +173,7 @@ message HdfsFileStatusProto {
 
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
+  optional uint32 childrenNum = 14 [default = 0];
 }
 
 /**
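Because childrenNum is declared optional with an explicit default, messages serialized before this change still parse cleanly and readers simply observe 0; that is the same guard PBHelper applies above with hasChildrenNum(). A small sketch, assuming the HdfsProtos classes generated from this file (buildPartial() is used only to skip required-field checks and keep the example short):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;

    public class ChildrenNumDefault {
      public static void main(String[] args) {
        // Build a message the way an old writer would: the field is never set.
        HdfsFileStatusProto msg = HdfsFileStatusProto.newBuilder().buildPartial();
        System.out.println(msg.hasChildrenNum()); // false
        System.out.println(msg.getChildrenNum()); // 0, the declared default
      }
    }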
TestDFSClientRetries.java
@@ -252,12 +252,12 @@ public class TestDFSClientRetries {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010)).when(mockNN).getFileInfo(anyString());
+            1010, 0)).when(mockNN).getFileInfo(anyString());
 
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010))
+            1010, 0))
         .when(mockNN)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
TestFileStatus.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.LogFactory;
@@ -37,7 +36,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -106,6 +104,16 @@ public class TestFileStatus {
     HdfsFileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
     assertEquals("Non-existant file should result in null", null, fileInfo);
 
+    Path path1 = new Path("/name1");
+    Path path2 = new Path("/name1/name2");
+    assertTrue(fs.mkdirs(path1));
+    FSDataOutputStream out = fs.create(path2, false);
+    out.close();
+    fileInfo = dfsClient.getFileInfo(path1.toString());
+    assertEquals(1, fileInfo.getChildrenNum());
+    fileInfo = dfsClient.getFileInfo(path2.toString());
+    assertEquals(0, fileInfo.getChildrenNum());
+
     // Test getFileInfo throws the right exception given a non-absolute path.
     try {
       dfsClient.getFileInfo("non-absolute");
TestLease.java
@@ -294,12 +294,12 @@ public class TestLease {
     Mockito.doReturn(
         new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
-            1010)).when(mcp).getFileInfo(anyString());
+            1010, 0)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
-                1010))
+                1010, 0))
         .when(mcp)
         .create(anyString(), (FsPermission) anyObject(), anyString(),
             (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
TestJsonUtil.java
@@ -46,7 +46,7 @@ public class TestJsonUtil {
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
         now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
-        INodeId.GRANDFATHER_INODE_ID);
+        INodeId.GRANDFATHER_INODE_ID, 0);
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);