HDFS-12582. Replace HdfsFileStatus constructor with a builder pattern. Contributed by Bharat Viswanadham

Chris Douglas 2017-10-27 15:36:03 -07:00
parent 665bb147aa
commit d55a84951a
11 changed files with 416 additions and 122 deletions

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.protocol;
import java.io.IOException;
import java.net.URI;
import java.util.Arrays;
import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -63,12 +64,12 @@ public class HdfsFileStatus extends FileStatus {
/**
* Constructor.
* @param length the number of bytes the file has
* @param isdir if the path is a directory
* @param block_replication the replication factor
* @param blocksize the block size
* @param modification_time modification time
* @param access_time access time
* @param length the number of bytes the file has
* @param isdir if the path is a directory
* @param replication the replication factor
* @param blocksize the block size
* @param mtime modification time
* @param atime access time
* @param permission permission
* @param owner the owner of the path
* @param group the group of the path
@@ -80,16 +81,18 @@ public class HdfsFileStatus extends FileStatus {
* @param storagePolicy ID which specifies storage policy
* @param ecPolicy the erasure coding policy
*/
public HdfsFileStatus(long length, boolean isdir, int block_replication,
long blocksize, long modification_time,
long access_time, FsPermission permission,
EnumSet<Flags> flags, String owner, String group,
byte[] symlink, byte[] path, long fileId,
int childrenNum, FileEncryptionInfo feInfo,
byte storagePolicy, ErasureCodingPolicy ecPolicy) {
super(length, isdir, block_replication, blocksize, modification_time,
access_time, convert(isdir, symlink != null, permission, flags),
owner, group, null, null);
protected HdfsFileStatus(long length, boolean isdir, int replication,
long blocksize, long mtime, long atime,
FsPermission permission, EnumSet<Flags> flags,
String owner, String group,
byte[] symlink, byte[] path, long fileId,
int childrenNum, FileEncryptionInfo feInfo,
byte storagePolicy, ErasureCodingPolicy ecPolicy) {
super(length, isdir, replication, blocksize, mtime,
atime, convert(isdir, symlink != null, permission, flags),
owner, group, null, null,
flags.contains(Flags.HAS_ACL), flags.contains(Flags.HAS_CRYPT),
flags.contains(Flags.HAS_EC));
this.flags = flags;
this.uSymlink = symlink;
this.uPath = path;
@@ -278,6 +281,222 @@ public class HdfsFileStatus extends FileStatus {
// fully-qualify path
setPath(getFullPath(parent).makeQualified(defaultUri, null));
return this; // API compatibility
}
/**
* Builder class for HdfsFileStatus instances. Note default values for
* parameters.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static class Builder {
// Changing default values will affect cases where values are not
// specified. Be careful!
private long length = 0L;
private boolean isdir = false;
private int replication = 0;
private long blocksize = 0L;
private long mtime = 0L;
private long atime = 0L;
private FsPermission permission = null;
private EnumSet<Flags> flags = EnumSet.noneOf(Flags.class);
private String owner = null;
private String group = null;
private byte[] symlink = null;
private byte[] path = EMPTY_NAME;
private long fileId = -1L;
private int childrenNum = 0;
private FileEncryptionInfo feInfo = null;
private byte storagePolicy =
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
private ErasureCodingPolicy ecPolicy = null;
/**
* Set the length of the entity (default = 0).
* @param length Entity length
* @return This Builder instance
*/
public Builder length(long length) {
this.length = length;
return this;
}
/**
* Set the isDir flag for the entity (default = false).
* @param isdir True if the referent is a directory.
* @return This Builder instance
*/
public Builder isdir(boolean isdir) {
this.isdir = isdir;
return this;
}
/**
* Set the replication of this entity (default = 0).
* @param replication Number of replicas
* @return This Builder instance
*/
public Builder replication(int replication) {
this.replication = replication;
return this;
}
/**
* Set the blocksize of this entity (default = 0).
* @param blocksize Target, default blocksize
* @return This Builder instance
*/
public Builder blocksize(long blocksize) {
this.blocksize = blocksize;
return this;
}
/**
* Set the modification time of this entity (default = 0).
* @param mtime Last modified time
* @return This Builder instance
*/
public Builder mtime(long mtime) {
this.mtime = mtime;
return this;
}
/**
* Set the access time of this entity (default = 0).
* @param atime Last accessed time
* @return This Builder instance
*/
public Builder atime(long atime) {
this.atime = atime;
return this;
}
/**
* Set the permission mask of this entity (default = null).
* @param permission Permission bitmask
* @return This Builder instance
*/
public Builder perm(FsPermission permission) {
this.permission = permission;
return this;
}
/**
* Set {@link Flags} for this entity
* (default = {@link EnumSet#noneOf(Class)}).
* @param flags Flags
* @return This Builder instance
*/
public Builder flags(EnumSet<Flags> flags) {
this.flags = flags;
return this;
}
/**
* Set the owner for this entity (default = null).
* @param owner Owner
* @return This Builder instance
*/
public Builder owner(String owner) {
this.owner = owner;
return this;
}
/**
* Set the group for this entity (default = null).
* @param group Group
* @return This Builder instance
*/
public Builder group(String group) {
this.group = group;
return this;
}
/**
* Set symlink bytes for this entity (default = null).
* @param symlink Symlink bytes (see
* {@link DFSUtilClient#bytes2String(byte[])})
* @return This Builder instance
*/
public Builder symlink(byte[] symlink) {
this.symlink = null == symlink
? null
: Arrays.copyOf(symlink, symlink.length);
return this;
}
/**
* Set path bytes for this entity (default = {@link #EMPTY_NAME}).
* @param path Path bytes (see {@link #makeQualified(URI, Path)}).
* @return This Builder instance
*/
public Builder path(byte[] path) {
this.path = null == path
? null
: Arrays.copyOf(path, path.length);
return this;
}
/**
* Set the fileId for this entity (default = -1).
* @param fileId FileId
* @return This Builder instance
*/
public Builder fileId(long fileId) {
this.fileId = fileId;
return this;
}
/**
* Set the number of children for this entity (default = 0).
* @param childrenNum Number of children
* @return This Builder instance
*/
public Builder children(int childrenNum) {
this.childrenNum = childrenNum;
return this;
}
/**
* Set the encryption info for this entity (default = null).
* @param feInfo Encryption info
* @return This Builder instance
*/
public Builder feInfo(FileEncryptionInfo feInfo) {
this.feInfo = feInfo;
return this;
}
/**
* Set the storage policy for this entity
* (default = {@link HdfsConstants#BLOCK_STORAGE_POLICY_ID_UNSPECIFIED}).
* @param storagePolicy Storage policy
* @return This Builder instance
*/
public Builder storagePolicy(byte storagePolicy) {
this.storagePolicy = storagePolicy;
return this;
}
/**
* Set the erasure coding policy for this entity (default = null).
* @param ecPolicy Erasure coding policy
* @return This Builder instance
*/
public Builder ecPolicy(ErasureCodingPolicy ecPolicy) {
this.ecPolicy = ecPolicy;
return this;
}
/**
* @return An {@link HdfsFileStatus} instance from these parameters.
*/
public HdfsFileStatus build() {
return new HdfsFileStatus(length, isdir, replication, blocksize,
mtime, atime, permission, flags, owner, group, symlink, path, fileId,
childrenNum, feInfo, storagePolicy, ecPolicy);
}
}
}
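To make the migration concrete, here is a minimal before/after sketch of a typical call site (the locals mtime, atime, perm, owner, group, and name are hypothetical stand-ins, not taken from this patch). The constructor above is now protected, so callers outside the class hierarchy move to the Builder; anything left unset falls back to the defaults noted in the field initializers, and the symlink/path setters defensively copy their byte arrays:

// Before: the positional constructor, every argument supplied in order.
HdfsFileStatus before = new HdfsFileStatus(0, true, 0, 0, mtime, atime,
    perm, EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group,
    null, name, -1L, 0, null,
    HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);

// After: name only the fields that differ from the Builder defaults.
HdfsFileStatus after = new HdfsFileStatus.Builder()
    .isdir(true)
    .mtime(mtime)
    .atime(atime)
    .perm(perm)
    .owner(owner)
    .group(group)
    .path(name)   // the byte[] is copied, so the caller may reuse it
    .build();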

View File

@@ -62,10 +62,18 @@ public class SnapshottableDirectoryStatus {
String owner, String group, byte[] localName, long inodeId,
int childrenNum, int snapshotNumber, int snapshotQuota,
byte[] parentFullPath) {
this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
access_time, permission, flags, owner, group, null, localName, inodeId,
childrenNum, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
null);
this.dirStatus = new HdfsFileStatus.Builder()
.isdir(true)
.mtime(modification_time)
.atime(access_time)
.perm(permission)
.flags(flags)
.owner(owner)
.group(group)
.path(localName)
.fileId(inodeId)
.children(childrenNum)
.build();
this.snapshotNumber = snapshotNumber;
this.snapshotQuota = snapshotQuota;
this.parentFullPath = parentFullPath;
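Every field this call site no longer passes explicitly (length, replication, blocksize, storage policy, and the null symlink/feInfo/ecPolicy) now comes from the Builder defaults. A quick hypothetical JUnit-style check of that equivalence — not part of this patch:

// Hypothetical test: unset Builder fields take the documented defaults.
HdfsFileStatus status = new HdfsFileStatus.Builder().isdir(true).build();
assertTrue(status.isDirectory());           // set explicitly
assertEquals(0L, status.getLen());          // default length = 0
assertEquals(0, status.getReplication());   // default replication = 0
assertEquals(HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
    status.getStoragePolicy());             // default storage policy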

View File

@@ -23,7 +23,6 @@ import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.ContentSummary.Builder;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsServerDefaults;
@@ -152,11 +151,23 @@ class JsonUtilClient {
final byte storagePolicy = m.containsKey("storagePolicy") ?
(byte) ((Number) m.get("storagePolicy")).longValue() :
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
return new HdfsFileStatus(len,
type == WebHdfsConstants.PathType.DIRECTORY, replication, blockSize,
mTime, aTime, permission, f, owner, group, symlink,
DFSUtilClient.string2Bytes(localName), fileId, childrenNum,
null, storagePolicy, null);
return new HdfsFileStatus.Builder()
.length(len)
.isdir(type == WebHdfsConstants.PathType.DIRECTORY)
.replication(replication)
.blocksize(blockSize)
.mtime(mTime)
.atime(aTime)
.perm(permission)
.flags(f)
.owner(owner)
.group(group)
.symlink(symlink)
.path(DFSUtilClient.string2Bytes(localName))
.fileId(fileId)
.children(childrenNum)
.storagePolicy(storagePolicy)
.build();
}
static HdfsFileStatus[] toHdfsFileStatusArray(final Map<?, ?> json) {
@@ -399,9 +410,9 @@
final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
.fileCount(fileCount).directoryCount(directoryCount).quota(quota)
.spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
ContentSummary.Builder contentSummaryBuilder = new ContentSummary.Builder()
.length(length).fileCount(fileCount).directoryCount(directoryCount)
.quota(quota).spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
if (typem != null) {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());

View File

@@ -2016,10 +2016,18 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
LOG.error("Cannot get the remote user: {}", e.getMessage());
}
long inodeId = 0;
return new HdfsFileStatus(0, true, 0, 0, modTime, accessTime, permission,
EnumSet.noneOf(HdfsFileStatus.Flags.class),
owner, group, new byte[0], DFSUtil.string2Bytes(name), inodeId,
childrenNum, null, (byte) 0, null);
return new HdfsFileStatus.Builder()
.isdir(true)
.mtime(modTime)
.atime(accessTime)
.perm(permission)
.owner(owner)
.group(group)
.symlink(new byte[0])
.path(DFSUtil.string2Bytes(name))
.fileId(inodeId)
.children(childrenNum)
.build();
}
/**
@@ -2043,4 +2051,4 @@
UserGroupInformation ugi = Server.getRemoteUser();
return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
}
}
}

View File

@@ -487,9 +487,25 @@ class FSDirStatAndListingOp {
int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
if (locations == null) {
return new HdfsFileStatus(length, isdir, replication, blocksize,
mtime, atime, permission, flags, owner, group, symlink, path,
fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
return new HdfsFileStatus.Builder()
.length(length)
.isdir(isdir)
.replication(replication)
.blocksize(blocksize)
.mtime(mtime)
.atime(atime)
.perm(permission)
.flags(flags)
.owner(owner)
.group(group)
.symlink(symlink)
.path(path)
.fileId(fileId)
.children(childrenNum)
.feInfo(feInfo)
.storagePolicy(storagePolicy)
.ecPolicy(ecPolicy)
.build();
} else {
return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
mtime, atime, permission, flags, owner, group, symlink, path,

View File

@@ -73,7 +73,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -137,16 +136,15 @@ public class FSDirectory implements Closeable {
DFSUtil.string2Bytes("..");
public final static HdfsFileStatus DOT_RESERVED_STATUS =
new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
new HdfsFileStatus.Builder()
.isdir(true)
.perm(new FsPermission((short) 01770))
.build();
public final static HdfsFileStatus DOT_SNAPSHOT_DIR_STATUS =
new HdfsFileStatus(0, true, 0, 0, 0, 0, null,
EnumSet.noneOf(HdfsFileStatus.Flags.class), null, null, null,
HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
new HdfsFileStatus.Builder()
.isdir(true)
.build();
INodeDirectory rootDir;
private final FSNamesystem namesystem;
@@ -434,16 +432,22 @@
* @return Array of HdfsFileStatus
*/
void createReservedStatuses(long cTime) {
HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
new FsPermission((short) 0770),
EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
DOT_INODES, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
new FsPermission((short) 0770),
EnumSet.noneOf(HdfsFileStatus.Flags.class), null, supergroup, null,
RAW, -1L, 0, null,
HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
HdfsFileStatus inodes = new HdfsFileStatus.Builder()
.isdir(true)
.mtime(cTime)
.atime(cTime)
.perm(new FsPermission((short) 0770))
.group(supergroup)
.path(DOT_INODES)
.build();
HdfsFileStatus raw = new HdfsFileStatus.Builder()
.isdir(true)
.mtime(cTime)
.atime(cTime)
.perm(new FsPermission((short) 0770))
.group(supergroup)
.path(RAW)
.build();
reservedStatuses = new HdfsFileStatus[] {inodes, raw};
}

View File

@@ -257,22 +257,32 @@ public class TestDFSClientRetries {
Matchers.<EnumSet<AddBlockFlag>>any()))
.thenAnswer(answer);
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
"owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null)).when(mockNN).getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus.Builder()
.replication(1)
.blocksize(1024)
.perm(new FsPermission((short) 777))
.owner("owner")
.group("group")
.symlink(new byte[0])
.fileId(1010)
.build())
.when(mockNN)
.getFileInfo(anyString());
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
"owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null))
Mockito.doReturn(new HdfsFileStatus.Builder()
.replication(1)
.blocksize(1024)
.perm(new FsPermission((short) 777))
.owner("owner")
.group("group")
.symlink(new byte[0])
.fileId(1010)
.build())
.when(mockNN)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
anyObject());
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
anyObject());
final DFSClient client = new DFSClient(null, mockNN, conf, null);
OutputStream os = client.create("testfile", true);
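The getFileInfo and create stubs above return identically built statuses; one way to avoid repeating the chain in tests like this is a small factory method (stubFileStatus is a hypothetical helper, not part of this patch):

// Hypothetical helper: build the stub status once and reuse it.
private static HdfsFileStatus stubFileStatus() {
  return new HdfsFileStatus.Builder()
      .replication(1)
      .blocksize(1024)
      .perm(new FsPermission((short) 777))
      .owner("owner")
      .group("group")
      .symlink(new byte[0])
      .fileId(1010)
      .build();
}

// Usage:
// Mockito.doReturn(stubFileStatus()).when(mockNN).getFileInfo(anyString());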

View File

@@ -889,20 +889,25 @@ public class TestEncryptionZones {
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
CipherSuite suite, CryptoProtocolVersion version) throws Exception {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
"owner", "group", new byte[0], new byte[0],
1010, 0, new FileEncryptionInfo(suite,
version, new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"),
(byte) 0, null))
Mockito.doReturn(new HdfsFileStatus.Builder()
.replication(1)
.blocksize(1024)
.perm(new FsPermission((short) 777))
.owner("owner")
.group("group")
.symlink(new byte[0])
.path(new byte[0])
.fileId(1010)
.feInfo(new FileEncryptionInfo(suite, version,
new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"))
.build())
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
anyObject());
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
anyObject());
}
// This test only uses mocks. Called from the end of an existing test to
@@ -1248,6 +1253,7 @@ public class TestEncryptionZones {
Mockito.when(keyProvider.getConf()).thenReturn(conf);
byte[] testIdentifier = "Test identifier for delegation token".getBytes();
@SuppressWarnings("rawtypes")
Token<?> testToken = new Token(testIdentifier, new byte[0],
new Text(), new Text());
Mockito.when(((DelegationTokenExtension)keyProvider).

View File

@@ -30,7 +30,6 @@ import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -352,22 +351,33 @@ public class TestLease {
ugi[i] = UserGroupInformation.createUserForTesting("user" + i, groups);
}
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
"owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null)).when(mcp).getFileInfo(anyString());
Mockito
.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), EnumSet.noneOf(HdfsFileStatus.Flags.class),
"owner", "group", new byte[0], new byte[0],
1010, 0, null, (byte) 0, null))
Mockito.doReturn(new HdfsFileStatus.Builder()
.replication(1)
.blocksize(1024)
.perm(new FsPermission((short) 777))
.owner("owner")
.group("group")
.symlink(new byte[0])
.path(new byte[0])
.fileId(1010)
.build())
.when(mcp)
.getFileInfo(anyString());
Mockito.doReturn(new HdfsFileStatus.Builder()
.replication(1)
.blocksize(1024)
.perm(new FsPermission((short) 777))
.owner("owner")
.group("group")
.symlink(new byte[0])
.path(new byte[0])
.fileId(1010)
.build())
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
anyObject());
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject(),
anyObject());
final Configuration conf = new Configuration();
final DFSClient c1 = createDFSClientAs(ugi[0], conf);

View File

@@ -48,7 +48,6 @@ import java.nio.channels.FileChannel;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -92,6 +91,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1340,25 +1340,20 @@ public class TestFsck {
String pathString = "/tmp/testFile";
long length = 123L;
boolean isDir = false;
int blockReplication = 1;
long blockSize = 128 * 1024L;
long modTime = 123123123L;
long accessTime = 123123120L;
FsPermission perms = FsPermission.getDefault();
String owner = "foo";
String group = "bar";
byte[] symlink = null;
byte[] path = DFSUtil.string2Bytes(pathString);
long fileId = 312321L;
int numChildren = 1;
byte storagePolicy = 0;
HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
blockSize, modTime, accessTime, perms,
EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group, symlink,
path, fileId, numChildren, null, storagePolicy, null);
HdfsFileStatus file = new HdfsFileStatus.Builder()
.length(123L)
.replication(1)
.blocksize(128 * 1024L)
.mtime(123123123L)
.atime(123123120L)
.perm(FsPermission.getDefault())
.owner("foo")
.group("bar")
.path(DFSUtil.string2Bytes(pathString))
.fileId(312321L)
.children(1)
.storagePolicy(HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED)
.build();
Result replRes = new ReplicationResult(conf);
Result ecRes = new ErasureCodingResult(conf);

View File

@@ -23,7 +23,6 @@ import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
@@ -70,11 +69,19 @@ public class TestJsonUtil {
public void testHdfsFileStatus() throws IOException {
final long now = Time.now();
final String parent = "/dir";
final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
now, now + 10, new FsPermission((short) 0644),
EnumSet.noneOf(HdfsFileStatus.Flags.class), "user", "group",
DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
HdfsConstants.GRANDFATHER_INODE_ID, 0, null, (byte) 0, null);
final HdfsFileStatus status = new HdfsFileStatus.Builder()
.length(1001L)
.replication(3)
.blocksize(1L << 26)
.mtime(now)
.atime(now + 10)
.perm(new FsPermission((short) 0644))
.owner("user")
.group("group")
.symlink(DFSUtil.string2Bytes("bar"))
.path(DFSUtil.string2Bytes("foo"))
.fileId(HdfsConstants.GRANDFATHER_INODE_ID)
.build();
final FileStatus fstatus = toFileStatus(status, parent);
System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus);