HDFS-12681. Make HdfsLocatedFileStatus a subtype of LocatedFileStatus
parent c9a54aab6b
commit 0e560f3b8d
@@ -56,15 +56,36 @@ public class FileStatus implements Writable, Comparable<Object>,
   private Path symlink;
   private Set<AttrFlags> attr;
 
-  private enum AttrFlags {
+  /**
+   * Flags for entity attributes.
+   */
+  public enum AttrFlags {
+    /** ACL information available for this entity. */
     HAS_ACL,
+    /** Entity is encrypted. */
     HAS_CRYPT,
+    /** Entity is stored erasure-coded. */
     HAS_EC,
-    SNAPSHOT_ENABLED
+    /** Snapshot capability enabled. */
+    SNAPSHOT_ENABLED,
   }
-  private static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
-  private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
-    if (!(acl || crypt || ec)) {
+
+  /**
+   * Shared, empty set of attributes (a common case for FileStatus).
+   */
+  public static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
+
+  /**
+   * Convert boolean attributes to a set of flags.
+   * @param acl See {@link AttrFlags#HAS_ACL}.
+   * @param crypt See {@link AttrFlags#HAS_CRYPT}.
+   * @param ec See {@link AttrFlags#HAS_EC}.
+   * @param sn See {@link AttrFlags#SNAPSHOT_ENABLED}.
+   * @return converted set of flags.
+   */
+  public static Set<AttrFlags> attributes(boolean acl, boolean crypt,
+      boolean ec, boolean sn) {
+    if (!(acl || crypt || ec || sn)) {
       return NONE;
     }
     EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);

@@ -77,6 +98,9 @@ public class FileStatus implements Writable, Comparable<Object>,
     if (ec) {
       ret.add(AttrFlags.HAS_EC);
     }
+    if (sn) {
+      ret.add(AttrFlags.SNAPSHOT_ENABLED);
+    }
     return ret;
   }
 
@@ -117,6 +141,15 @@ public class FileStatus implements Writable, Comparable<Object>,
       long blocksize, long modification_time, long access_time,
       FsPermission permission, String owner, String group, Path symlink,
       Path path, boolean hasAcl, boolean isEncrypted, boolean isErasureCoded) {
+    this(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path,
+        attributes(hasAcl, isEncrypted, isErasureCoded, false));
+  }
+
+  public FileStatus(long length, boolean isdir, int block_replication,
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group, Path symlink,
+      Path path, Set<AttrFlags> attr) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;

@@ -136,7 +169,7 @@ public class FileStatus implements Writable, Comparable<Object>,
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
-    attr = flags(hasAcl, isEncrypted, isErasureCoded);
+    this.attr = attr;
 
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.

@@ -340,19 +373,6 @@ public class FileStatus implements Writable, Comparable<Object>,
     this.group = (group == null) ? "" : group;
   }
 
-  /**
-   * Sets Snapshot enabled flag.
-   *
-   * @param isSnapShotEnabled When true, SNAPSHOT_ENABLED flag is set
-   */
-  public void setSnapShotEnabledFlag(boolean isSnapShotEnabled) {
-    if (isSnapShotEnabled) {
-      attr.add(AttrFlags.SNAPSHOT_ENABLED);
-    } else {
-      attr.remove(AttrFlags.SNAPSHOT_ENABLED);
-    }
-  }
-
   /**
    * @return The contents of the symbolic link.
    */

@@ -480,7 +500,8 @@ public class FileStatus implements Writable, Comparable<Object>,
     setGroup(other.getGroup());
     setSymlink((other.isSymlink() ? other.getSymlink() : null));
     setPath(other.getPath());
-    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
+    attr = attributes(other.hasAcl(), other.isEncrypted(),
+        other.isErasureCoded(), other.isSnapshotEnabled());
     assert (isDirectory() && getSymlink() == null) || !isDirectory();
   }
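The hunks above replace FileStatus's private boolean-driven flag plumbing with a public AttrFlags set and a public attributes() factory. The following is an illustrative sketch, not part of the patch (the sample path, owner, group, and sizes are made up), showing how a caller can build a FileStatus with the new Set<AttrFlags> constructor:

    import java.util.Set;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileStatus.AttrFlags;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class AttrFlagsExample {
      public static void main(String[] args) {
        // Collapse the four booleans into a flag set; an all-false call
        // returns the shared FileStatus.NONE constant instead of allocating.
        Set<AttrFlags> attrs = FileStatus.attributes(
            true  /* hasAcl */, false /* isEncrypted */,
            false /* isErasureCoded */, true /* snapshotEnabled */);

        FileStatus status = new FileStatus(1024L, false, 3, 128 << 20,
            0L, 0L, FsPermission.getFileDefault(), "hdfs", "supergroup",
            null /* symlink */, new Path("/tmp/example"), attrs);

        System.out.println(status.hasAcl());             // true
        System.out.println(status.isSnapshotEnabled());  // true
      }
    }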
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.IOException;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -115,9 +116,35 @@ public class LocatedFileStatus extends FileStatus {
       Path symlink, Path path,
       boolean hasAcl, boolean isEncrypted, boolean isErasureCoded,
       BlockLocation[] locations) {
-    super(length, isdir, block_replication, blocksize, modification_time,
+    this(length, isdir, block_replication, blocksize, modification_time,
         access_time, permission, owner, group, symlink, path,
-        hasAcl, isEncrypted, isErasureCoded);
+        attributes(hasAcl, isEncrypted, isErasureCoded, false), locations);
+    this.locations = locations;
+  }
+
+  /**
+   * Constructor.
+   *
+   * @param length a file's length
+   * @param isdir if the path is a directory
+   * @param block_replication the file's replication factor
+   * @param blocksize a file's block size
+   * @param modification_time a file's modification time
+   * @param access_time a file's access time
+   * @param permission a file's permission
+   * @param owner a file's owner
+   * @param group a file's group
+   * @param symlink symlink if the path is a symbolic link
+   * @param path the path's qualified name
+   * @param attr Attribute flags (See {@link FileStatus.AttrFlags}).
+   * @param locations a file's block locations
+   */
+  public LocatedFileStatus(long length, boolean isdir, int block_replication,
+      long blocksize, long modification_time, long access_time,
+      FsPermission permission, String owner, String group, Path symlink,
+      Path path, Set<AttrFlags> attr, BlockLocation[] locations) {
+    super(length, isdir, block_replication, blocksize, modification_time,
+        access_time, permission, owner, group, symlink, path, attr);
     this.locations = locations;
   }
 
@@ -136,6 +163,15 @@ public class LocatedFileStatus extends FileStatus {
     return locations;
   }
 
+  /**
+   * Hook for subclasses to lazily set block locations. The {@link #locations}
+   * field should be null before this is called.
+   * @param locations Block locations for this instance.
+   */
+  protected void setBlockLocations(BlockLocation[] locations) {
+    this.locations = locations;
+  }
+
   /**
    * Compare this FileStatus to another FileStatus
    * @param o the FileStatus to be compared.
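The new protected setBlockLocations(...) hook lets a subclass defer materializing BlockLocation[] until after construction, which is how HdfsLocatedFileStatus.makeQualifiedLocated() uses it later in this patch. A hedged sketch of that pattern (the LazyLocatedStatus class and its field values are hypothetical, not from the patch):

    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;

    // Illustrative subclass: start with null locations, fill them in later.
    class LazyLocatedStatus extends LocatedFileStatus {
      LazyLocatedStatus(long len, Path path) {
        // FileStatus.NONE means "no attribute flags"; locations start null.
        super(len, false, 1, 0L, 0L, 0L, null, null, null, null, path,
            FileStatus.NONE, null);
      }

      void resolveLocations(BlockLocation[] resolved) {
        // Per the javadoc, the locations field is still null at this point.
        setBlockLocations(resolved);
      }
    }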
@@ -96,12 +96,11 @@ public final class PBHelper {
     int flags = proto.getFlags();
     FileStatus fileStatus = new FileStatus(length, isdir, blockReplication,
         blocksize, mtime, atime, permission, owner, group, symlink, path,
+        FileStatus.attributes(
         (flags & FileStatusProto.Flags.HAS_ACL_VALUE) != 0,
         (flags & FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0,
-        (flags & FileStatusProto.Flags.HAS_EC_VALUE) != 0);
-    fileStatus.setSnapShotEnabledFlag((flags & FileStatusProto.Flags
-        .SNAPSHOT_ENABLED_VALUE) != 0);
+        (flags & FileStatusProto.Flags.HAS_EC_VALUE) != 0,
+        (flags & FileStatusProto.Flags.SNAPSHOT_ENABLED_VALUE) != 0));
     return fileStatus;
   }
 
@@ -60,4 +60,34 @@
     <Field name="cachingStrategy" />
     <Bug pattern="IS2_INCONSISTENT_SYNC" />
   </Match>
+
+  <!-- BlockLocations are user-facing, but LocatedBlocks are not. -->
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
+    <Field name="hdfsloc" />
+    <Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED" />
+  </Match>
+
+  <!-- Hdfs*FileStatus are internal types. This "internal" state is not sensitive. -->
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.protocol.HdfsNamedFileStatus" />
+    <Method name="getLocalNameInBytes" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.protocol.HdfsNamedFileStatus" />
+    <Method name="getSymlinkInBytes" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
+    <Method name="getLocalNameInBytes" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
+    <Method name="getSymlinkInBytes" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
 </FindBugsFilter>
@@ -18,271 +18,40 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 import java.net.URI;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileStatus.AttrFlags;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.io.Writable;
 
-/** Interface that represents the over the wire information for a file.
+/**
+ * HDFS metadata for an entity in the filesystem.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class HdfsFileStatus extends FileStatus {
-
-  private static final long serialVersionUID = 0x126eb82a;
-
-  // local name of the inode that's encoded in java UTF8
-  private byte[] uPath;
-  private byte[] uSymlink; // symlink target encoded in java UTF8/null
-  private final long fileId;
-  private final FileEncryptionInfo feInfo;
-  private final ErasureCodingPolicy ecPolicy;
-
-  // Used by dir, not including dot and dotdot. Always zero for a regular file.
-  private final int childrenNum;
-  private final byte storagePolicy;
-
-  public static final byte[] EMPTY_NAME = new byte[0];
-
-  /**
-   * Set of features potentially active on an instance.
-   */
-  public enum Flags {
+public interface HdfsFileStatus
+    extends Writable, Comparable<Object>, Serializable, ObjectInputValidation {
+
+  byte[] EMPTY_NAME = new byte[0];
+
+  /** Set of features potentially active on an instance. */
+  enum Flags {
     HAS_ACL,
     HAS_CRYPT,
     HAS_EC,
     SNAPSHOT_ENABLED
   }
-  private final EnumSet<Flags> flags;
-
-  /**
-   * Constructor.
-   * @param length the number of bytes the file has
-   * @param isdir if the path is a directory
-   * @param replication the replication factor
-   * @param blocksize the block size
-   * @param mtime modification time
-   * @param atime access time
-   * @param permission permission
-   * @param owner the owner of the path
-   * @param group the group of the path
-   * @param symlink symlink target encoded in java UTF8 or null
-   * @param path the local name in java UTF8 encoding the same as that in-memory
-   * @param fileId the file id
-   * @param childrenNum the number of children. Used by directory.
-   * @param feInfo the file's encryption info
-   * @param storagePolicy ID which specifies storage policy
-   * @param ecPolicy the erasure coding policy
-   */
-  protected HdfsFileStatus(long length, boolean isdir, int replication,
-      long blocksize, long mtime, long atime,
-      FsPermission permission, EnumSet<Flags> flags,
-      String owner, String group,
-      byte[] symlink, byte[] path, long fileId,
-      int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
-    super(length, isdir, replication, blocksize, mtime,
-        atime, convert(isdir, symlink != null, permission, flags),
-        owner, group, null, null,
-        flags.contains(Flags.HAS_ACL), flags.contains(Flags.HAS_CRYPT),
-        flags.contains(Flags.HAS_EC));
-    this.flags = flags;
-    this.uSymlink = symlink;
-    this.uPath = path;
-    this.fileId = fileId;
-    this.childrenNum = childrenNum;
-    this.feInfo = feInfo;
-    this.storagePolicy = storagePolicy;
-    this.ecPolicy = ecPolicy;
-  }
-
-  /**
-   * Set redundant flags for compatibility with existing applications.
-   */
-  protected static FsPermission convert(boolean isdir, boolean symlink,
-      FsPermission p, EnumSet<Flags> f) {
-    if (p instanceof FsPermissionExtension) {
-      // verify flags are set consistently
-      assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
-      assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
-      assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
-      return p;
-    }
-    if (null == p) {
-      if (isdir) {
-        p = FsPermission.getDirDefault();
-      } else if (symlink) {
-        p = FsPermission.getDefault();
-      } else {
-        p = FsPermission.getFileDefault();
-      }
-    }
-    return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL),
-        f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
-  }
-
-  @Override
-  public boolean isSymlink() {
-    return uSymlink != null;
-  }
-
-  @Override
-  public boolean hasAcl() {
-    return flags.contains(Flags.HAS_ACL);
-  }
-
-  @Override
-  public boolean isEncrypted() {
-    return flags.contains(Flags.HAS_CRYPT);
-  }
-
-  @Override
-  public boolean isErasureCoded() {
-    return flags.contains(Flags.HAS_EC);
-  }
-
-  /**
-   * Check if the local name is empty.
-   * @return true if the name is empty
-   */
-  public final boolean isEmptyLocalName() {
-    return uPath.length == 0;
-  }
-
-  /**
-   * Get the string representation of the local name.
-   * @return the local name in string
-   */
-  public final String getLocalName() {
-    return DFSUtilClient.bytes2String(uPath);
-  }
-
-  /**
-   * Get the Java UTF8 representation of the local name.
-   * @return the local name in java UTF8
-   */
-  public final byte[] getLocalNameInBytes() {
-    return uPath;
-  }
-
-  /**
-   * Get the string representation of the full path name.
-   * @param parent the parent path
-   * @return the full path in string
-   */
-  public final String getFullName(final String parent) {
-    if (isEmptyLocalName()) {
-      return parent;
-    }
-
-    StringBuilder fullName = new StringBuilder(parent);
-    if (!parent.endsWith(Path.SEPARATOR)) {
-      fullName.append(Path.SEPARATOR);
-    }
-    fullName.append(getLocalName());
-    return fullName.toString();
-  }
-
-  /**
-   * Get the full path.
-   * @param parent the parent path
-   * @return the full path
-   */
-  public final Path getFullPath(final Path parent) {
-    if (isEmptyLocalName()) {
-      return parent;
-    }
-
-    return new Path(parent, getLocalName());
-  }
-
-  @Override
-  public Path getSymlink() throws IOException {
-    if (isSymlink()) {
-      return new Path(DFSUtilClient.bytes2String(uSymlink));
-    }
-    throw new IOException("Path " + getPath() + " is not a symbolic link");
-  }
-
-  @Override
-  public void setSymlink(Path sym) {
-    uSymlink = DFSUtilClient.string2Bytes(sym.toString());
-  }
-
-  /**
-   * Opaque referant for the symlink, to be resolved at the client.
-   */
-  public final byte[] getSymlinkInBytes() {
-    return uSymlink;
-  }
-
-  public final long getFileId() {
-    return fileId;
-  }
-
-  public final FileEncryptionInfo getFileEncryptionInfo() {
-    return feInfo;
-  }
-
-  /**
-   * Get the erasure coding policy if it's set.
-   * @return the erasure coding policy
-   */
-  public ErasureCodingPolicy getErasureCodingPolicy() {
-    return ecPolicy;
-  }
-
-  public final int getChildrenNum() {
-    return childrenNum;
-  }
-
-  /** @return the storage policy id */
-  public final byte getStoragePolicy() {
-    return storagePolicy;
-  }
-
-  /**
-   * Check if directory is Snapshot enabled or not.
-   *
-   * @return true if directory is snapshot enabled
-   */
-  public boolean isSnapshotEnabled() {
-    return flags.contains(Flags.SNAPSHOT_ENABLED);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    // satisfy findbugs
-    return super.equals(o);
-  }
-
-  @Override
-  public int hashCode() {
-    // satisfy findbugs
-    return super.hashCode();
-  }
-
-  /**
-   * Resolve the short name of the Path given the URI, parent provided. This
-   * FileStatus reference will not contain a valid Path until it is resolved
-   * by this method.
-   * @param defaultUri FileSystem to fully qualify HDFS path.
-   * @param parent Parent path of this element.
-   * @return Reference to this instance.
-   */
-  public final FileStatus makeQualified(URI defaultUri, Path parent) {
-    // fully-qualify path
-    setPath(getFullPath(parent).makeQualified(defaultUri, null));
-    return this; // API compatibility
-  }
 
   /**
    * Builder class for HdfsFileStatus instances. Note default values for

@@ -290,7 +59,7 @@ public class HdfsFileStatus extends FileStatus {
    */
  @InterfaceAudience.Private
  @InterfaceStability.Unstable
- public static class Builder {
+ class Builder {
    // Changing default values will affect cases where values are not
    // specified. Be careful!
    private long length = 0L;

@@ -311,6 +80,7 @@ public class HdfsFileStatus extends FileStatus {
    private byte storagePolicy =
        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
    private ErasureCodingPolicy ecPolicy = null;
+   private LocatedBlocks locations = null;
 
    /**
     * Set the length of the entity (default = 0).

@@ -489,14 +259,280 @@ public class HdfsFileStatus extends FileStatus {
      return this;
    }
 
+   /**
+    * Set the block locations for this entity (default = null).
+    * @param locations HDFS locations
+    *     (see {@link HdfsLocatedFileStatus#makeQualifiedLocated(URI, Path)})
+    * @return This Builder instance
+    */
+   public Builder locations(LocatedBlocks locations) {
+     this.locations = locations;
+     return this;
+   }
+
    /**
     * @return An {@link HdfsFileStatus} instance from these parameters.
     */
    public HdfsFileStatus build() {
-     return new HdfsFileStatus(length, isdir, replication, blocksize,
-         mtime, atime, permission, flags, owner, group, symlink, path, fileId,
-         childrenNum, feInfo, storagePolicy, ecPolicy);
+     if (null == locations && !isdir && null == symlink) {
+       return new HdfsNamedFileStatus(length, isdir, replication, blocksize,
+           mtime, atime, permission, flags, owner, group, symlink, path,
+           fileId, childrenNum, feInfo, storagePolicy, ecPolicy);
+     }
+     return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
+         mtime, atime, permission, flags, owner, group, symlink, path,
+         fileId, childrenNum, feInfo, storagePolicy, ecPolicy, locations);
    }
  }
 
+ ///////////////////
+ // HDFS-specific //
+ ///////////////////
+
+ /**
+  * Inode ID for this entity, if a file.
+  * @return inode ID.
+  */
+ long getFileId();
+
+ /**
+  * Get metadata for encryption, if present.
+  * @return the {@link FileEncryptionInfo} for this stream, or null if not
+  *         encrypted.
+  */
+ FileEncryptionInfo getFileEncryptionInfo();
+
+ /**
+  * Check if the local name is empty.
+  * @return true if the name is empty
+  */
+ default boolean isEmptyLocalName() {
+   return getLocalNameInBytes().length == 0;
+ }
+
+ /**
+  * Get the string representation of the local name.
+  * @return the local name in string
+  */
+ default String getLocalName() {
+   return DFSUtilClient.bytes2String(getLocalNameInBytes());
+ }
+
+ /**
+  * Get the Java UTF8 representation of the local name.
+  * @return the local name in java UTF8
+  */
+ byte[] getLocalNameInBytes();
+
+ /**
+  * Get the string representation of the full path name.
+  * @param parent the parent path
+  * @return the full path in string
+  */
+ default String getFullName(String parent) {
+   if (isEmptyLocalName()) {
+     return parent;
+   }
+
+   StringBuilder fullName = new StringBuilder(parent);
+   if (!parent.endsWith(Path.SEPARATOR)) {
+     fullName.append(Path.SEPARATOR);
+   }
+   fullName.append(getLocalName());
+   return fullName.toString();
+ }
+
+ /**
+  * Get the full path.
+  * @param parent the parent path
+  * @return the full path
+  */
+ default Path getFullPath(Path parent) {
+   if (isEmptyLocalName()) {
+     return parent;
+   }
+
+   return new Path(parent, getLocalName());
+ }
+
+ /**
+  * Opaque referant for the symlink, to be resolved at the client.
+  */
+ byte[] getSymlinkInBytes();
+
+ /**
+  * @return number of children for this inode.
+  */
+ int getChildrenNum();
+
+ /**
+  * Get the erasure coding policy if it's set.
+  * @return the erasure coding policy
+  */
+ ErasureCodingPolicy getErasureCodingPolicy();
+
+ /** @return the storage policy id */
+ byte getStoragePolicy();
+
+ /**
+  * Resolve the short name of the Path given the URI, parent provided. This
+  * FileStatus reference will not contain a valid Path until it is resolved
+  * by this method.
+  * @param defaultUri FileSystem to fully qualify HDFS path.
+  * @param parent Parent path of this element.
+  * @return Reference to this instance.
+  */
+ default FileStatus makeQualified(URI defaultUri, Path parent) {
+   // fully-qualify path
+   setPath(getFullPath(parent).makeQualified(defaultUri, null));
+   return (FileStatus) this; // API compatibility
+ }
+
+ ////////////////////////////
+ // FileStatus "overrides" //
+ ////////////////////////////
+
+ /**
+  * See {@link FileStatus#getPath()}.
+  */
+ Path getPath();
+ /**
+  * See {@link FileStatus#setPath(Path)}.
+  */
+ void setPath(Path p);
+ /**
+  * See {@link FileStatus#getLen()}.
+  */
+ long getLen();
+ /**
+  * See {@link FileStatus#isFile()}.
+  */
+ boolean isFile();
+ /**
+  * See {@link FileStatus#isDirectory()}.
+  */
+ boolean isDirectory();
+ /**
+  * See {@link FileStatus#isDir()}.
+  */
+ boolean isDir();
+ /**
+  * See {@link FileStatus#isSymlink()}.
+  */
+ boolean isSymlink();
+ /**
+  * See {@link FileStatus#getBlockSize()}.
+  */
+ long getBlockSize();
+ /**
+  * See {@link FileStatus#getReplication()}.
+  */
+ short getReplication();
+ /**
+  * See {@link FileStatus#getModificationTime()}.
+  */
+ long getModificationTime();
+ /**
+  * See {@link FileStatus#getAccessTime()}.
+  */
+ long getAccessTime();
+ /**
+  * See {@link FileStatus#getPermission()}.
+  */
+ FsPermission getPermission();
+ /**
+  * See {@link FileStatus#setPermission(FsPermission)}.
+  */
+ void setPermission(FsPermission permission);
+ /**
+  * See {@link FileStatus#getOwner()}.
+  */
+ String getOwner();
+ /**
+  * See {@link FileStatus#setOwner(String)}.
+  */
+ void setOwner(String owner);
+ /**
+  * See {@link FileStatus#getGroup()}.
+  */
+ String getGroup();
+ /**
+  * See {@link FileStatus#setGroup(String)}.
+  */
+ void setGroup(String group);
+ /**
+  * See {@link FileStatus#hasAcl()}.
+  */
+ boolean hasAcl();
+ /**
+  * See {@link FileStatus#isEncrypted()}.
+  */
+ boolean isEncrypted();
+ /**
+  * See {@link FileStatus#isErasureCoded()}.
+  */
+ boolean isErasureCoded();
+ /**
+  * See {@link FileStatus#isSnapshotEnabled()}.
+  */
+ boolean isSnapshotEnabled();
+ /**
+  * See {@link FileStatus#getSymlink()}.
+  */
+ Path getSymlink() throws IOException;
+ /**
+  * See {@link FileStatus#setSymlink(Path sym)}.
+  */
+ void setSymlink(Path sym);
+ /**
+  * See {@link FileStatus#compareTo(FileStatus)}.
+  */
+ int compareTo(FileStatus stat);
+
+ /**
+  * Set redundant flags for compatibility with existing applications.
+  */
+ static FsPermission convert(boolean isdir, boolean symlink,
+     FsPermission p, Set<Flags> f) {
+   if (p instanceof FsPermissionExtension) {
+     // verify flags are set consistently
+     assert p.getAclBit() == f.contains(HdfsFileStatus.Flags.HAS_ACL);
+     assert p.getEncryptedBit() == f.contains(HdfsFileStatus.Flags.HAS_CRYPT);
+     assert p.getErasureCodedBit() == f.contains(HdfsFileStatus.Flags.HAS_EC);
+     return p;
+   }
+   if (null == p) {
+     if (isdir) {
+       p = FsPermission.getDirDefault();
+     } else if (symlink) {
+       p = FsPermission.getDefault();
+     } else {
+       p = FsPermission.getFileDefault();
+     }
+   }
+   return new FsPermissionExtension(p, f.contains(Flags.HAS_ACL),
+       f.contains(Flags.HAS_CRYPT), f.contains(Flags.HAS_EC));
+ }
+
+ static Set<AttrFlags> convert(Set<Flags> flags) {
+   if (flags.isEmpty()) {
+     return FileStatus.NONE;
+   }
+   EnumSet<AttrFlags> attr = EnumSet.noneOf(AttrFlags.class);
+   if (flags.contains(Flags.HAS_ACL)) {
+     attr.add(AttrFlags.HAS_ACL);
+   }
+   if (flags.contains(Flags.HAS_EC)) {
+     attr.add(AttrFlags.HAS_EC);
+   }
+   if (flags.contains(Flags.HAS_CRYPT)) {
+     attr.add(AttrFlags.HAS_CRYPT);
+   }
+   if (flags.contains(Flags.SNAPSHOT_ENABLED)) {
+     attr.add(AttrFlags.SNAPSHOT_ENABLED);
+   }
+   return attr;
+ }
+
 }
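Because build() now picks HdfsNamedFileStatus or HdfsLocatedFileStatus depending on whether locations, a directory bit, or a symlink were supplied, callers only work against the HdfsFileStatus interface. A sketch of the builder flow, assuming the Builder defaults (null locations and symlink) shown in this diff; the concrete field values here are illustrative, not from the patch:

    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;

    public class BuilderExample {
      public static void main(String[] args) {
        // No locations, not a directory, no symlink -> the compact named variant.
        HdfsFileStatus named = new HdfsFileStatus.Builder()
            .length(1024L)
            .isdir(false)
            .path(new byte[] {'f'})
            .fileId(16387L)
            .build();

        // Directories (and anything with locations or a symlink) keep the
        // located subtype, preserving compatibility for listing callers.
        HdfsFileStatus dir = new HdfsFileStatus.Builder()
            .isdir(true)
            .path(HdfsFileStatus.EMPTY_NAME)
            .build();

        System.out.println(named instanceof HdfsLocatedFileStatus);  // false
        System.out.println(dir instanceof HdfsLocatedFileStatus);    // true
      }
    }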
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import java.io.IOException;
 import java.net.URI;
 import java.util.EnumSet;
 
@@ -29,71 +30,150 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 
 /**
- * Interface that represents the over the wire information
- * including block locations for a file.
+ * HDFS metadata for an entity in the filesystem with locations. Note that
+ * symlinks and directories are returned as {@link HdfsLocatedFileStatus} for
+ * backwards compatibility.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class HdfsLocatedFileStatus extends HdfsFileStatus {
-
-  private static final long serialVersionUID = 0x23c73328;
-
-  /**
-   * Left transient, because {@link #makeQualifiedLocated(URI,Path)}
-   * is the user-facing type.
-   */
-  private transient LocatedBlocks locations;
+public class HdfsLocatedFileStatus
+    extends LocatedFileStatus implements HdfsFileStatus {
+  private static final long serialVersionUID = 0x126eb82a;
+
+  // local name of the inode that's encoded in java UTF8
+  private byte[] uPath;
+  private byte[] uSymlink; // symlink target encoded in java UTF8/null
+  private final long fileId;
+  private final FileEncryptionInfo feInfo;
+  private final ErasureCodingPolicy ecPolicy;
+
+  // Used by dir, not including dot and dotdot. Always zero for a regular file.
+  private final int childrenNum;
+  private final byte storagePolicy;
+
+  // BlockLocations[] is the user-facing type
+  private transient LocatedBlocks hdfsloc;
 
   /**
-   * Constructor
-   *
-   * @param length size
-   * @param isdir if this is directory
-   * @param block_replication the file's replication factor
-   * @param blocksize the file's block size
-   * @param modification_time most recent modification time
-   * @param access_time most recent access time
-   * @param permission permission
-   * @param owner owner
-   * @param group group
-   * @param symlink symbolic link
-   * @param path local path name in java UTF8 format
-   * @param fileId the file id
-   * @param locations block locations
-   * @param feInfo file encryption info
-   */
-  public HdfsLocatedFileStatus(long length, boolean isdir,
-      int block_replication, long blocksize, long modification_time,
-      long access_time, FsPermission permission, EnumSet<Flags> flags,
-      String owner, String group, byte[] symlink, byte[] path, long fileId,
-      LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo,
-      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
-    super(length, isdir, block_replication, blocksize, modification_time,
-        access_time, permission, flags, owner, group, symlink, path, fileId,
-        childrenNum, feInfo, storagePolicy, ecPolicy);
-    this.locations = locations;
-  }
-
-  public LocatedBlocks getBlockLocations() {
-    return locations;
-  }
-
-  /**
-   * This function is used to transform the underlying HDFS LocatedBlocks to
-   * BlockLocations.
-   *
-   * The returned BlockLocation will have different formats for replicated
-   * and erasure coded file.
-   * Please refer to
-   * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations
-   * (FileStatus, long, long)}
-   * for examples.
-   */
-  public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
-      Path path) {
-    makeQualified(defaultUri, path);
-    return new LocatedFileStatus(this,
-        DFSUtilClient.locatedBlocks2Locations(getBlockLocations()));
+   * Constructor.
+   * @param length the number of bytes the file has
+   * @param isdir if the path is a directory
+   * @param replication the replication factor
+   * @param blocksize the block size
+   * @param mtime modification time
+   * @param atime access time
+   * @param permission permission
+   * @param owner the owner of the path
+   * @param group the group of the path
+   * @param symlink symlink target encoded in java UTF8 or null
+   * @param path the local name in java UTF8 encoding the same as that in-memory
+   * @param fileId the file id
+   * @param childrenNum the number of children. Used by directory.
+   * @param feInfo the file's encryption info
+   * @param storagePolicy ID which specifies storage policy
+   * @param ecPolicy the erasure coding policy
+   * @param hdfsloc block locations
+   */
+  HdfsLocatedFileStatus(long length, boolean isdir, int replication,
+      long blocksize, long mtime, long atime,
+      FsPermission permission, EnumSet<Flags> flags,
+      String owner, String group,
+      byte[] symlink, byte[] path, long fileId,
+      int childrenNum, FileEncryptionInfo feInfo,
+      byte storagePolicy, ErasureCodingPolicy ecPolicy,
+      LocatedBlocks hdfsloc) {
+    super(length, isdir, replication, blocksize, mtime, atime,
+        HdfsFileStatus.convert(isdir, symlink != null, permission, flags),
+        owner, group, null, null, HdfsFileStatus.convert(flags),
+        null);
+    this.uSymlink = symlink;
+    this.uPath = path;
+    this.fileId = fileId;
+    this.childrenNum = childrenNum;
+    this.feInfo = feInfo;
+    this.storagePolicy = storagePolicy;
+    this.ecPolicy = ecPolicy;
+    this.hdfsloc = hdfsloc;
+  }
+
+  @Override // visibility
+  public void setOwner(String owner) {
+    super.setOwner(owner);
+  }
+
+  @Override // visibility
+  public void setGroup(String group) {
+    super.setOwner(group);
+  }
+
+  @Override
+  public boolean isSymlink() {
+    return uSymlink != null;
+  }
+
+  @Override
+  public Path getSymlink() throws IOException {
+    if (isSymlink()) {
+      return new Path(DFSUtilClient.bytes2String(getSymlinkInBytes()));
+    }
+    throw new IOException("Path " + getPath() + " is not a symbolic link");
+  }
+
+  @Override // visibility
+  public void setPermission(FsPermission permission) {
+    super.setPermission(permission);
+  }
+
+  /**
+   * Get the Java UTF8 representation of the local name.
+   * @return the local name in java UTF8
+   */
+  @Override
+  public byte[] getLocalNameInBytes() {
+    return uPath;
+  }
+
+  @Override
+  public void setSymlink(Path sym) {
+    uSymlink = DFSUtilClient.string2Bytes(sym.toString());
+  }
+
+  /**
+   * Opaque referant for the symlink, to be resolved at the client.
+   */
+  @Override
+  public byte[] getSymlinkInBytes() {
+    return uSymlink;
+  }
+
+  @Override
+  public long getFileId() {
+    return fileId;
+  }
+
+  @Override
+  public FileEncryptionInfo getFileEncryptionInfo() {
+    return feInfo;
+  }
+
+  /**
+   * Get the erasure coding policy if it's set.
+   * @return the erasure coding policy
+   */
+  @Override
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
+  }
+
+  @Override
+  public int getChildrenNum() {
+    return childrenNum;
+  }
+
+  /** @return the storage policy id */
+  @Override
+  public byte getStoragePolicy() {
+    return storagePolicy;
   }
 
   @Override

@@ -107,4 +187,34 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
     // satisfy findbugs
     return super.hashCode();
   }
+
+  /**
+   * Get block locations for this entity, in HDFS format.
+   * See {@link #makeQualifiedLocated(URI, Path)}.
+   * See {@link DFSUtilClient#locatedBlocks2Locations(LocatedBlocks)}.
+   * @return block locations
+   */
+  public LocatedBlocks getLocatedBlocks() {
+    return hdfsloc;
+  }
+
+  /**
+   * This function is used to transform the underlying HDFS LocatedBlocks to
+   * BlockLocations. This method must be invoked before
+   * {@link #getBlockLocations()}.
+   *
+   * The returned BlockLocation will have different formats for replicated
+   * and erasure coded file.
+   * Please refer to
+   * {@link org.apache.hadoop.fs.FileSystem#getFileBlockLocations
+   * (FileStatus, long, long)}
+   * for examples.
+   */
+  public LocatedFileStatus makeQualifiedLocated(URI defaultUri, Path path) {
+    makeQualified(defaultUri, path);
+    setBlockLocations(
+        DFSUtilClient.locatedBlocks2Locations(getLocatedBlocks()));
+    return this;
+  }
+
 }
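HdfsLocatedFileStatus now keeps the wire-format LocatedBlocks in the transient hdfsloc field and only produces user-facing BlockLocation[] when makeQualifiedLocated() is invoked. A hedged sketch of that client-side conversion (the helper method, its name, and its arguments are illustrative, not from the patch):

    import java.net.URI;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;

    public class QualifyExample {
      // Turn a wire-format status into the user-facing LocatedFileStatus.
      static LocatedFileStatus toUserFacing(HdfsFileStatus wire,
          URI nameNodeUri, Path parent) {
        if (wire instanceof HdfsLocatedFileStatus) {
          // Qualifies the path and converts LocatedBlocks -> BlockLocation[]
          // via DFSUtilClient.locatedBlocks2Locations, then returns this.
          return ((HdfsLocatedFileStatus) wire)
              .makeQualifiedLocated(nameNodeUri, parent);
        }
        throw new IllegalArgumentException("no block locations for "
            + wire.getLocalName());
      }
    }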
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+
+import java.io.IOException;
+import java.util.Set;
+
+/**
+ * HDFS metadata for an entity in the filesystem without locations. Note that
+ * symlinks and directories are returned as {@link HdfsLocatedFileStatus} for
+ * backwards compatibility.
+ */
+public class HdfsNamedFileStatus extends FileStatus implements HdfsFileStatus {
+
+  // local name of the inode that's encoded in java UTF8
+  private byte[] uPath;
+  private byte[] uSymlink; // symlink target encoded in java UTF8/null
+  private final long fileId;
+  private final FileEncryptionInfo feInfo;
+  private final ErasureCodingPolicy ecPolicy;
+
+  // Used by dir, not including dot and dotdot. Always zero for a regular file.
+  private final int childrenNum;
+  private final byte storagePolicy;
+
+  /**
+   * Constructor.
+   * @param length the number of bytes the file has
+   * @param isdir if the path is a directory
+   * @param replication the replication factor
+   * @param blocksize the block size
+   * @param mtime modification time
+   * @param atime access time
+   * @param permission permission
+   * @param owner the owner of the path
+   * @param group the group of the path
+   * @param symlink symlink target encoded in java UTF8 or null
+   * @param path the local name in java UTF8 encoding the same as that in-memory
+   * @param fileId the file id
+   * @param childrenNum the number of children. Used by directory.
+   * @param feInfo the file's encryption info
+   * @param storagePolicy ID which specifies storage policy
+   * @param ecPolicy the erasure coding policy
+   */
+  HdfsNamedFileStatus(long length, boolean isdir, int replication,
+      long blocksize, long mtime, long atime,
+      FsPermission permission, Set<Flags> flags,
+      String owner, String group,
+      byte[] symlink, byte[] path, long fileId,
+      int childrenNum, FileEncryptionInfo feInfo,
+      byte storagePolicy, ErasureCodingPolicy ecPolicy) {
+    super(length, isdir, replication, blocksize, mtime, atime,
+        HdfsFileStatus.convert(isdir, symlink != null, permission, flags),
+        owner, group, null, null,
+        HdfsFileStatus.convert(flags));
+    this.uSymlink = symlink;
+    this.uPath = path;
+    this.fileId = fileId;
+    this.childrenNum = childrenNum;
+    this.feInfo = feInfo;
+    this.storagePolicy = storagePolicy;
+    this.ecPolicy = ecPolicy;
+  }
+
+  @Override
+  public void setOwner(String owner) {
+    super.setOwner(owner);
+  }
+
+  @Override
+  public void setGroup(String group) {
+    super.setOwner(group);
+  }
+
+  @Override
+  public boolean isSymlink() {
+    return uSymlink != null;
+  }
+
+  @Override
+  public Path getSymlink() throws IOException {
+    if (isSymlink()) {
+      return new Path(DFSUtilClient.bytes2String(getSymlinkInBytes()));
+    }
+    throw new IOException("Path " + getPath() + " is not a symbolic link");
+  }
+
+  @Override
+  public void setPermission(FsPermission permission) {
+    super.setPermission(permission);
+  }
+
+  /**
+   * Get the Java UTF8 representation of the local name.
+   *
+   * @return the local name in java UTF8
+   */
+  @Override
+  public byte[] getLocalNameInBytes() {
+    return uPath;
+  }
+
+  @Override
+  public void setSymlink(Path sym) {
+    uSymlink = DFSUtilClient.string2Bytes(sym.toString());
+  }
+
+  /**
+   * Opaque referant for the symlink, to be resolved at the client.
+   */
+  @Override
+  public byte[] getSymlinkInBytes() {
+    return uSymlink;
+  }
+
+  @Override
+  public long getFileId() {
+    return fileId;
+  }
+
+  @Override
+  public FileEncryptionInfo getFileEncryptionInfo() {
+    return feInfo;
+  }
+
+  /**
+   * Get the erasure coding policy if it's set.
+   *
+   * @return the erasure coding policy
+   */
+  @Override
+  public ErasureCodingPolicy getErasureCodingPolicy() {
+    return ecPolicy;
+  }
+
+  @Override
+  public int getChildrenNum() {
+    return childrenNum;
+  }
+
+  /** @return the storage policy id */
+  @Override
+  public byte getStoragePolicy() {
+    return storagePolicy;
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    // satisfy findbugs
+    return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+    // satisfy findbugs
+    return super.hashCode();
+  }
+
+}
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
@@ -1585,23 +1585,36 @@ public class PBHelperClient {
     EnumSet<HdfsFileStatus.Flags> flags = fs.hasFlags()
         ? convertFlags(fs.getFlags())
         : convertFlags(fs.getPermission());
-    return new HdfsLocatedFileStatus(
-        fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
-        fs.getBlockReplication(), fs.getBlocksize(),
-        fs.getModificationTime(), fs.getAccessTime(),
-        convert(fs.getPermission()),
-        flags,
-        fs.getOwner(), fs.getGroup(),
-        fs.getFileType().equals(FileType.IS_SYMLINK) ?
-            fs.getSymlink().toByteArray() : null,
-        fs.getPath().toByteArray(),
-        fs.hasFileId()? fs.getFileId(): HdfsConstants.GRANDFATHER_INODE_ID,
-        fs.hasLocations() ? convert(fs.getLocations()) : null,
-        fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
-        fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
-        fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
-            : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
-        fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null);
+    return new HdfsFileStatus.Builder()
+        .length(fs.getLength())
+        .isdir(fs.getFileType().equals(FileType.IS_DIR))
+        .replication(fs.getBlockReplication())
+        .blocksize(fs.getBlocksize())
+        .mtime(fs.getModificationTime())
+        .atime(fs.getAccessTime())
+        .perm(convert(fs.getPermission()))
+        .flags(flags)
+        .owner(fs.getOwner())
+        .group(fs.getGroup())
+        .symlink(FileType.IS_SYMLINK.equals(fs.getFileType())
+            ? fs.getSymlink().toByteArray()
+            : null)
+        .path(fs.getPath().toByteArray())
+        .fileId(fs.hasFileId()
+            ? fs.getFileId()
+            : HdfsConstants.GRANDFATHER_INODE_ID)
+        .locations(fs.hasLocations() ? convert(fs.getLocations()) : null)
+        .children(fs.hasChildrenNum() ? fs.getChildrenNum() : -1)
+        .feInfo(fs.hasFileEncryptionInfo()
+            ? convert(fs.getFileEncryptionInfo())
+            : null)
+        .storagePolicy(fs.hasStoragePolicy()
+            ? (byte) fs.getStoragePolicy()
+            : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED)
+        .ecPolicy(fs.hasEcPolicy()
+            ? convertErasureCodingPolicy(fs.getEcPolicy())
+            : null)
+        .build();
   }
 
   private static EnumSet<HdfsFileStatus.Flags> convertFlags(int flags) {

@@ -1864,9 +1877,9 @@ public class PBHelperClient {
     if (dl == null)
       return null;
     List<HdfsFileStatusProto> partList = dl.getPartialListingList();
-    return new DirectoryListing(partList.isEmpty() ?
-        new HdfsLocatedFileStatus[0] :
-        convert(partList.toArray(new HdfsFileStatusProto[partList.size()])),
+    return new DirectoryListing(partList.isEmpty()
+        ? new HdfsFileStatus[0]
+        : convert(partList.toArray(new HdfsFileStatusProto[partList.size()])),
         dl.getRemainingEntries());
   }
 
@@ -2163,7 +2176,7 @@ public class PBHelperClient {
     }
     if (fs instanceof HdfsLocatedFileStatus) {
       final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
-      LocatedBlocks locations = lfs.getBlockLocations();
+      LocatedBlocks locations = lfs.getLocatedBlocks();
       if (locations != null) {
         builder.setLocations(convert(locations));
       }
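With DirectoryListing now holding plain HdfsFileStatus entries, code that previously assumed HdfsLocatedFileStatus[] should downcast only when locations are actually present. An illustrative sketch (the helper class is hypothetical, not from the patch, and it assumes DirectoryListing#getPartialListing()):

    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;

    public class ListingExample {
      // Iterate a listing without assuming every entry carries block locations.
      static void printEntries(DirectoryListing listing) {
        for (HdfsFileStatus status : listing.getPartialListing()) {
          StringBuilder line = new StringBuilder(status.getLocalName());
          if (status instanceof HdfsLocatedFileStatus
              && ((HdfsLocatedFileStatus) status).getLocatedBlocks() != null) {
            line.append(" (locations attached)");
          }
          System.out.println(line);
        }
      }
    }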
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.Type;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Set;
+import java.util.stream.Stream;
+import static java.util.stream.Collectors.joining;
+import static java.util.stream.Collectors.toSet;
+
+import org.apache.hadoop.fs.FileStatus;
+
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Unit test verifying that {@link HdfsFileStatus} is a superset of
+ * {@link FileStatus}.
+ */
+public class TestHdfsFileStatusMethods {
+
+  @Test
+  public void testInterfaceSuperset() {
+    Set<MethodSignature> fsM = signatures(FileStatus.class);
+    Set<MethodSignature> hfsM = signatures(HdfsFileStatus.class);
+    hfsM.addAll(Stream.of(HdfsFileStatus.class.getInterfaces())
+        .flatMap(i -> Stream.of(i.getDeclaredMethods()))
+        .map(MethodSignature::new)
+        .collect(toSet()));
+    // HdfsFileStatus is not a concrete type
+    hfsM.addAll(signatures(Object.class));
+    assertTrue(fsM.removeAll(hfsM));
+    // verify that FileStatus is a subset of HdfsFileStatus
+    assertEquals(fsM.stream()
+            .map(MethodSignature::toString)
+            .collect(joining("\n")),
+        Collections.EMPTY_SET, fsM);
+  }
+
+  /** Map non-static, declared methods for this class to signatures. */
+  private static Set<MethodSignature> signatures(Class<?> c) {
+    return Stream.of(c.getDeclaredMethods())
+        .filter(m -> !Modifier.isStatic(m.getModifiers()))
+        .map(MethodSignature::new)
+        .collect(toSet());
+  }
+
+  private static class MethodSignature {
+    private final String name;
+    private final Type rval;
+    private final Type[] param;
+    MethodSignature(Method m) {
+      name = m.getName();
+      rval = m.getGenericReturnType();
+      param = m.getParameterTypes();
+    }
+    @Override
+    public int hashCode() {
+      return name.hashCode();
+    }
+    /**
+     * Methods are equal iff they have the same name, return type, and params
+     * (non-generic).
+     */
+    @Override
+    public boolean equals(Object o) {
+      if (!(o instanceof MethodSignature)) {
+        return false;
+      }
+      MethodSignature s = (MethodSignature) o;
+      return name.equals(s.name) &&
+          rval.equals(s.rval) &&
+          Arrays.equals(param, s.param);
+    }
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder();
+      sb.append(rval).append(" ").append(name).append("(")
+          .append(Stream.of(param)
+              .map(Type::toString).collect(joining(",")))
+          .append(")");
+      return sb.toString();
+    }
+  }
+
+}
@@ -1085,10 +1085,7 @@ public class HttpFSFileSystem extends FileSystem
           new FsPermissionExtension(permission, aBit, eBit, ecBit);
       FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type,
           replication, blockSize, mTime, aTime, deprecatedPerm, owner, group,
-          null, path, aBit, eBit, ecBit);
-      if (seBit) {
-        fileStatus.setSnapShotEnabledFlag(seBit);
-      }
+          null, path, FileStatus.attributes(aBit, eBit, ecBit, seBit));
       return fileStatus;
     } else {
       return new FileStatus(len, FILE_TYPE.DIRECTORY == type,
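Passing FileStatus.attributes(...) into the constructor replaces the old pattern of building the status and then flipping the snapshot flag with setSnapShotEnabledFlag. A stand-alone sketch of the new form (all field values are placeholders):

    // Assumes: org.apache.hadoop.fs.{FileStatus, Path} and
    //          org.apache.hadoop.fs.permission.FsPermission
    static FileStatus snapshotEnabledDir() {
      return new FileStatus(0L, true, 0, 0L, 0L, 0L,
          FsPermission.getDirDefault(), "alice", "hadoop", /* symlink */ null,
          new Path("/user/alice/dir"),
          FileStatus.attributes(/* acl */ false, /* crypt */ false,
              /* ec */ false, /* snapshotEnabled */ true));
    }
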
@@ -260,13 +260,6 @@
       <Method name="visitFile" />
       <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
     </Match>
-    <!-- HdfsFileStatus is user-facing, but HdfsLocatedFileStatus is not.
-         Defensible compatibility choices over time create odd corners. -->
-    <Match>
-      <Class name="org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus" />
-      <Field name="locations" />
-      <Bug pattern="SE_TRANSIENT_FIELD_NOT_RESTORED" />
-    </Match>
     <Match>
      <Class name="org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil$1" />
      <Method name="visitFile" />
@@ -395,7 +395,7 @@ public class Mover {
           status.getReplication());

       final ErasureCodingPolicy ecPolicy = status.getErasureCodingPolicy();
-      final LocatedBlocks locatedBlocks = status.getBlockLocations();
+      final LocatedBlocks locatedBlocks = status.getLocatedBlocks();
       final boolean lastBlkComplete = locatedBlocks.isLastBlockComplete();
       List<LocatedBlock> lbs = locatedBlocks.getLocatedBlocks();
       for (int i = 0; i < lbs.size(); i++) {
@@ -261,7 +261,7 @@ class FSDirStatAndListingOp {
         // This helps to prevent excessively large response payloads.
         // Approximate #locations with locatedBlockCount() * repl_factor
         LocatedBlocks blks =
-            ((HdfsLocatedFileStatus)listing[i]).getBlockLocations();
+            ((HdfsLocatedFileStatus)listing[i]).getLocatedBlocks();
         locationBudget -= (blks == null) ? 0 :
             blks.locatedBlockCount() * listing[i].getReplication();
       }
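The budget arithmetic above bounds how many block locations one listing response may carry: each located entry charges roughly locatedBlockCount() * replication against locationBudget, and the listing is cut short once the budget runs out. A toy illustration of the arithmetic with invented numbers:

    int locationBudget = 1000;   // assumed starting budget, not the actual default
    int blocksPerFile = 3;       // located blocks reported for one entry
    short replication = 3;
    locationBudget -= blocksPerFile * replication;   // 1000 -> 991
    // so on the order of 1000 / 9 ~= 111 such entries fit before truncation
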
@@ -486,7 +486,6 @@ class FSDirStatAndListingOp {
       String owner, String group, byte[] symlink, byte[] path, long fileId,
       int childrenNum, FileEncryptionInfo feInfo, byte storagePolicy,
       ErasureCodingPolicy ecPolicy, LocatedBlocks locations) {
-    if (locations == null) {
       return new HdfsFileStatus.Builder()
           .length(length)
           .isdir(isdir)
@@ -505,12 +504,8 @@
           .feInfo(feInfo)
           .storagePolicy(storagePolicy)
           .ecPolicy(ecPolicy)
+          .locations(locations)
           .build();
-    } else {
-      return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
-          mtime, atime, permission, flags, owner, group, symlink, path,
-          fileId, locations, childrenNum, feInfo, storagePolicy, ecPolicy);
-    }
   }

   private static ContentSummary getContentSummaryInt(FSDirectory fsd,
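createFileStatus no longer branches on whether locations are present; both shapes go through HdfsFileStatus.Builder, and per this change a null locations argument simply yields a plain (non-located) status from build(). A trimmed sketch of the pattern, restricted to builder calls visible in this diff (the helper name and values are placeholders):

    // Assumes: org.apache.hadoop.hdfs.protocol.{HdfsFileStatus, LocatedBlocks}
    static HdfsFileStatus toStatus(long length, boolean isdir, LocatedBlocks locations) {
      return new HdfsFileStatus.Builder()
          .length(length)
          .isdir(isdir)
          .locations(locations)   // may be null per this change
          .build();
    }
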
@@ -1096,7 +1096,7 @@ public class TestBlockStoragePolicy {
       int replicaNum, StorageType... types) {
     List<StorageType> typeList = Lists.newArrayList();
     Collections.addAll(typeList, types);
-    LocatedBlocks lbs = status.getBlockLocations();
+    LocatedBlocks lbs = status.getLocatedBlocks();
     Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
     for (LocatedBlock lb : lbs.getLocatedBlocks()) {
       Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
@@ -110,7 +110,7 @@ public class TestFileStatusSerialization {
     dib.reset(dob.getData(), 0, dob.getLength());
     FileStatus fstat = new FileStatus();
     fstat.readFields(dib);
-    checkFields(stat, fstat);
+    checkFields((FileStatus) stat, fstat);

     // FsPermisisonExtension used for HdfsFileStatus, not FileStatus,
     // attribute flags should still be preserved
@@ -133,7 +133,7 @@ public class TestFileStatusSerialization {
       try (ObjectInputStream ois = new ObjectInputStream(bais)) {
         FileStatus deser = (FileStatus) ois.readObject();
         assertEquals(hs, deser);
-        checkFields(hs, deser);
+        checkFields((FileStatus) hs, deser);
       }
     }

@@ -168,8 +168,8 @@ public class TestFileStatusSerialization {
       byte[] dst = fsp.toByteArray();
       HdfsFileStatusProto hsp2 = HdfsFileStatusProto.parseFrom(dst);
       assertEquals(hsp, hsp2);
-      FileStatus hstat = PBHelperClient.convert(hsp);
-      FileStatus hstat2 = PBHelperClient.convert(hsp2);
+      FileStatus hstat = (FileStatus) PBHelperClient.convert(hsp);
+      FileStatus hstat2 = (FileStatus) PBHelperClient.convert(hsp2);
       checkFields(hstat, hstat2);
     }
   }
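The new (FileStatus) casts suggest that HdfsFileStatus is no longer declared as a FileStatus subtype, while the concrete statuses returned by PBHelperClient.convert still are, so the checked downcast succeeds at run time. A hypothetical helper spelling out the same assumption:

    // Assumes (as these tests do) that every HdfsFileStatus produced by the
    // client is backed by a FileStatus subclass such as HdfsLocatedFileStatus.
    static FileStatus asFileStatus(HdfsFileStatus status) {
      return (FileStatus) status;
    }
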
@@ -320,7 +320,7 @@ public class TestStorageMover {
       }
       final List<StorageType> types = policy.chooseStorageTypes(
           status.getReplication());
-      for(LocatedBlock lb : fileStatus.getBlockLocations().getLocatedBlocks()) {
+      for(LocatedBlock lb : fileStatus.getLocatedBlocks().getLocatedBlocks()) {
        final Mover.StorageTypeDiff diff = new Mover.StorageTypeDiff(types,
            lb.getStorageTypes());
        Assert.assertTrue(fileStatus.getFullName(parent.toString())