HDFS-10851. FSDirStatAndListingOp: stop passing path as string. Contributed by Daryn Sharp.
parent 56ec975ea9 · commit 2551ff80b7
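The diff below is a refactoring of the NameNode's directory helpers: instead of re-resolving and passing the file system path around as a String (plus pieces derived from it, such as the local name bytes and the snapshot id), callers resolve an INodesInPath once and hand that object to the listing, ACL, XAttr and attribute helpers. As rough orientation only, here is a minimal sketch of the resulting call shape; it is a fragment distilled from the hunks that follow (fsd, pc and src are the names used in the diff context) and is not itself part of the commit:

    // Old shape (removed below): string path plus pieces derived from it, e.g.
    //   fsd.getAttributes(src, inode.getLocalNameBytes(), inode, snapshotId);
    // New shape: resolve the path once, then pass the INodesInPath itself.
    INodesInPath iip = fsd.resolvePath(pc, src);        // resolve and validate the path
    INode inode = FSDirectory.resolveLastINode(iip);    // last INode comes from the iip
    INodeAttributes attrs = fsd.getAttributes(iip);     // attributes looked up via the iip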
FSDirAclOp.java
@@ -152,7 +152,6 @@ class FSDirAclOp {
    fsd.readLock();
    try {
      INodesInPath iip = fsd.resolvePath(pc, src);
      src = iip.getPath();
      // There is no real inode for the path ending in ".snapshot", so return a
      // non-null, unpopulated AclStatus. This is similar to getFileInfo.
      if (iip.isDotSnapshotDir() && fsd.getINode4DotSnapshot(iip) != null) {
@@ -163,8 +162,7 @@ class FSDirAclOp {
      }
      INode inode = FSDirectory.resolveLastINode(iip);
      int snapshotId = iip.getPathSnapshotId();
      List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(src,
          inode.getLocalNameBytes(), inode, snapshotId));
      List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(iip));
      FsPermission fsPermission = inode.getFsPermission(snapshotId);
      return new AclStatus.Builder()
          .owner(inode.getUserName()).group(inode.getGroupName())
FSDirStatAndListingOp.java
@@ -51,15 +51,12 @@ import static org.apache.hadoop.util.Time.now;
class FSDirStatAndListingOp {
  static DirectoryListing getListingInt(FSDirectory fsd, final String srcArg,
      byte[] startAfter, boolean needLocation) throws IOException {
    String src = null;

    final INodesInPath iip;
    if (fsd.isPermissionEnabled()) {
      FSPermissionChecker pc = fsd.getPermissionChecker();
      iip = fsd.resolvePath(pc, srcArg);
      src = iip.getPath();
    } else {
      src = FSDirectory.resolvePath(srcArg, fsd);
      String src = FSDirectory.resolvePath(srcArg, fsd);
      iip = fsd.getINodesInPath(src, true);
    }

@@ -90,7 +87,7 @@ class FSDirStatAndListingOp {
      }
      isSuperUser = pc.isSuperUser();
    }
    return getListing(fsd, iip, src, startAfter, needLocation, isSuperUser);
    return getListing(fsd, iip, startAfter, needLocation, isSuperUser);
  }

  /**
@@ -159,7 +156,6 @@ class FSDirStatAndListingOp {
        "Negative length is not supported. File: " + src);
    CacheManager cm = fsd.getFSNamesystem().getCacheManager();
    BlockManager bm = fsd.getBlockManager();
    boolean isReservedName = FSDirectory.isReservedRawName(src);
    fsd.readLock();
    try {
      final INodesInPath iip = fsd.resolvePath(pc, src);
@@ -182,7 +178,7 @@ class FSDirStatAndListingOp {
      isUc = false;
    }

    final FileEncryptionInfo feInfo = isReservedName ? null
    final FileEncryptionInfo feInfo = iip.isRaw() ? null
        : FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, inode,
            iip.getPathSnapshotId(), iip);

@@ -221,42 +217,39 @@ class FSDirStatAndListingOp {
   * @param fsd FSDirectory
   * @param iip the INodesInPath instance containing all the INodes along the
   *            path
   * @param src the directory name
   * @param startAfter the name to start listing after
   * @param needLocation if block locations are returned
   * @param includeStoragePolicy if storage policy is returned
   * @return a partial listing starting after startAfter
   */
  private static DirectoryListing getListing(FSDirectory fsd, INodesInPath iip,
      String src, byte[] startAfter, boolean needLocation, boolean isSuperUser)
      byte[] startAfter, boolean needLocation, boolean includeStoragePolicy)
      throws IOException {
    String srcs = FSDirectory.normalizePath(src);
    if (FSDirectory.isExactReservedName(srcs)) {
    if (FSDirectory.isExactReservedName(iip.getPathComponents())) {
      return getReservedListing(fsd);
    }

    fsd.readLock();
    try {
      if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
        return getSnapshotsListing(fsd, srcs, startAfter);
      if (iip.isDotSnapshotDir()) {
        return getSnapshotsListing(fsd, iip, startAfter);
      }
      final int snapshot = iip.getPathSnapshotId();
      final INode targetNode = iip.getLastINode();
      if (targetNode == null)
      if (targetNode == null) {
        return null;
      byte parentStoragePolicy = isSuperUser ?
          targetNode.getStoragePolicyID() : HdfsConstants
          .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
      }

      byte parentStoragePolicy = includeStoragePolicy
          ? targetNode.getStoragePolicyID()
          : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;

      if (!targetNode.isDirectory()) {
        // return the file's status. note that the iip already includes the
        // target INode
        INodeAttributes nodeAttrs = getINodeAttributes(
            fsd, src, HdfsFileStatus.EMPTY_NAME, targetNode,
            snapshot);
        return new DirectoryListing(
            new HdfsFileStatus[]{ createFileStatus(
                fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
                needLocation, parentStoragePolicy, iip)
                fsd, iip, null, parentStoragePolicy, needLocation)
            }, 0);
      }

@@ -270,20 +263,15 @@ class FSDirStatAndListingOp {
      int listingCnt = 0;
      HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
      for (int i = 0; i < numOfListing && locationBudget > 0; i++) {
        INode cur = contents.get(startChild+i);
        byte curPolicy = isSuperUser && !cur.isSymlink()?
            cur.getLocalStoragePolicyID():
            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
        INodeAttributes nodeAttrs = getINodeAttributes(
            fsd, src, cur.getLocalNameBytes(), cur,
            snapshot);
        final INodesInPath iipWithChild = INodesInPath.append(iip, cur,
            cur.getLocalNameBytes());
        listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), nodeAttrs,
            needLocation, getStoragePolicyID(curPolicy, parentStoragePolicy),
            iipWithChild);
        INode child = contents.get(startChild+i);
        byte childStoragePolicy = (includeStoragePolicy && !child.isSymlink())
            ? getStoragePolicyID(child.getLocalStoragePolicyID(),
                parentStoragePolicy)
            : parentStoragePolicy;
        listing[i] =
            createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
        listingCnt++;
        if (needLocation) {
        if (listing[i] instanceof HdfsLocatedFileStatus) {
          // Once we hit lsLimit locations, stop.
          // This helps to prevent excessively large response payloads.
          // Approximate #locations with locatedBlockCount() * repl_factor
@@ -308,17 +296,16 @@ class FSDirStatAndListingOp {
   * Get a listing of all the snapshots of a snapshottable directory
   */
  private static DirectoryListing getSnapshotsListing(
      FSDirectory fsd, String src, byte[] startAfter)
      FSDirectory fsd, INodesInPath iip, byte[] startAfter)
      throws IOException {
    Preconditions.checkState(fsd.hasReadLock());
    Preconditions.checkArgument(
        src.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR),
        "%s does not end with %s", src, HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);

    final String dirPath = FSDirectory.normalizePath(src.substring(0,
        src.length() - HdfsConstants.DOT_SNAPSHOT_DIR.length()));

    final INode node = fsd.getINode(dirPath);
    Preconditions.checkArgument(iip.isDotSnapshotDir(),
        "%s does not end with %s",
        iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
    // drop off the null .snapshot component
    iip = iip.getParentINodesInPath();
    final String dirPath = iip.getPath();
    final INode node = iip.getLastINode();
    final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
    final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
    if (sf == null) {
@@ -332,13 +319,8 @@ class FSDirStatAndListingOp {
      final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
      for (int i = 0; i < numOfListing; i++) {
        Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
        INodeAttributes nodeAttrs = getINodeAttributes(
            fsd, src, sRoot.getLocalNameBytes(),
            node, Snapshot.CURRENT_STATE_ID);
        listing[i] = createFileStatus(
            fsd, sRoot.getLocalNameBytes(), nodeAttrs,
            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
            INodesInPath.fromINode(sRoot));
        listing[i] = createFileStatus(fsd, iip, sRoot,
            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
      }
      return new DirectoryListing(
          listing, snapshots.size() - skipSize - numOfListing);
@@ -356,7 +338,6 @@ class FSDirStatAndListingOp {
  /** Get the file info for a specific file.
   * @param fsd FSDirectory
   * @param iip The path to the file, the file is included
   * @param isRawPath true if a /.reserved/raw pathname was passed by the user
   * @param includeStoragePolicy whether to include storage policy
   * @return object containing information regarding the file
   *         or null if file not found
@@ -369,15 +350,10 @@ class FSDirStatAndListingOp {
      if (node == null) {
        return null;
      }

      byte policyId = includeStoragePolicy && !node.isSymlink() ?
          node.getStoragePolicyID() :
          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
      INodeAttributes nodeAttrs = getINodeAttributes(fsd, iip.getPath(),
          HdfsFileStatus.EMPTY_NAME,
          node, iip.getPathSnapshotId());
      return createFileStatus(fsd, HdfsFileStatus.EMPTY_NAME, nodeAttrs,
          policyId, iip);
      byte policy = (includeStoragePolicy && !node.isSymlink())
          ? node.getStoragePolicyID()
          : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
      return createFileStatus(fsd, iip, null, policy, false);
    } finally {
      fsd.readUnlock();
    }
@@ -404,48 +380,41 @@ class FSDirStatAndListingOp {
  }

  /**
   * create an hdfs file status from an inode
   * create a hdfs file status from an iip.
   * @param fsd FSDirectory
   * @param iip The INodesInPath containing the INodeFile and its ancestors
   * @return HdfsFileStatus without locations or storage policy
   */
  static HdfsFileStatus createFileStatusForEditLog(
      FSDirectory fsd, INodesInPath iip) throws IOException {
    return createFileStatus(fsd, iip,
        null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
  }

  /**
   * create a hdfs file status from an iip.
   *
   * @param fsd FSDirectory
   * @param path the local name
   * @param iip The INodesInPath containing the INodeFile and its ancestors.
   * @param child for a directory listing of the iip, else null
   * @param storagePolicy for the path or closest ancestor
   * @param needLocation if block locations need to be included or not
   * @param isRawPath true if this is being called on behalf of a path in
   *                  /.reserved/raw
   * @param iip the INodesInPath containing the target INode and its ancestors
   * @param includeStoragePolicy if storage policy should be returned
   * @return a file status
   * @throws java.io.IOException if any error occurs
   */
  private static HdfsFileStatus createFileStatus(
      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
      boolean needLocation, byte storagePolicy, INodesInPath iip)
      throws IOException {
    if (needLocation) {
      return createLocatedFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
    } else {
      return createFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
      FSDirectory fsd, INodesInPath iip, INode child, byte storagePolicy,
      boolean needLocation) throws IOException {
    assert fsd.hasReadLock();
    // only directory listing sets the status name.
    byte[] name = HdfsFileStatus.EMPTY_NAME;
    if (child != null) {
      name = child.getLocalNameBytes();
      // have to do this for EC and EZ lookups...
      iip = INodesInPath.append(iip, child, name);
    }
  }

  /**
   * Create FileStatus for an given INodeFile.
   * @param iip The INodesInPath containing the INodeFile and its ancestors
   */
  static HdfsFileStatus createFileStatusForEditLog(
      FSDirectory fsd, String fullPath, byte[] path,
      byte storagePolicy, int snapshot, boolean isRawPath,
      INodesInPath iip) throws IOException {
    INodeAttributes nodeAttrs = getINodeAttributes(
        fsd, fullPath, path, iip.getLastINode(), snapshot);
    return createFileStatus(fsd, path, nodeAttrs, storagePolicy, iip);
  }

  /**
   * create file status for a given INode
   * @param iip the INodesInPath containing the target INode and its ancestors
   */
  static HdfsFileStatus createFileStatus(
      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
      byte storagePolicy, INodesInPath iip) throws IOException {
    long size = 0;     // length is zero for directories
    short replication = 0;
    long blocksize = 0;
@@ -453,6 +422,7 @@ class FSDirStatAndListingOp {
    final INode node = iip.getLastINode();
    final int snapshot = iip.getPathSnapshotId();
    final boolean isRawPath = iip.isRaw();
    LocatedBlocks loc = null;

    final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
        .getFileEncryptionInfo(fsd, node, snapshot, iip);
@@ -464,6 +434,18 @@ class FSDirStatAndListingOp {
      blocksize = fileNode.getPreferredBlockSize();
      isEncrypted = (feInfo != null)
          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip));
      if (needLocation) {
        final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
        final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
        final long fileSize = !inSnapshot && isUc
            ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
        loc = fsd.getBlockManager().createLocatedBlocks(
            fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
            inSnapshot, feInfo);
        if (loc == null) {
          loc = new LocatedBlocks();
        }
      }
    } else {
      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
    }
@@ -471,7 +453,8 @@ class FSDirStatAndListingOp {
    int childrenNum = node.isDirectory() ?
        node.asDirectory().getChildrenNum(snapshot) : 0;

    return new HdfsFileStatus(
    INodeAttributes nodeAttrs = fsd.getAttributes(iip);
    HdfsFileStatus status = createFileStatus(
        size,
        node.isDirectory(),
        replication,
@@ -482,70 +465,13 @@ class FSDirStatAndListingOp {
        nodeAttrs.getUserName(),
        nodeAttrs.getGroupName(),
        node.isSymlink() ? node.asSymlink().getSymlink() : null,
        path,
        name,
        node.getId(),
        childrenNum,
        feInfo,
        storagePolicy);
  }
        storagePolicy,
        loc);

  private static INodeAttributes getINodeAttributes(
      FSDirectory fsd, String fullPath, byte[] path, INode node, int snapshot) {
    return fsd.getAttributes(fullPath, path, node, snapshot);
  }

  /**
   * Create FileStatus with location info by file INode
   * @param iip the INodesInPath containing the target INode and its ancestors
   */
  private static HdfsFileStatus createLocatedFileStatus(
      FSDirectory fsd, byte[] path, INodeAttributes nodeAttrs,
      byte storagePolicy, INodesInPath iip) throws IOException {
    assert fsd.hasReadLock();
    long size = 0;     // length is zero for directories
    short replication = 0;
    long blocksize = 0;
    LocatedBlocks loc = null;
    final boolean isEncrypted;
    final INode node = iip.getLastINode();
    final int snapshot = iip.getPathSnapshotId();
    final boolean isRawPath = iip.isRaw();

    final FileEncryptionInfo feInfo = isRawPath ? null : FSDirEncryptionZoneOp
        .getFileEncryptionInfo(fsd, node, snapshot, iip);
    if (node.isFile()) {
      final INodeFile fileNode = node.asFile();
      size = fileNode.computeFileSize(snapshot);
      replication = fileNode.getFileReplication(snapshot);
      blocksize = fileNode.getPreferredBlockSize();

      final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
      final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
      final long fileSize = !inSnapshot && isUc ?
          fileNode.computeFileSizeNotIncludingLastUcBlock() : size;

      loc = fsd.getBlockManager().createLocatedBlocks(
          fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
          inSnapshot, feInfo);
      if (loc == null) {
        loc = new LocatedBlocks();
      }
      isEncrypted = (feInfo != null)
          || (isRawPath && FSDirEncryptionZoneOp.isInAnEZ(fsd, iip));
    } else {
      isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
    }
    int childrenNum = node.isDirectory() ?
        node.asDirectory().getChildrenNum(snapshot) : 0;

    HdfsLocatedFileStatus status =
        new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
            blocksize, node.getModificationTime(snapshot),
            node.getAccessTime(snapshot),
            getPermissionForFileStatus(nodeAttrs, isEncrypted),
            nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
            node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
            node.getId(), loc, childrenNum, feInfo, storagePolicy);
    // Set caching information for the located blocks.
    if (loc != null) {
      CacheManager cacheManager = fsd.getFSNamesystem().getCacheManager();
@@ -556,6 +482,23 @@ class FSDirStatAndListingOp {
    return status;
  }

  private static HdfsFileStatus createFileStatus(long length, boolean isdir,
      int replication, long blocksize, long mtime,
      long atime, FsPermission permission, String owner, String group,
      byte[] symlink, byte[] path, long fileId, int childrenNum,
      FileEncryptionInfo feInfo, byte storagePolicy,
      LocatedBlocks locations) {
    if (locations == null) {
      return new HdfsFileStatus(length, isdir, replication, blocksize,
          mtime, atime, permission, owner, group, symlink, path, fileId,
          childrenNum, feInfo, storagePolicy);
    } else {
      return new HdfsLocatedFileStatus(length, isdir, replication, blocksize,
          mtime, atime, permission, owner, group, symlink, path, fileId,
          locations, childrenNum, feInfo, storagePolicy);
    }
  }

  /**
   * Returns an inode's FsPermission for use in an outbound FileStatus. If the
   * inode has an ACL or is for an encrypted file/dir, then this method will
FSDirXAttrOp.java
@@ -429,11 +429,7 @@ class FSDirXAttrOp {
      throws IOException {
    fsd.readLock();
    try {
      String src = iip.getPath();
      INode inode = FSDirectory.resolveLastINode(iip);
      int snapshotId = iip.getPathSnapshotId();
      return XAttrStorage.readINodeXAttrs(fsd.getAttributes(src,
          inode.getLocalNameBytes(), inode, snapshotId));
      return XAttrStorage.readINodeXAttrs(fsd.getAttributes(iip));
    } finally {
      fsd.readUnlock();
    }
FSDirectory.java
@@ -1723,14 +1723,19 @@ public class FSDirectory implements Closeable {
    inodeId.setCurrentValue(newValue);
  }

  INodeAttributes getAttributes(String fullPath, byte[] path,
      INode node, int snapshot) {
  INodeAttributes getAttributes(INodesInPath iip)
      throws FileNotFoundException {
    INode node = FSDirectory.resolveLastINode(iip);
    int snapshot = iip.getPathSnapshotId();
    INodeAttributes nodeAttrs = node.getSnapshotINode(snapshot);
    if (attributeProvider != null) {
      fullPath = fullPath
          + (fullPath.endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR)
          + DFSUtil.bytes2String(path);
      nodeAttrs = attributeProvider.getAttributes(fullPath, nodeAttrs);
      // permission checking sends the full components array including the
      // first empty component for the root. however file status
      // related calls are expected to strip out the root component according
      // to TestINodeAttributeProvider.
      byte[][] components = iip.getPathComponents();
      components = Arrays.copyOfRange(components, 1, components.length);
      nodeAttrs = attributeProvider.getAttributes(components, nodeAttrs);
    }
    return nodeAttrs;
  }
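To make the comment in the hunk above concrete, a small illustrative fragment (not part of the commit; the example path is hypothetical): for a file such as /user/alice/f, iip.getPathComponents() yields an array whose first entry is the empty root component, and that entry is dropped before the array is handed to the attribute provider:

    byte[][] components = iip.getPathComponents();        // conceptually {"", "user", "alice", "f"}
    components = Arrays.copyOfRange(components, 1, components.length);
    // the provider now sees {"user", "alice", "f"}, the same elements the old
    // string-based overload derived by splitting "/user/alice/f"
    nodeAttrs = attributeProvider.getAttributes(components, nodeAttrs);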
FSEditLogLoader.java
@@ -379,10 +379,8 @@ public class FSEditLogLoader {

      // add the op into retry cache if necessary
      if (toAddRetryCache) {
        HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
            fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
            false, iip);
        HdfsFileStatus stat =
            FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
        fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
            addCloseOp.rpcCallId, stat);
      }
@@ -398,10 +396,8 @@ public class FSEditLogLoader {
          false);
      // add the op into retry cache if necessary
      if (toAddRetryCache) {
        HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
            fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
            Snapshot.CURRENT_STATE_ID, false, iip);
        HdfsFileStatus stat =
            FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
        fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
            addCloseOp.rpcCallId, new LastBlockWithStatus(lb, stat));
      }
@@ -472,10 +468,8 @@ public class FSEditLogLoader {
          false, false);
      // add the op into retry cache if necessary
      if (toAddRetryCache) {
        HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatusForEditLog(
            fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME,
            HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
            Snapshot.CURRENT_STATE_ID, false, iip);
        HdfsFileStatus stat =
            FSDirStatAndListingOp.createFileStatusForEditLog(fsDir, iip);
        fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId,
            appendOp.rpcCallId, new LastBlockWithStatus(lb, stat));
      }
INodeAttributeProvider.java
@@ -17,13 +17,6 @@
 */
package org.apache.hadoop.hdfs.server.namenode;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import com.google.common.annotations.VisibleForTesting;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -87,7 +80,7 @@ public abstract class INodeAttributeProvider {
   */
  public abstract void stop();

  @VisibleForTesting
  @Deprecated
  String[] getPathElements(String path) {
    path = path.trim();
    if (path.charAt(0) != Path.SEPARATOR_CHAR) {
@@ -115,6 +108,7 @@ public abstract class INodeAttributeProvider {
    return pathElements;
  }

  @Deprecated
  public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
    return getAttributes(getPathElements(fullPath), inode);
  }
INodesInPath.java
@@ -399,7 +399,7 @@ public class INodesInPath {
   */
  private INodesInPath getAncestorINodesInPath(int length) {
    Preconditions.checkArgument(length >= 0 && length < inodes.length);
    Preconditions.checkState(!isSnapshot());
    Preconditions.checkState(isDotSnapshotDir() || !isSnapshot());
    final INode[] anodes = new INode[length];
    final byte[][] apath = new byte[length][];
    System.arraycopy(this.inodes, 0, anodes, 0, length);
TestSnapshotPathINodes.java
@@ -166,6 +166,9 @@ public class TestSnapshotPathINodes {
    assertEquals(sub1.toString(), nodesInPath.getPath(2));
    assertEquals(file1.toString(), nodesInPath.getPath(3));

    assertEquals(file1.getParent().toString(),
        nodesInPath.getParentINodesInPath().getPath());

    nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
    assertEquals(nodesInPath.length(), components.length);
    assertSnapshot(nodesInPath, false, null, -1);
@@ -212,6 +215,9 @@ public class TestSnapshotPathINodes {
    // The number of INodes returned should still be components.length
    // since we put a null in the inode array for ".snapshot"
    assertEquals(nodesInPath.length(), components.length);
    // ensure parent inodes can strip the .snapshot
    assertEquals(sub1.toString(),
        nodesInPath.getParentINodesInPath().getPath());

    // No SnapshotRoot dir is included in the resolved inodes
    assertSnapshot(nodesInPath, true, snapshot, -1);