HDFS-4773. Fix bugs in quota usage computation and OfflineImageViewer. Contributed by Jing Zhao
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1477367 13f79535-47bb-0310-9956-ffa450edef68
parent 884cbb681a
commit e097f8404b
@@ -321,3 +321,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4578. Restrict snapshot IDs to 24-bit wide. (Arpit Agarwal via
   szetszwo)
 
+  HDFS-4773. Fix bugs in quota usage computation and OfflineImageViewer.
+  (Jing Zhao via szetszwo)
+
@@ -342,9 +342,9 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollection
         dsDelta = diskspaceConsumed();
       } else if (last.getId() < lastSnapshotId) {
         dsDelta = computeFileSize(true, false) * getFileReplication();
-      } else {
-        Snapshot s = fileDiffList.searchSnapshotById(lastSnapshotId);
-        dsDelta = diskspaceConsumed(s);
+      } else {
+        Snapshot s = fileDiffList.getSnapshotById(lastSnapshotId);
+        dsDelta = diskspaceConsumed(s);
       }
     } else {
       dsDelta = diskspaceConsumed();
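Note on the hunk above: the disk-space delta charged against the quota is the file length multiplied by the replication factor recorded for the relevant snapshot. A minimal self-contained sketch of that arithmetic (the block size, file size, and replication values below are hypothetical, not taken from this commit):

    // Sketch: disk space consumed = file length x replication factor.
    public class DiskspaceSketch {
      public static void main(String[] args) {
        long blockSize = 128L * 1024 * 1024;   // hypothetical 128 MB blocks
        long fileSize = 3 * blockSize;         // three full blocks
        short fileReplication = 3;             // replication recorded in the snapshot
        long dsDelta = fileSize * fileReplication;
        System.out.println("dsDelta = " + dsDelta + " bytes");
      }
    }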
@@ -441,7 +441,7 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollection
    * if includesLastUcBlock == false.
    * @return file size
    */
-  private final long computeFileSize(boolean includesLastUcBlock,
+  public final long computeFileSize(boolean includesLastUcBlock,
       boolean usePreferredBlockSize4LastUcBlock) {
     if (blocks == null || blocks.length == 0) {
       return 0;
@@ -414,8 +414,11 @@ public abstract class INodeReference extends INode {
     @Override
     public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
         boolean useCache, int lastSnapshotId) {
-      Preconditions.checkState(lastSnapshotId == Snapshot.INVALID_ID
-          || this.lastSnapshotId <= lastSnapshotId);
+      // if this.lastSnapshotId < lastSnapshotId, the rename of the referred
+      // node happened before the rename of its ancestor. This should be
+      // impossible since for WithName node we only count its children at the
+      // time of the rename.
+      Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
       final INode referred = this.getReferredINode().asReference()
           .getReferredINode();
       // we cannot use cache for the referred node since its cached quota may
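The tightened precondition above asserts that a WithName reference, created at rename time, is never asked to compute quota usage for a snapshot newer than its own lastSnapshotId. A minimal sketch of how Guava's Preconditions.checkState enforces such an invariant (the ids below are hypothetical):

    import com.google.common.base.Preconditions;

    public class InvariantSketch {
      public static void main(String[] args) {
        int thisLastSnapshotId = 7;  // id recorded for the reference at rename time
        int requestedSnapshotId = 5; // id passed into computeQuotaUsage
        // throws IllegalStateException if the invariant is violated
        Preconditions.checkState(thisLastSnapshotId >= requestedSnapshotId);
        System.out.println("invariant holds");
      }
    }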
@@ -150,20 +150,6 @@ abstract class AbstractINodeDiffList<N extends INode,
     return last == null? null: last.getSnapshot();
   }
 
-  /**
-   * Search for the snapshot whose id is 1) no larger than the given id, and 2)
-   * most close to the given id
-   */
-  public final Snapshot searchSnapshotById(final int snapshotId) {
-    final int i = Collections.binarySearch(diffs, snapshotId);
-    if (i == -1) {
-      return null;
-    } else {
-      int index = i < 0 ? -i - 2 : i;
-      return diffs.get(index).getSnapshot();
-    }
-  }
-
   /**
    * Find the latest snapshot before a given snapshot.
    * @param anchor The returned snapshot must be taken before this given
@@ -201,23 +187,36 @@ abstract class AbstractINodeDiffList<N extends INode,
    * the corresponding snapshot state are the same.
    */
   public final D getDiff(Snapshot snapshot) {
-    if (snapshot == null) {
-      // snapshot == null means the current state, therefore, return null.
+    return getDiffById(snapshot == null ?
+        Snapshot.INVALID_ID : snapshot.getId());
+  }
+
+  private final D getDiffById(final int snapshotId) {
+    if (snapshotId == Snapshot.INVALID_ID) {
       return null;
     }
-    final int i = Collections.binarySearch(diffs, snapshot.getId());
+    final int i = Collections.binarySearch(diffs, snapshotId);
     if (i >= 0) {
       // exact match
       return diffs.get(i);
     } else {
       // Exact match not found means that there were no changes between
       // given snapshot and the next state so that the diff for the given
-      // snapshot was not recorded.  Thus, return the next state.
+      // snapshot was not recorded. Thus, return the next state.
       final int j = -i - 1;
       return j < diffs.size()? diffs.get(j): null;
     }
   }
+
+  /**
+   * Search for the snapshot whose id is 1) no less than the given id,
+   * and 2) most close to the given id.
+   */
+  public final Snapshot getSnapshotById(final int snapshotId) {
+    D diff = getDiffById(snapshotId);
+    return diff == null ? null : diff.getSnapshot();
+  }
 
   /**
    * Check if changes have happened between two snapshots.
   * @param earlier The snapshot taken earlier
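The -i - 1 arithmetic above relies on Collections.binarySearch returning (-(insertion point) - 1) when the key is absent. The removed searchSnapshotById rounded down to the last diff no newer than the given id, while the new getDiffById/getSnapshotById rounds up to the first diff no older than it. A standalone sketch contrasting the two lookups over a plain sorted list (the ids are hypothetical, not HDFS classes):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class BinarySearchSketch {
      // old behavior: latest id <= key ("floor")
      static Integer floor(List<Integer> ids, int key) {
        int i = Collections.binarySearch(ids, key);
        int index = i >= 0 ? i : -i - 2;     // insertion point minus one
        return index < 0 ? null : ids.get(index);
      }

      // new behavior: earliest id >= key ("ceiling"), as in getDiffById
      static Integer ceiling(List<Integer> ids, int key) {
        int i = Collections.binarySearch(ids, key);
        int j = i >= 0 ? i : -i - 1;         // the insertion point itself
        return j < ids.size() ? ids.get(j) : null;
      }

      public static void main(String[] args) {
        List<Integer> ids = Arrays.asList(2, 5, 9);  // sorted snapshot ids
        System.out.println(floor(ids, 7));    // 5
        System.out.println(ceiling(ids, 7));  // 9
      }
    }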
@@ -68,7 +68,10 @@ public interface FileWithSnapshot {
       long oldDiskspace = currentINode.diskspaceConsumed();
       if (removed.snapshotINode != null) {
         short replication = removed.snapshotINode.getFileReplication();
-        if (replication > currentINode.getBlockReplication()) {
+        short currentRepl = currentINode.getBlockReplication();
+        if (currentRepl == 0) {
+          oldDiskspace = currentINode.computeFileSize(true, true) * replication;
+        } else if (replication > currentRepl) {
           oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
               * replication;
         }
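The guard added above avoids a division by zero: the previous code re-scaled the current disk usage from the current replication factor to the snapshot's, which breaks when the current replication is 0. The fix recomputes the usage from the file size instead. A worked sketch of the corrected scaling, with hypothetical numbers:

    public class ReplicationScalingSketch {
      public static void main(String[] args) {
        long fileSize = 300L;          // logical file length, hypothetical
        short currentRepl = 2;         // current block replication
        short snapshotRepl = 3;        // replication recorded in the snapshot
        long currentDiskspace = fileSize * currentRepl;  // 600

        long oldDiskspace;
        if (currentRepl == 0) {
          // cannot re-scale from zero; recompute from the file size (the fix)
          oldDiskspace = fileSize * snapshotRepl;
        } else if (snapshotRepl > currentRepl) {
          oldDiskspace = currentDiskspace / currentRepl * snapshotRepl;  // 900
        } else {
          oldDiskspace = currentDiskspace;
        }
        System.out.println("oldDiskspace = " + oldDiskspace);
      }
    }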
@@ -914,25 +914,14 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
     }
 
-    final int diffNum = 0;
-    Snapshot lastSnapshot = null;
-    Snapshot lastInDiff = diffs.getLastSnapshot();
-    // if lastSnapshotId > lastInDiff.getId(), the snapshot diff associated with
-    // lastSnapshotId must have been deleted. We should call
-    // getChildrenList(null) to get the children list for the continuous
-    // computation. In the meanwhile, there must be some snapshot diff whose
-    // snapshot id is no less than lastSnapshotId. Otherwise the WithName node
-    // itself should have been deleted.
-    if (lastInDiff != null && lastInDiff.getId() >= lastSnapshotId) {
-      lastSnapshot = diffs.searchSnapshotById(lastSnapshotId);
-    }
+    Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);
 
     ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
     for (INode child : childrenList) {
       child.computeQuotaUsage(counts, useCache, lastSnapshotId);
     }
 
-    counts.add(Quota.NAMESPACE, diffNum + 1);
+    counts.add(Quota.NAMESPACE, 1);
     return counts;
   }
 
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
 import org.apache.hadoop.io.Text;
@@ -129,7 +130,8 @@ class ImageLoaderCurrent implements ImageLoader {
       -40, -41, -42, -43};
   private int imageVersion = 0;
 
-  private final Map<String, String> nodeMap = new HashMap<String, String>();
+  private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
+  private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();
 
   /* (non-Javadoc)
    * @see ImageLoader#canProcessVersion(int)
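The two new maps above replace the path-keyed nodeMap: dirNodeMap remembers each directory's path by inode id, and subtreeMap records which subtrees have already been visited. Keying by inode id keeps the bookkeeping stable when the same directory appears under more than one path, e.g. after a rename captured in a snapshot. A small sketch of the visited-subtree pattern (the inode id and path are hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class InodeMapSketch {
      public static void main(String[] args) {
        Map<Long, String> dirNodeMap = new HashMap<Long, String>();  // id -> path
        Map<Long, String> subtreeMap = new HashMap<Long, String>();  // visited ids

        long inodeId = 16386L;                    // hypothetical inode id
        dirNodeMap.put(inodeId, "/dir/renamed");

        // first visit: records the subtree and proceeds
        String prev = subtreeMap.put(inodeId, dirNodeMap.get(inodeId));
        System.out.println(prev == null ? "visit subtree" : "already visited");

        // second visit (e.g. via a snapshot reference): skipped
        prev = subtreeMap.put(inodeId, dirNodeMap.get(inodeId));
        System.out.println(prev == null ? "visit subtree" : "already visited");
      }
    }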
@@ -196,7 +198,8 @@ class ImageLoaderCurrent implements ImageLoader {
         }
       }
       processINodes(in, v, numInodes, skipBlocks, supportSnapshot);
-      nodeMap.clear();
+      subtreeMap.clear();
+      dirNodeMap.clear();
 
       processINodesUC(in, v, skipBlocks);
 
@@ -441,10 +444,11 @@ class ImageLoaderCurrent implements ImageLoader {
    */
   private void processDirectoryWithSnapshot(DataInputStream in, ImageVisitor v,
       boolean skipBlocks) throws IOException {
-    // 1. load dir name
-    String dirName = FSImageSerialization.readString(in);
+    // 1. load dir node id
+    long inodeId = in.readLong();
 
-    String oldValue = nodeMap.put(dirName, dirName);
+    String dirName = dirNodeMap.get(inodeId);
+    String oldValue = subtreeMap.put(inodeId, dirName);
     if (oldValue != null) { // the subtree has been visited
       return;
     }
@@ -581,6 +585,8 @@ class ImageLoaderCurrent implements ImageLoader {
       throws IOException {
     boolean supportSnapshot =
         LayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
+    boolean supportInodeId =
+        LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
 
     v.visitEnclosingElement(ImageElement.INODE);
     String pathName = FSImageSerialization.readString(in);
@@ -591,9 +597,11 @@ class ImageLoaderCurrent implements ImageLoader {
       }
     }
 
+    long inodeId = INodeId.GRANDFATHER_INODE_ID;
     v.visit(ImageElement.INODE_PATH, pathName);
-    if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
-      v.visit(ImageElement.INODE_ID, in.readLong());
+    if (supportInodeId) {
+      inodeId = in.readLong();
+      v.visit(ImageElement.INODE_ID, inodeId);
     }
     v.visit(ImageElement.REPLICATION, in.readShort());
     v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
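The rewritten block above keeps the read position of the image stream correct: when the layout version carries an inode id, it must be consumed before the replication field, and older images fall back to the sentinel INodeId.GRANDFATHER_INODE_ID. A self-contained sketch of that version-gated read (the sentinel value and two-field layout here are illustrative, not the exact FSImage format):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class VersionedReadSketch {
      static final long GRANDFATHER_INODE_ID = -1;  // hypothetical sentinel

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeLong(16386L);  // inode id (present only in newer layouts)
        out.writeShort(3);      // replication

        boolean supportInodeId = true;  // depends on the image layout version
        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray()));
        long inodeId = GRANDFATHER_INODE_ID;
        if (supportInodeId) {
          inodeId = in.readLong();  // consume the id so later fields stay aligned
        }
        short replication = in.readShort();
        System.out.println("inodeId=" + inodeId + " repl=" + replication);
      }
    }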
@@ -619,6 +627,9 @@ class ImageLoaderCurrent implements ImageLoader {
         }
       }
     } else if (numBlocks == -1) { // Directory
+      if (supportSnapshot && supportInodeId) {
+        dirNodeMap.put(inodeId, pathName);
+      }
       v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
       if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
         v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);