HDFS-4773. Fix bugs in quota usage computation and OfflineImageViewer. Contributed by Jing Zhao
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1477367 13f79535-47bb-0310-9956-ffa450edef68
parent 884cbb681a
commit e097f8404b
@@ -321,3 +321,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4578. Restrict snapshot IDs to 24-bit wide. (Arpit Agarwal via
   szetszwo)
+
+  HDFS-4773. Fix bugs in quota usage computation and OfflineImageViewer.
+  (Jing Zhao via szetszwo)
@@ -342,9 +342,9 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect
         dsDelta = diskspaceConsumed();
       } else if (last.getId() < lastSnapshotId) {
         dsDelta = computeFileSize(true, false) * getFileReplication();
       } else {
-        Snapshot s = fileDiffList.searchSnapshotById(lastSnapshotId);
+        Snapshot s = fileDiffList.getSnapshotById(lastSnapshotId);
         dsDelta = diskspaceConsumed(s);
       }
     } else {
       dsDelta = diskspaceConsumed();
@@ -441,7 +441,7 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect
    *        if includesLastUcBlock == false.
    * @return file size
    */
-  private final long computeFileSize(boolean includesLastUcBlock,
+  public final long computeFileSize(boolean includesLastUcBlock,
       boolean usePreferredBlockSize4LastUcBlock) {
     if (blocks == null || blocks.length == 0) {
       return 0;
@@ -414,8 +414,11 @@ public abstract class INodeReference extends INode {
     @Override
     public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
         boolean useCache, int lastSnapshotId) {
-      Preconditions.checkState(lastSnapshotId == Snapshot.INVALID_ID
-          || this.lastSnapshotId <= lastSnapshotId);
+      // if this.lastSnapshotId < lastSnapshotId, the rename of the referred
+      // node happened before the rename of its ancestor. This should be
+      // impossible since for WithName node we only count its children at the
+      // time of the rename.
+      Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId);
       final INode referred = this.getReferredINode().asReference()
           .getReferredINode();
       // we cannot use cache for the referred node since its cached quota may
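
Note on the hunk above: the old precondition allowed this.lastSnapshotId to be
smaller than the requested lastSnapshotId; the new comment argues that case is
impossible for a WithName node, so the check is tightened to >=. As a reminder
of the Guava idiom, Preconditions.checkState throws IllegalStateException when
the asserted invariant fails. A minimal standalone sketch (class name and
values are illustrative, not part of the patch):

import com.google.common.base.Preconditions;

class InvariantDemo {
  static void check(int thisLastSnapshotId, int lastSnapshotId) {
    // Fails fast if a caller asks about a snapshot newer than the one
    // recorded when the referred node was renamed.
    Preconditions.checkState(thisLastSnapshotId >= lastSnapshotId,
        "WithName node: %s < %s", thisLastSnapshotId, lastSnapshotId);
  }

  public static void main(String[] args) {
    check(7, 5);  // passes silently
    check(5, 7);  // throws IllegalStateException
  }
}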
@@ -150,20 +150,6 @@ abstract class AbstractINodeDiffList<N extends INode,
     return last == null? null: last.getSnapshot();
   }
-
-  /**
-   * Search for the snapshot whose id is 1) no larger than the given id, and 2)
-   * most close to the given id
-   */
-  public final Snapshot searchSnapshotById(final int snapshotId) {
-    final int i = Collections.binarySearch(diffs, snapshotId);
-    if (i == -1) {
-      return null;
-    } else {
-      int index = i < 0 ? -i - 2 : i;
-      return diffs.get(index).getSnapshot();
-    }
-  }
 
   /**
    * Find the latest snapshot before a given snapshot.
    * @param anchor The returned snapshot must be taken before this given
@@ -201,23 +187,36 @@ abstract class AbstractINodeDiffList<N extends INode,
    * the corresponding snapshot state are the same.
    */
   public final D getDiff(Snapshot snapshot) {
-    if (snapshot == null) {
-      // snapshot == null means the current state, therefore, return null.
+    return getDiffById(snapshot == null ?
+        Snapshot.INVALID_ID : snapshot.getId());
+  }
+
+  private final D getDiffById(final int snapshotId) {
+    if (snapshotId == Snapshot.INVALID_ID) {
       return null;
     }
-    final int i = Collections.binarySearch(diffs, snapshot.getId());
+    final int i = Collections.binarySearch(diffs, snapshotId);
     if (i >= 0) {
       // exact match
       return diffs.get(i);
     } else {
       // Exact match not found means that there were no changes between
       // given snapshot and the next state so that the diff for the given
       // snapshot was not recorded. Thus, return the next state.
       final int j = -i - 1;
       return j < diffs.size()? diffs.get(j): null;
     }
   }
+
+  /**
+   * Search for the snapshot whose id is 1) no less than the given id,
+   * and 2) most close to the given id.
+   */
+  public final Snapshot getSnapshotById(final int snapshotId) {
+    D diff = getDiffById(snapshotId);
+    return diff == null ? null : diff.getSnapshot();
+  }
 
   /**
    * Check if changes have happened between two snapshots.
    * @param earlier The snapshot taken earlier
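
Note on the two hunks above: Collections.binarySearch returns
(-(insertion point) - 1) when the key is absent. The removed searchSnapshotById
used -i - 2, the entry just before the insertion point (a floor search: largest
id no larger than the given id), which could silently attribute usage to an
older snapshot. The new getDiffById uses -i - 1, the insertion point itself (a
ceiling search: smallest id no less than the given id), matching the Javadoc of
getSnapshotById. A self-contained sketch of the two conventions (names here are
illustrative, not from the patch):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class BinarySearchDemo {
  // Sorted snapshot ids that have recorded diffs.
  static final List<Integer> IDS = Arrays.asList(10, 20, 40);

  // Largest id <= key, or null: what searchSnapshotById computed.
  static Integer floor(int key) {
    int i = Collections.binarySearch(IDS, key);
    int index = i < 0 ? -i - 2 : i;  // one step before the insertion point
    return index < 0 ? null : IDS.get(index);
  }

  // Smallest id >= key, or null: what getDiffById computes.
  static Integer ceiling(int key) {
    int i = Collections.binarySearch(IDS, key);
    int index = i < 0 ? -i - 1 : i;  // exactly the insertion point
    return index >= IDS.size() ? null : IDS.get(index);
  }

  public static void main(String[] args) {
    System.out.println(floor(30));    // 20: last diff at or before id 30
    System.out.println(ceiling(30));  // 40: first diff at or after id 30
    System.out.println(ceiling(50));  // null: nothing recorded at or after 50
  }
}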
@@ -68,7 +68,10 @@ public interface FileWithSnapshot {
       long oldDiskspace = currentINode.diskspaceConsumed();
       if (removed.snapshotINode != null) {
         short replication = removed.snapshotINode.getFileReplication();
-        if (replication > currentINode.getBlockReplication()) {
+        short currentRepl = currentINode.getBlockReplication();
+        if (currentRepl == 0) {
+          oldDiskspace = currentINode.computeFileSize(true, true) * replication;
+        } else if (replication > currentRepl) {
           oldDiskspace = oldDiskspace / currentINode.getBlockReplication()
               * replication;
         }
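
Note on the hunk above: the old code rescaled oldDiskspace by
replication / currentRepl, which divides by zero once the current replication
drops to 0; the fix recomputes usage from the file size in that case. A small
sketch of the corrected arithmetic, with fileSize standing in for
computeFileSize(true, true) (names are illustrative):

class DiskspaceDemo {
  // Rescale old disk usage to the snapshot's replication factor without
  // dividing by a zero current replication.
  static long oldDiskspace(long currentDiskspace, long fileSize,
      short snapshotRepl, short currentRepl) {
    if (currentRepl == 0) {
      return fileSize * snapshotRepl;  // no live replicas: derive from size
    } else if (snapshotRepl > currentRepl) {
      return currentDiskspace / currentRepl * snapshotRepl;  // rescale
    }
    return currentDiskspace;
  }

  public static void main(String[] args) {
    // 100 bytes at replication 3 consumed 300; the snapshot pinned replication 5.
    System.out.println(oldDiskspace(300, 100, (short) 5, (short) 3)); // 500
    // Current replication dropped to 0: fall back to fileSize * snapshotRepl.
    System.out.println(oldDiskspace(0, 100, (short) 5, (short) 0));   // 500
  }
}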
@@ -914,25 +914,14 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       return super.computeQuotaUsage(counts, useCache, lastSnapshotId);
     }
 
-    final int diffNum = 0;
-    Snapshot lastSnapshot = null;
-    Snapshot lastInDiff = diffs.getLastSnapshot();
-    // if lastSnapshotId > lastInDiff.getId(), the snapshot diff associated with
-    // lastSnapshotId must have been deleted. We should call
-    // getChildrenList(null) to get the children list for the continuous
-    // computation. In the meanwhile, there must be some snapshot diff whose
-    // snapshot id is no less than lastSnapshotId. Otherwise the WithName node
-    // itself should have been deleted.
-    if (lastInDiff != null && lastInDiff.getId() >= lastSnapshotId) {
-      lastSnapshot = diffs.searchSnapshotById(lastSnapshotId);
-    }
+    Snapshot lastSnapshot = diffs.getSnapshotById(lastSnapshotId);
 
     ReadOnlyList<INode> childrenList = getChildrenList(lastSnapshot);
     for (INode child : childrenList) {
       child.computeQuotaUsage(counts, useCache, lastSnapshotId);
     }
 
-    counts.add(Quota.NAMESPACE, diffNum + 1);
+    counts.add(Quota.NAMESPACE, 1);
     return counts;
   }
 
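
Note on the hunk above: diffNum was a constant 0, so the directory always
contributed exactly 1 to the namespace count; the rewrite says so directly,
and getSnapshotById now absorbs the deleted-snapshot handling that the removed
block open-coded. A toy recursion, with a hypothetical Node in place of the
INode hierarchy, showing why a directory with two files yields a namespace
count of 3:

import java.util.ArrayList;
import java.util.List;

class QuotaDemo {
  static class Node {
    final List<Node> children = new ArrayList<>();
  }

  // Every inode contributes 1 to the namespace quota, recursively.
  static long namespace(Node node) {
    long count = 1;                  // this node: counts.add(NAMESPACE, 1)
    for (Node child : node.children) {
      count += namespace(child);     // child.computeQuotaUsage(...)
    }
    return count;
  }

  public static void main(String[] args) {
    Node dir = new Node();
    dir.children.add(new Node());
    dir.children.add(new Node());
    System.out.println(namespace(dir)); // 3: the directory plus two files
  }
}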
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
 import org.apache.hadoop.io.Text;
@@ -129,7 +130,8 @@ class ImageLoaderCurrent implements ImageLoader {
                                    -40, -41, -42, -43};
   private int imageVersion = 0;
 
-  private final Map<String, String> nodeMap = new HashMap<String, String>();
+  private final Map<Long, String> subtreeMap = new HashMap<Long, String>();
+  private final Map<Long, String> dirNodeMap = new HashMap<Long, String>();
 
   /* (non-Javadoc)
    * @see ImageLoader#canProcessVersion(int)
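
Note on the hunk above: keying the visited-subtree map by directory name broke
once snapshots and renames let distinct directories be visited under the same
string; an inode id is stable and unique. A hedged sketch of the dedup pattern
the two new maps implement, outside the ImageLoader plumbing (class and ids
are hypothetical):

import java.util.HashMap;
import java.util.Map;

class SubtreeDedupDemo {
  private final Map<Long, String> subtreeMap = new HashMap<>(); // visited subtrees
  private final Map<Long, String> dirNodeMap = new HashMap<>(); // inode id -> path

  void recordDirectory(long inodeId, String pathName) {
    dirNodeMap.put(inodeId, pathName);
  }

  // True only the first time the subtree rooted at inodeId is seen.
  boolean firstVisit(long inodeId) {
    String dirName = dirNodeMap.get(inodeId);
    return subtreeMap.put(inodeId, dirName) == null;
  }

  public static void main(String[] args) {
    SubtreeDedupDemo demo = new SubtreeDedupDemo();
    demo.recordDirectory(1001L, "/a/b");
    System.out.println(demo.firstVisit(1001L)); // true: process the subtree
    System.out.println(demo.firstVisit(1001L)); // false: already visited, skip
  }
}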
@@ -196,7 +198,8 @@ class ImageLoaderCurrent implements ImageLoader {
       }
     }
     processINodes(in, v, numInodes, skipBlocks, supportSnapshot);
-    nodeMap.clear();
+    subtreeMap.clear();
+    dirNodeMap.clear();
 
     processINodesUC(in, v, skipBlocks);
 
@@ -441,10 +444,11 @@ class ImageLoaderCurrent implements ImageLoader {
    */
   private void processDirectoryWithSnapshot(DataInputStream in, ImageVisitor v,
       boolean skipBlocks) throws IOException {
-    // 1. load dir name
-    String dirName = FSImageSerialization.readString(in);
+    // 1. load dir node id
+    long inodeId = in.readLong();
 
-    String oldValue = nodeMap.put(dirName, dirName);
+    String dirName = dirNodeMap.get(inodeId);
+    String oldValue = subtreeMap.put(inodeId, dirName);
     if (oldValue != null) { // the subtree has been visited
       return;
     }
@@ -581,6 +585,8 @@ class ImageLoaderCurrent implements ImageLoader {
       throws IOException {
     boolean supportSnapshot =
         LayoutVersion.supports(Feature.SNAPSHOT, imageVersion);
+    boolean supportInodeId =
+        LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion);
 
     v.visitEnclosingElement(ImageElement.INODE);
     String pathName = FSImageSerialization.readString(in);
@@ -591,9 +597,11 @@ class ImageLoaderCurrent implements ImageLoader {
       }
     }
 
+    long inodeId = INodeId.GRANDFATHER_INODE_ID;
     v.visit(ImageElement.INODE_PATH, pathName);
-    if (LayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) {
-      v.visit(ImageElement.INODE_ID, in.readLong());
+    if (supportInodeId) {
+      inodeId = in.readLong();
+      v.visit(ImageElement.INODE_ID, inodeId);
     }
     v.visit(ImageElement.REPLICATION, in.readShort());
     v.visit(ImageElement.MODIFICATION_TIME, formatDate(in.readLong()));
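
Note on the two hunks above: hoisting the ADD_INODE_ID check into
supportInodeId and defaulting inodeId to INodeId.GRANDFATHER_INODE_ID lets the
directory bookkeeping below use an id even when the image predates inode ids.
A sketch of the versioned-read pattern against a plain DataInputStream (the
sentinel value and stream contents here are fabricated for the demo):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class VersionedReadDemo {
  static final long NO_INODE_ID = -1; // stand-in for INodeId.GRANDFATHER_INODE_ID

  // Read the optional inode id: present only in new-enough image layouts.
  static long readInodeId(DataInputStream in, boolean supportInodeId)
      throws IOException {
    long inodeId = NO_INODE_ID;
    if (supportInodeId) {
      inodeId = in.readLong();
    }
    return inodeId;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    new DataOutputStream(buf).writeLong(42L);
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    System.out.println(readInodeId(in, true));   // 42: consumed from the stream
    System.out.println(readInodeId(in, false));  // -1: sentinel, nothing consumed
  }
}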
@@ -619,6 +627,9 @@ class ImageLoaderCurrent implements ImageLoader {
         }
       }
     } else if (numBlocks == -1) { // Directory
+      if (supportSnapshot && supportInodeId) {
+        dirNodeMap.put(inodeId, pathName);
+      }
       v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
       if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
         v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);