HDFS-9063. Correctly handle snapshot path for getContentSummary. Contributed by Jing Zhao.
parent e690a32e57
commit 3f42753102
CHANGES.txt
@@ -1367,6 +1367,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9073. Fix failures in TestLazyPersistLockedMemory
     testReleaseOnEviction(). (J.Andreina via stevel)
 
+    HDFS-9063. Correctly handle snapshot path for getContentSummary. (jing9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

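To make the behavioral change concrete before the NameNode internals, here is a minimal client-side sketch. It is not part of this commit: the paths and snapshot name are hypothetical, and only the public FileSystem#getContentSummary API is assumed. After this fix, the second call reports only what snapshot s1 actually captured instead of being computed against the current state.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotSummaryExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Live directory: reflects the current state (plus space still held
    // by files that only survive inside snapshots).
    ContentSummary live = fs.getContentSummary(new Path("/foo/bar"));
    // Snapshot path: should reflect only what existed when s1 was taken.
    ContentSummary snap =
        fs.getContentSummary(new Path("/foo/.snapshot/s1/bar"));
    System.out.println("live: " + live.getLength() + " bytes, "
        + "s1: " + snap.getLength() + " bytes");
  }
}
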
FSDirStatAndListingOp.java
@@ -571,7 +571,8 @@ class FSDirStatAndListingOp {
       ContentSummaryComputationContext cscc =
           new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
               fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
-      ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
+      ContentSummary cs = targetNode.computeAndConvertContentSummary(
+          iip.getPathSnapshotId(), cscc);
       fsd.addYieldCount(cscc.getYieldCount());
       return cs;
     }

INode.java
@@ -419,16 +419,17 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
 
   /** Compute {@link ContentSummary}. Blocking call */
   public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
-    return computeAndConvertContentSummary(
+    return computeAndConvertContentSummary(Snapshot.CURRENT_STATE_ID,
         new ContentSummaryComputationContext(bsps));
   }
 
   /**
    * Compute {@link ContentSummary}.
    */
-  public final ContentSummary computeAndConvertContentSummary(
+  public final ContentSummary computeAndConvertContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
-    ContentCounts counts = computeContentSummary(summary).getCounts();
+    ContentCounts counts = computeContentSummary(snapshotId, summary)
+        .getCounts();
     final QuotaCounts q = getQuotaCounts();
     return new ContentSummary.Builder().
         length(counts.getLength()).

@@ -445,11 +446,16 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   /**
    * Count subtree content summary with a {@link ContentCounts}.
    *
+   * @param snapshotId Specify the time range for the calculation. If this
+   *                   parameter equals {@link Snapshot#CURRENT_STATE_ID},
+   *                   the result covers both the current state and all the
+   *                   snapshots. Otherwise the result only covers the
+   *                   files/directories contained in the specific snapshot.
    * @param summary the context object holding counts for the subtree.
    * @return the same object as summary.
    */
   public abstract ContentSummaryComputationContext computeContentSummary(
-      ContentSummaryComputationContext summary);
+      int snapshotId, ContentSummaryComputationContext summary);
 
 
   /**

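The Javadoc above describes a sentinel-based convention: a current-state query passes Snapshot.CURRENT_STATE_ID, and the id is threaded unchanged through the recursive walk. A self-contained sketch of that pattern follows; every name in it is invented for illustration and it is not the HDFS implementation.

import java.util.ArrayList;
import java.util.List;

public class SummaryWalkSketch {
  // Sentinel meaning "current state"; stands in for Snapshot.CURRENT_STATE_ID.
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE;

  abstract static class Node {
    // Mirrors the new abstract signature: every node type receives the id.
    abstract long length(int snapshotId);
  }

  static class FileNode extends Node {
    final int firstSnapshotId; // first snapshot that can see this file
    final long size;
    FileNode(int firstSnapshotId, long size) {
      this.firstSnapshotId = firstSnapshotId;
      this.size = size;
    }
    @Override
    long length(int snapshotId) {
      // Included in the current state, or in any snapshot taken at or after
      // the file appeared; excluded from earlier snapshots.
      return snapshotId >= firstSnapshotId ? size : 0;
    }
  }

  static class DirNode extends Node {
    final List<Node> children = new ArrayList<>();
    @Override
    long length(int snapshotId) {
      long total = 0;
      for (Node child : children) {
        total += child.length(snapshotId); // thread the id down unchanged
      }
      return total;
    }
  }

  public static void main(String[] args) {
    DirNode bar = new DirNode();
    bar.children.add(new FileNode(2, 10)); // file created after snapshot 1
    System.out.println(bar.length(CURRENT_STATE_ID)); // 10: current state
    System.out.println(bar.length(1));                // 0: not in snapshot 1
  }
}
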
INodeDirectory.java
@@ -626,18 +626,20 @@ public class INodeDirectory extends INodeWithAdditionalFields
   }
 
   @Override
-  public ContentSummaryComputationContext computeContentSummary(
+  public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
     final DirectoryWithSnapshotFeature sf = getDirectoryWithSnapshotFeature();
-    if (sf != null) {
+    if (sf != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
+      // if the getContentSummary call is against a non-snapshot path, the
+      // computation should include all the deleted files/directories
       sf.computeContentSummary4Snapshot(summary.getBlockStoragePolicySuite(),
           summary.getCounts());
     }
     final DirectoryWithQuotaFeature q = getDirectoryWithQuotaFeature();
-    if (q != null) {
+    if (q != null && snapshotId == Snapshot.CURRENT_STATE_ID) {
       return q.computeContentSummary(this, summary);
     } else {
-      return computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
+      return computeDirectoryContentSummary(summary, snapshotId);
     }
   }
 

@@ -651,7 +653,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
       byte[] childName = child.getLocalNameBytes();
 
       long lastYieldCount = summary.getYieldCount();
-      child.computeContentSummary(summary);
+      child.computeContentSummary(snapshotId, summary);
 
       // Check whether the computation was paused in the subtree.
       // The counts may be off, but traversing the rest of children

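Two guards in the directory hunk now check snapshotId == Snapshot.CURRENT_STATE_ID. The reasoning, restated as a hedged standalone sketch (all names and stub values are invented, and the real code merges the cached usage through the summary context rather than returning it directly): files deleted from the working tree but retained in snapshots, and the cached quota usage, both describe the current state, so neither may be consulted when answering for a snapshot path.

public class DirectoryBranchSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE; // stand-in sentinel

  long countDeletedInSnapshots() { return 5; }  // stub: deleted-tree walk
  long cachedQuotaUsage()        { return 42; } // stub: quota-cache lookup
  long walkChildren(int snapshotId) {           // stub: recursive child walk
    return snapshotId == CURRENT_STATE_ID ? 37 : 20;
  }

  long summarize(boolean hasSnapshotFeature, boolean hasQuotaCache,
      int snapshotId) {
    long counts = 0;
    if (hasSnapshotFeature && snapshotId == CURRENT_STATE_ID) {
      // A current-state query must still charge files that were deleted
      // from the working tree but survive inside snapshots.
      counts += countDeletedInSnapshots();
    }
    if (hasQuotaCache && snapshotId == CURRENT_STATE_ID) {
      // The cache reflects the current state only; snapshot-path queries
      // must fall through to the explicit walk below.
      return counts + cachedQuotaUsage();
    }
    return counts + walkChildren(snapshotId);
  }

  public static void main(String[] args) {
    DirectoryBranchSketch d = new DirectoryBranchSketch();
    System.out.println(d.summarize(true, true, CURRENT_STATE_ID)); // 47
    System.out.println(d.summarize(true, true, 1));                // 20
  }
}
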
INodeFile.java
@@ -610,23 +610,10 @@ public class INodeFile extends INodeWithAdditionalFields
 
   @Override
   public final ContentSummaryComputationContext computeContentSummary(
-      final ContentSummaryComputationContext summary) {
+      int snapshotId, final ContentSummaryComputationContext summary) {
     final ContentCounts counts = summary.getCounts();
-    FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
-    final long fileLen;
-    if (sf == null) {
-      fileLen = computeFileSize();
-      counts.addContent(Content.FILE, 1);
-    } else {
-      final FileDiffList diffs = sf.getDiffs();
-      final int n = diffs.asList().size();
-      counts.addContent(Content.FILE, n);
-      if (n > 0 && sf.isCurrentFileDeleted()) {
-        fileLen = diffs.getLast().getFileSize();
-      } else {
-        fileLen = computeFileSize();
-      }
-    }
+    counts.addContent(Content.FILE, 1);
+    final long fileLen = computeFileSize(snapshotId);
     counts.addContent(Content.LENGTH, fileLen);
     counts.addContent(Content.DISKSPACE, storagespaceConsumed(null)
         .getStorageSpace());

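The removed branching re-derived "which size to report" at this call site; the single replacement call relies on computeFileSize(int) already encoding "size as of a snapshot". A hedged sketch of that rule follows; the diff bookkeeping is heavily simplified and all names are invented.

import java.util.List;

public class FileSizeSketch {
  static final int CURRENT_STATE_ID = Integer.MAX_VALUE; // stand-in sentinel

  // A diff records the file length as seen by snapshots up to and including
  // its snapshot id; diffs are ordered oldest first.
  static class SizeDiff {
    final int snapshotId;
    final long size;
    SizeDiff(int snapshotId, long size) {
      this.snapshotId = snapshotId;
      this.size = size;
    }
  }

  static long computeFileSize(List<SizeDiff> diffs, long currentSize,
      int snapshotId) {
    if (snapshotId == CURRENT_STATE_ID) {
      return currentSize; // live query: the working-tree length
    }
    for (SizeDiff d : diffs) {
      if (d.snapshotId >= snapshotId) {
        return d.size; // earliest diff that still covers this snapshot
      }
    }
    return currentSize; // file unchanged since the snapshot was taken
  }

  public static void main(String[] args) {
    // The file was empty when snapshot 1 was taken, then grew to 10 bytes.
    List<SizeDiff> diffs = List.of(new SizeDiff(1, 0));
    System.out.println(computeFileSize(diffs, 10, 1));                // 0
    System.out.println(computeFileSize(diffs, 10, CURRENT_STATE_ID)); // 10
  }
}
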
INodeMap.java
@@ -109,7 +109,7 @@ public class INodeMap {
 
       @Override
       public ContentSummaryComputationContext computeContentSummary(
-          ContentSummaryComputationContext summary) {
+          int snapshotId, ContentSummaryComputationContext summary) {
         return null;
       }
 

INodeReference.java
@@ -313,9 +313,9 @@ public abstract class INodeReference extends INode {
   }
 
   @Override
-  public ContentSummaryComputationContext computeContentSummary(
+  public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       ContentSummaryComputationContext summary) {
-    return referred.computeContentSummary(summary);
+    return referred.computeContentSummary(snapshotId, summary);
   }
 
   @Override

@@ -502,11 +502,11 @@ public abstract class INodeReference extends INode {
 
     @Override
     public final ContentSummaryComputationContext computeContentSummary(
-        ContentSummaryComputationContext summary) {
+        int snapshotId, ContentSummaryComputationContext summary) {
+      final int s = snapshotId < lastSnapshotId ? snapshotId : lastSnapshotId;
       // only count storagespace for WithName
       final QuotaCounts q = computeQuotaUsage(
-          summary.getBlockStoragePolicySuite(), getStoragePolicyID(), false,
-          lastSnapshotId);
+          summary.getBlockStoragePolicySuite(), getStoragePolicyID(), false, s);
       summary.getCounts().addContent(Content.DISKSPACE, q.getStorageSpace());
       summary.getCounts().addTypeSpaces(q.getTypeSpaces());
       return summary;

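The new local s above is simply min(snapshotId, lastSnapshotId): a WithName reference tracks a name that only existed up to lastSnapshotId, so any later query, including the very large current-state sentinel, is clamped back to that bound. A tiny hypothetical check:

public class ClampSketch {
  public static void main(String[] args) {
    final int lastSnapshotId = 3; // last snapshot in which the name existed
    int[] queries = {1, 3, 7, Integer.MAX_VALUE /* current-state stand-in */};
    for (int snapshotId : queries) {
      int s = Math.min(snapshotId, lastSnapshotId); // same as the ternary
      System.out.println(snapshotId + " -> " + s);
    }
  }
}
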
INodeSymlink.java
@@ -94,7 +94,7 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   }
 
   @Override
-  public ContentSummaryComputationContext computeContentSummary(
+  public ContentSummaryComputationContext computeContentSummary(int snapshotId,
       final ContentSummaryComputationContext summary) {
     summary.getCounts().addContent(Content.SYMLINK, 1);
     return summary;

DirectorySnapshottableFeature.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.Content;
-import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
+import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory.SnapshotAndINode;

@@ -219,14 +219,12 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
     }
   }
 
-  public ContentSummaryComputationContext computeContentSummary(
-      final BlockStoragePolicySuite bsps,
-      final INodeDirectory snapshotRoot,
-      final ContentSummaryComputationContext summary) {
-    snapshotRoot.computeContentSummary(summary);
-    summary.getCounts().addContent(Content.SNAPSHOT, snapshotsByNames.size());
-    summary.getCounts().addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
-    return summary;
+  @Override
+  public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
+      final ContentCounts counts) {
+    counts.addContent(Content.SNAPSHOT, snapshotsByNames.size());
+    counts.addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
+    super.computeContentSummary4Snapshot(bsps, counts);
   }
 
   /**

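This refactor folds the old standalone method into an @Override of computeContentSummary4Snapshot, so the snapshottable directory layers its own counts on top of whatever the base feature accumulates. A minimal sketch of that chaining, with invented names and stand-in counts:

public class OverrideChainSketch {
  static class Counts { long snapshots; long deletedTrees; }

  static class BaseFeature {
    void computeContentSummary4Snapshot(Counts counts) {
      counts.deletedTrees += 2; // stand-in for walking deleted diffs
    }
  }

  static class SnapshottableFeature extends BaseFeature {
    @Override
    void computeContentSummary4Snapshot(Counts counts) {
      counts.snapshots += 1; // this directory's own snapshot counts first
      super.computeContentSummary4Snapshot(counts); // then the base counts
    }
  }

  public static void main(String[] args) {
    Counts c = new Counts();
    new SnapshottableFeature().computeContentSummary4Snapshot(c);
    System.out.println(c.snapshots + " snapshot(s), "
        + c.deletedTrees + " deleted tree item(s)");
  }
}
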
DirectoryWithSnapshotFeature.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;
-import org.apache.hadoop.hdfs.server.namenode.Content;
 import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;

@@ -628,13 +627,11 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
         new ContentSummaryComputationContext(bsps);
     for(DirectoryDiff d : diffs) {
       for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
-        deleted.computeContentSummary(summary);
+        deleted.computeContentSummary(Snapshot.CURRENT_STATE_ID, summary);
       }
     }
     // Add the counts from deleted trees.
     counts.addContents(summary.getCounts());
-    // Add the deleted directory count.
-    counts.addContent(Content.DIRECTORY, diffs.asList().size());
   }
 
   /**

Snapshot.java
@@ -176,8 +176,7 @@ public class Snapshot implements Comparable<byte[]> {
 
     @Override
     public ContentSummaryComputationContext computeContentSummary(
-        ContentSummaryComputationContext summary) {
-      int snapshotId = getParent().getSnapshot(getLocalNameBytes()).getId();
+        int snapshotId, ContentSummaryComputationContext summary) {
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 

TestGetContentSummaryWithSnapshot.java (new file)
@@ -0,0 +1,126 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode.snapshot;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;

import java.io.FileNotFoundException;
import java.io.IOException;

/**
 * Verify content summary is computed correctly when
 * 1. There are snapshots taken under the directory
 * 2. The given path is a snapshot path
 */
public class TestGetContentSummaryWithSnapshot {
  protected static final short REPLICATION = 3;
  protected static final long BLOCKSIZE = 1024;

  protected Configuration conf;
  protected MiniDFSCluster cluster;
  protected FSNamesystem fsn;
  protected FSDirectory fsdir;
  protected DistributedFileSystem dfs;

  @Rule
  public ExpectedException exception = ExpectedException.none();

  @Before
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
    cluster.waitActive();

    fsn = cluster.getNamesystem();
    fsdir = fsn.getFSDirectory();
    dfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  /**
   * Calculate against a snapshot path.
   * 1. create dirs /foo/bar
   * 2. take snapshot s1 on /foo
   * 3. create a 10 byte file /foo/bar/baz
   * Make sure "/foo/bar" and "/foo/.snapshot/s1/bar" have correct results:
   * the 10 byte file is not included in snapshot s1.
   */
  @Test
  public void testGetContentSummary() throws IOException {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path baz = new Path(bar, "baz");

    dfs.mkdirs(bar);
    dfs.allowSnapshot(foo);
    dfs.createSnapshot(foo, "s1");

    DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);

    ContentSummary summary = cluster.getNameNodeRpc().getContentSummary(
        bar.toString());
    Assert.assertEquals(1, summary.getDirectoryCount());
    Assert.assertEquals(1, summary.getFileCount());
    Assert.assertEquals(10, summary.getLength());

    final Path barS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar");
    summary = cluster.getNameNodeRpc().getContentSummary(barS1.toString());
    Assert.assertEquals(1, summary.getDirectoryCount());
    Assert.assertEquals(0, summary.getFileCount());
    Assert.assertEquals(0, summary.getLength());

    // also check /foo and /foo/.snapshot/s1
    summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
    Assert.assertEquals(2, summary.getDirectoryCount());
    Assert.assertEquals(1, summary.getFileCount());
    Assert.assertEquals(10, summary.getLength());

    final Path fooS1 = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    summary = cluster.getNameNodeRpc().getContentSummary(fooS1.toString());
    Assert.assertEquals(2, summary.getDirectoryCount());
    Assert.assertEquals(0, summary.getFileCount());
    Assert.assertEquals(0, summary.getLength());

    final Path bazS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar/baz");
    try {
      cluster.getNameNodeRpc().getContentSummary(bazS1.toString());
      Assert.fail("should get FileNotFoundException");
    } catch (FileNotFoundException ignored) {}
  }
}