HDFS-4706. Do not replace root inode for disallowSnapshot.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1469122 13f79535-47bb-0310-9956-ffa450edef68
commit 419fd3999c (parent 0ad27ad3e3)

@@ -253,3 +253,5 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4707. Add snapshot methods to FilterFileSystem and fix findbugs warnings.
   (szetszwo)
 
+  HDFS-4706. Do not replace root inode for disallowSnapshot. (szetszwo)
+

DFSUtil.java

@@ -85,6 +85,7 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import com.google.common.primitives.SignedBytes;
 import com.google.protobuf.BlockingService;
 
 @InterfaceAudience.Private

@@ -92,7 +93,18 @@ public class DFSUtil {
   public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());
 
   public static final byte[] EMPTY_BYTES = {};
 
+  /** Compare two byte arrays by lexicographical order. */
+  public static int compareBytes(byte[] left, byte[] right) {
+    if (left == null) {
+      left = EMPTY_BYTES;
+    }
+    if (right == null) {
+      right = EMPTY_BYTES;
+    }
+    return SignedBytes.lexicographicalComparator().compare(left, right);
+  }
+
   private DFSUtil() { /* Hidden constructor */ }
   private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
     @Override
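
[Illustration, not part of the patch] The new DFSUtil.compareBytes helper treats null as the empty name and orders byte arrays with Guava's signed lexicographical comparator. A minimal standalone sketch of those semantics; the class and method names below are invented, only SignedBytes is a real dependency:

import com.google.common.primitives.SignedBytes;

public class CompareBytesSketch {
  private static final byte[] EMPTY = {};

  // Mirrors the helper above: null behaves like the empty byte array.
  static int compare(byte[] left, byte[] right) {
    final byte[] l = left == null ? EMPTY : left;
    final byte[] r = right == null ? EMPTY : right;
    return SignedBytes.lexicographicalComparator().compare(l, r);
  }

  public static void main(String[] args) {
    System.out.println(compare(null, new byte[0]) == 0);             // true: null == empty
    System.out.println(compare(null, "a".getBytes()) < 0);           // true: empty sorts first
    System.out.println(compare("b".getBytes(), "a".getBytes()) > 0); // true
  }
}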

SnapshottableDirectoryStatus.java

@@ -28,7 +28,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 /**
  * Metadata about a snapshottable directory
  */
-public class SnapshottableDirectoryStatus {
+public class SnapshottableDirectoryStatus
+    implements Comparable<SnapshottableDirectoryStatus> {
   /** Basic information of the snapshottable directory */
   private HdfsFileStatus dirStatus;
 

@@ -144,4 +145,12 @@ public class SnapshottableDirectoryStatus {
   private static int maxLength(int n, Object value) {
     return Math.max(n, String.valueOf(value).length());
   }
+
+  @Override
+  public int compareTo(SnapshottableDirectoryStatus that) {
+    int d = DFSUtil.compareBytes(this.parentFullPath, that.parentFullPath);
+    return d != 0? d
+        : DFSUtil.compareBytes(this.dirStatus.getLocalNameInBytes(),
+            that.dirStatus.getLocalNameInBytes());
+  }
 }
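
[Illustration, not part of the patch] Comparable is added here so that SnapshotManager can sort the snapshottable-directory listing with Collections.sort later in this commit; the order is parent path first, then local name (the real code compares the raw byte arrays via DFSUtil.compareBytes). A standalone sketch of that two-level ordering, with invented names:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ListingOrderSketch {
  static class Key implements Comparable<Key> {
    final String parent, name;
    Key(String parent, String name) { this.parent = parent; this.name = name; }
    @Override public int compareTo(Key that) {
      int d = this.parent.compareTo(that.parent);         // parent path first
      return d != 0 ? d : this.name.compareTo(that.name); // then local name
    }
    @Override public String toString() { return parent + "/" + name; }
  }

  public static void main(String[] args) {
    List<Key> dirs = new ArrayList<Key>();
    dirs.add(new Key("/user", "bar"));
    dirs.add(new Key("", "sub1"));
    dirs.add(new Key("/user", "alice"));
    Collections.sort(dirs);
    System.out.println(dirs);  // [/sub1, /user/alice, /user/bar]
  }
}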

FSDirectory.java

@@ -172,6 +172,11 @@ public class FSDirectory implements Closeable {
     return getFSNamesystem().getBlockManager();
   }
 
+  /** @return the root directory inode. */
+  public INodeDirectoryWithQuota getRoot() {
+    return rootDir;
+  }
+
   /**
    * Notify that loading of this FSDirectory is complete, and
    * it is ready for use

FSNamesystem.java

@@ -3764,12 +3764,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
 
   @Metric({ "SnapshottableDirectories", "Number of snapshottable directories" })
-  public long getNumSnapshottableDirs() {
+  public int getNumSnapshottableDirs() {
     return this.snapshotManager.getNumSnapshottableDirs();
   }
 
   @Metric({ "Snapshots", "The number of snapshots" })
-  public long getNumSnapshots() {
+  public int getNumSnapshots() {
     return this.snapshotManager.getNumSnapshots();
   }
 

@@ -5966,7 +5966,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    */
   void removeSnapshottableDirs(List<INodeDirectorySnapshottable> toRemove) {
     if (snapshotManager != null) {
-      snapshotManager.removeSnapshottableDirs(toRemove);
+      snapshotManager.removeSnapshottable(toRemove);
     }
   }
 

INode.java

@@ -43,7 +43,6 @@ import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.primitives.SignedBytes;
 
 /**
  * We keep an in-memory representation of the file/block hierarchy.

@@ -602,10 +601,7 @@ public abstract class INode implements Diff.Element<byte[]> {
 
   @Override
   public final int compareTo(byte[] bytes) {
-    final byte[] name = getLocalNameBytes();
-    final byte[] left = name == null? DFSUtil.EMPTY_BYTES: name;
-    final byte[] right = bytes == null? DFSUtil.EMPTY_BYTES: bytes;
-    return SignedBytes.lexicographicalComparator().compare(left, right);
+    return DFSUtil.compareBytes(getLocalNameBytes(), bytes);
   }
 
   @Override

@@ -650,7 +646,8 @@ public abstract class INode implements Diff.Element<byte[]> {
       Snapshot snapshot) {
     out.print(prefix);
     out.print(" ");
-    out.print(getLocalName());
+    final String name = getLocalName();
+    out.print(name.isEmpty()? "/": name);
     out.print(" (");
     out.print(getObjectString());
     out.print("), ");

INodeDirectorySnapshottable.java

@@ -470,16 +470,22 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
     if (snapshot == null) {
       out.println();
       out.print(prefix);
+
+      out.print("Snapshot of ");
+      final String name = getLocalName();
+      out.print(name.isEmpty()? "/": name);
+      out.print(": quota=");
+      out.print(getSnapshotQuota());
+
       int n = 0;
       for(DirectoryDiff diff : getDiffs()) {
         if (diff.isSnapshotRoot()) {
           n++;
         }
       }
-      out.print(n);
-      out.print(n <= 1 ? " snapshot of " : " snapshots of ");
-      final String name = getLocalName();
-      out.println(name.isEmpty()? "/": name);
+      Preconditions.checkState(n == snapshotsByNames.size());
+      out.print(", #snapshot=");
+      out.println(n);
 
       dumpTreeRecursively(out, prefix, new Iterable<SnapshotAndINode>() {
         @Override

SnapshotManager.java

@@ -21,8 +21,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.Iterator;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;

@@ -37,6 +37,8 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Manage snapshottable directories and their snapshots.
  *

@@ -52,14 +54,13 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottab
 public class SnapshotManager implements SnapshotStats {
   private final FSDirectory fsdir;
 
-  private final AtomicInteger numSnapshottableDirs = new AtomicInteger();
   private final AtomicInteger numSnapshots = new AtomicInteger();
 
   private int snapshotCounter = 0;
 
   /** All snapshottable directories in the namesystem. */
-  private final List<INodeDirectorySnapshottable> snapshottables
-      = new ArrayList<INodeDirectorySnapshottable>();
+  private final Map<Long, INodeDirectorySnapshottable> snapshottables
+      = new HashMap<Long, INodeDirectorySnapshottable>();
 
   public SnapshotManager(final FSDirectory fsdir) {
     this.fsdir = fsdir;

@@ -72,26 +73,36 @@ public class SnapshotManager implements SnapshotStats {
   public void setSnapshottable(final String path) throws IOException {
     final INodesInPath iip = fsdir.getLastINodeInPath(path);
     final INodeDirectory d = INodeDirectory.valueOf(iip.getINode(0), path);
+
+    final INodeDirectorySnapshottable s;
     if (d.isSnapshottable()) {
       //The directory is already a snapshottable directory.
-      ((INodeDirectorySnapshottable)d).setSnapshotQuota(
-          INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
-      return;
+      s = (INodeDirectorySnapshottable)d;
+      s.setSnapshotQuota(INodeDirectorySnapshottable.SNAPSHOT_LIMIT);
+    } else {
+      s = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshot());
     }
-
-    final INodeDirectorySnapshottable s
-        = d.replaceSelf4INodeDirectorySnapshottable(iip.getLatestSnapshot());
-    snapshottables.add(s);
-    numSnapshottableDirs.getAndIncrement();
+    addSnapshottable(s);
   }
 
-  /**
-   * Add a snapshottable dir into {@link #snapshottables}. Called when loading
-   * fsimage.
-   * @param dir The snapshottable dir to be added.
-   */
+  /** Add the given snapshottable directory to {@link #snapshottables}. */
   public void addSnapshottable(INodeDirectorySnapshottable dir) {
-    snapshottables.add(dir);
+    snapshottables.put(dir.getId(), dir);
   }
+
+  /** Remove the given snapshottable directory from {@link #snapshottables}. */
+  private void removeSnapshottable(INodeDirectorySnapshottable s) {
+    final INodeDirectorySnapshottable removed = snapshottables.remove(s.getId());
+    Preconditions.checkState(s == removed);
+  }
+
+  /** Remove snapshottable directories from {@link #snapshottables} */
+  public void removeSnapshottable(List<INodeDirectorySnapshottable> toRemove) {
+    if (toRemove != null) {
+      for (INodeDirectorySnapshottable s : toRemove) {
+        removeSnapshottable(s);
+      }
+    }
+  }
 
   /**
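
[Illustration, not part of the patch] The registry of snapshottable directories becomes a HashMap keyed by inode id, so add and remove are O(1) lookups and the directory count can be read off the map instead of a separately maintained AtomicInteger. A simplified standalone sketch of that pattern, with invented names (Dir, SnapshottableRegistrySketch):

import java.util.HashMap;
import java.util.Map;

public class SnapshottableRegistrySketch {
  static class Dir {
    final long id;
    Dir(long id) { this.id = id; }
  }

  private final Map<Long, Dir> snapshottables = new HashMap<Long, Dir>();

  void add(Dir d) { snapshottables.put(d.id, d); }

  void remove(Dir d) {
    final Dir removed = snapshottables.remove(d.id);
    if (removed != d) {    // same invariant the patch checks with Preconditions.checkState
      throw new IllegalStateException("unexpected entry for id " + d.id);
    }
  }

  int size() { return snapshottables.size(); }  // replaces the separate counter
}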

@@ -99,8 +110,7 @@ public class SnapshotManager implements SnapshotStats {
    *
    * @throws SnapshotException if there are snapshots in the directory.
    */
-  public void resetSnapshottable(final String path
-      ) throws IOException {
+  public void resetSnapshottable(final String path) throws IOException {
     final INodesInPath iip = fsdir.getLastINodeInPath(path);
     final INodeDirectorySnapshottable s = INodeDirectorySnapshottable.valueOf(
         iip.getINode(0), path);

@@ -109,10 +119,12 @@ public class SnapshotManager implements SnapshotStats {
           + "Please redo the operation after removing all the snapshots.");
     }
 
-    s.replaceSelf(iip.getLatestSnapshot());
-    snapshottables.remove(s);
-
-    numSnapshottableDirs.getAndDecrement();
+    if (s == fsdir.getRoot()) {
+      s.setSnapshotQuota(0);
+    } else {
+      s.replaceSelf(iip.getLatestSnapshot());
+    }
+    removeSnapshottable(s);
   }
 
   /**
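
[Illustration, not part of the patch] This is the hunk the commit title refers to: the root inode must stay in place in the namespace, so disallowing snapshots on "/" only zeroes its snapshot quota, while every other directory is still swapped back to a plain directory inode via replaceSelf. A minimal sketch of that branch, with an invented SnapshottableDir interface standing in for INodeDirectorySnapshottable:

public class DisallowSnapshotSketch {
  interface SnapshottableDir {
    void setSnapshotQuota(int quota);
    void replaceSelf();
  }

  static void disallow(SnapshottableDir dir, SnapshottableDir root) {
    if (dir == root) {
      dir.setSnapshotQuota(0);   // keep the root inode, just forbid new snapshots
    } else {
      dir.replaceSelf();         // convert back to a non-snapshottable directory inode
    }
    // the real code then also removes the directory from the snapshottable registry
  }
}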

@@ -189,12 +201,12 @@ public class SnapshotManager implements SnapshotStats {
   }
 
   @Override
-  public long getNumSnapshottableDirs() {
-    return numSnapshottableDirs.get();
+  public int getNumSnapshottableDirs() {
+    return snapshottables.size();
   }
 
   @Override
-  public long getNumSnapshots() {
+  public int getNumSnapshots() {
     return numSnapshots.get();
   }
 

@@ -204,11 +216,10 @@ public class SnapshotManager implements SnapshotStats {
    */
   public void write(DataOutput out) throws IOException {
     out.writeInt(snapshotCounter);
-    out.writeInt(numSnapshottableDirs.get());
     out.writeInt(numSnapshots.get());
 
     // write all snapshots.
-    for(INodeDirectorySnapshottable snapshottableDir : snapshottables) {
+    for(INodeDirectorySnapshottable snapshottableDir : snapshottables.values()) {
       for(Snapshot s : snapshottableDir.getSnapshotsByNames()) {
         s.write(out);
       }

@@ -222,7 +233,6 @@ public class SnapshotManager implements SnapshotStats {
   public Map<Integer, Snapshot> read(DataInput in, FSImageFormat.Loader loader
       ) throws IOException {
     snapshotCounter = in.readInt();
-    numSnapshottableDirs.set(in.readInt());
     numSnapshots.set(in.readInt());
 
     // read snapshots

@@ -249,7 +259,7 @@ public class SnapshotManager implements SnapshotStats {
 
     List<SnapshottableDirectoryStatus> statusList =
         new ArrayList<SnapshottableDirectoryStatus>();
-    for (INodeDirectorySnapshottable dir : snapshottables) {
+    for (INodeDirectorySnapshottable dir : snapshottables.values()) {
       if (userName == null || userName.equals(dir.getUserName())) {
         SnapshottableDirectoryStatus status = new SnapshottableDirectoryStatus(
             dir.getModificationTime(), dir.getAccessTime(),

@@ -261,30 +271,9 @@ public class SnapshotManager implements SnapshotStats {
         statusList.add(status);
       }
     }
-    return statusList.toArray(new SnapshottableDirectoryStatus[statusList
-        .size()]);
-  }
-
-  /**
-   * Remove snapshottable directories from {@link #snapshottables}
-   * @param toRemoveList A list of INodeDirectorySnapshottable to be removed
-   */
-  public void removeSnapshottableDirs(
-      List<INodeDirectorySnapshottable> toRemoveList) {
-    if (toRemoveList != null) {
-      Iterator<INodeDirectorySnapshottable> iter = snapshottables.iterator();
-      while (iter.hasNext()) {
-        INodeDirectorySnapshottable next = iter.next();
-        for (INodeDirectorySnapshottable toRemove : toRemoveList) {
-          if (next == toRemove) {
-            iter.remove();
-            break;
-          }
-        }
-      }
-      // modify the numSnapshottableDirs metrics
-      numSnapshottableDirs.addAndGet(-toRemoveList.size());
-    }
-  }
+    Collections.sort(statusList);
+    return statusList.toArray(
+        new SnapshottableDirectoryStatus[statusList.size()]);
   }
 
   /**

SnapshotStats.java

@@ -26,11 +26,11 @@ public interface SnapshotStats {
   /**
    * @return The number of snapshottale directories in the system
    */
-  public long getNumSnapshottableDirs();
+  public int getNumSnapshottableDirs();
 
   /**
    * @return The number of directories that have been snapshotted
    */
-  public long getNumSnapshots();
+  public int getNumSnapshots();
 
 }

TestNestedSnapshots.java

@@ -106,6 +106,17 @@ public class TestNestedSnapshots {
 
     assertFile(s1path, s2path, file1, true, true, true);
     assertFile(s1path, s2path, file2, true, false, false);
+
+    final String rootStr = "/";
+    final Path rootPath = new Path(rootStr);
+    hdfs.allowSnapshot(rootStr);
+    print("allow snapshot " + rootStr);
+    final Path rootSnapshot = hdfs.createSnapshot(rootPath);
+    print("create snapshot " + rootSnapshot);
+    hdfs.deleteSnapshot(rootPath, rootSnapshot.getName());
+    print("delete snapshot " + rootSnapshot);
+    hdfs.disallowSnapshot(rootStr);
+    print("disallow snapshot " + rootStr);
   }
 
   private static void print(String message) throws UnresolvedLinkException {
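
[Illustration, not part of the patch] The lines added above exercise the fixed path end to end on the root directory: allow snapshots on "/", take and delete a snapshot, then disallow snapshots again, which previously tried to replace the root inode. A compressed view of that lifecycle against an invented FileSystemLike interface (the real test drives DistributedFileSystem on a MiniDFSCluster):

public class RootSnapshotLifecycleSketch {
  interface FileSystemLike {
    void allowSnapshot(String path);
    String createSnapshot(String path);            // returns the snapshot name
    void deleteSnapshot(String path, String snapshotName);
    void disallowSnapshot(String path);
  }

  static void exercise(FileSystemLike fs) {
    fs.allowSnapshot("/");                         // root becomes snapshottable in place
    final String name = fs.createSnapshot("/");
    fs.deleteSnapshot("/", name);
    fs.disallowSnapshot("/");                      // must not replace the root inode
  }
}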

TestSnapshotMetrics.java

@@ -78,45 +78,45 @@ public class TestSnapshotMetrics {
    */
   @Test
   public void testSnapshottableDirs() throws Exception {
-    assertGauge("SnapshottableDirectories", 0L, getMetrics(NS_METRICS));
+    assertGauge("SnapshottableDirectories", 0, getMetrics(NS_METRICS));
     assertCounter("AllowSnapshotOps", 0L, getMetrics(NN_METRICS));
     assertCounter("DisallowSnapshotOps", 0L, getMetrics(NN_METRICS));
 
     // Allow snapshots for directories, and check the metrics
     hdfs.allowSnapshot(sub1.toString());
-    assertGauge("SnapshottableDirectories", 1L, getMetrics(NS_METRICS));
+    assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
     assertCounter("AllowSnapshotOps", 1L, getMetrics(NN_METRICS));
 
     Path sub2 = new Path(dir, "sub2");
     Path file = new Path(sub2, "file");
     DFSTestUtil.createFile(hdfs, file, 1024, REPLICATION, seed);
     hdfs.allowSnapshot(sub2.toString());
-    assertGauge("SnapshottableDirectories", 2L, getMetrics(NS_METRICS));
+    assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
     assertCounter("AllowSnapshotOps", 2L, getMetrics(NN_METRICS));
 
     Path subsub1 = new Path(sub1, "sub1sub1");
     Path subfile = new Path(subsub1, "file");
     DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
     hdfs.allowSnapshot(subsub1.toString());
-    assertGauge("SnapshottableDirectories", 3L, getMetrics(NS_METRICS));
+    assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
     assertCounter("AllowSnapshotOps", 3L, getMetrics(NN_METRICS));
 
     // Set an already snapshottable directory to snapshottable, should not
     // change the metrics
     hdfs.allowSnapshot(sub1.toString());
-    assertGauge("SnapshottableDirectories", 3L, getMetrics(NS_METRICS));
+    assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
     // But the number of allowSnapshot operations still increases
     assertCounter("AllowSnapshotOps", 4L, getMetrics(NN_METRICS));
 
     // Disallow the snapshot for snapshottable directories, then check the
     // metrics again
     hdfs.disallowSnapshot(sub1.toString());
-    assertGauge("SnapshottableDirectories", 2L, getMetrics(NS_METRICS));
+    assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
     assertCounter("DisallowSnapshotOps", 1L, getMetrics(NN_METRICS));
 
     // delete subsub1, snapshottable directories should be 1
     hdfs.delete(subsub1, true);
-    assertGauge("SnapshottableDirectories", 1L, getMetrics(NS_METRICS));
+    assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
 
     // list all the snapshottable directories
     SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();

@@ -130,7 +130,7 @@ public class TestSnapshotMetrics {
    */
   @Test
   public void testSnapshots() throws Exception {
-    assertGauge("Snapshots", 0L, getMetrics(NS_METRICS));
+    assertGauge("Snapshots", 0, getMetrics(NS_METRICS));
     assertCounter("CreateSnapshotOps", 0L, getMetrics(NN_METRICS));
 
     // Create a snapshot for a non-snapshottable directory, thus should not

@@ -138,16 +138,16 @@ public class TestSnapshotMetrics {
     try {
       hdfs.createSnapshot(sub1, "s1");
     } catch (Exception e) {}
-    assertGauge("Snapshots", 0L, getMetrics(NS_METRICS));
+    assertGauge("Snapshots", 0, getMetrics(NS_METRICS));
     assertCounter("CreateSnapshotOps", 1L, getMetrics(NN_METRICS));
 
     // Create snapshot for sub1
     hdfs.allowSnapshot(sub1.toString());
     hdfs.createSnapshot(sub1, "s1");
-    assertGauge("Snapshots", 1L, getMetrics(NS_METRICS));
+    assertGauge("Snapshots", 1, getMetrics(NS_METRICS));
     assertCounter("CreateSnapshotOps", 2L, getMetrics(NN_METRICS));
     hdfs.createSnapshot(sub1, "s2");
-    assertGauge("Snapshots", 2L, getMetrics(NS_METRICS));
+    assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
     assertCounter("CreateSnapshotOps", 3L, getMetrics(NN_METRICS));
     hdfs.getSnapshotDiffReport(sub1, "s1", "s2");
     assertCounter("SnapshotDiffReportOps", 1L, getMetrics(NN_METRICS));

@@ -158,17 +158,17 @@ public class TestSnapshotMetrics {
     DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
     hdfs.allowSnapshot(subsub1.toString());
     hdfs.createSnapshot(subsub1, "s11");
-    assertGauge("Snapshots", 3L, getMetrics(NS_METRICS));
+    assertGauge("Snapshots", 3, getMetrics(NS_METRICS));
     assertCounter("CreateSnapshotOps", 4L, getMetrics(NN_METRICS));
 
     // delete snapshot
     hdfs.deleteSnapshot(sub1, "s2");
-    assertGauge("Snapshots", 2L, getMetrics(NS_METRICS));
+    assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
     assertCounter("DeleteSnapshotOps", 1L, getMetrics(NN_METRICS));
 
     // rename snapshot
     hdfs.renameSnapshot(sub1, "s1", "NewS1");
-    assertGauge("Snapshots", 2L, getMetrics(NS_METRICS));
+    assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
     assertCounter("RenameSnapshotOps", 1L, getMetrics(NN_METRICS));
   }
 }