HDFS-4578. Restrict snapshot IDs to 24-bit wide. Contributed by Arpit Agarwal

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1477181 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-29 16:37:30 +00:00
parent 5276f4e04d
commit 884cbb681a
4 changed files with 130 additions and 11 deletions
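
For reference, the bound introduced here is small enough to state directly: with 24-bit IDs, getMaxSnapshotID() works out to 16,777,215. The snippet below is not part of the patch; it is a minimal sketch (with an illustrative class name) that just evaluates the same bit arithmetic the diff adds to SnapshotManager:

// Illustration only: evaluates the bound returned by the new getMaxSnapshotID().
public class MaxSnapshotId {
  private static final int SNAPSHOT_ID_BIT_WIDTH = 24;  // constant introduced by this patch

  public static void main(String[] args) {
    // (1 << 24) - 1 = 16777215: the largest snapshot ID handed out before
    // createSnapshot() starts rejecting requests.
    System.out.println((1 << SNAPSHOT_ID_BIT_WIDTH) - 1);
  }
}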

View File

@@ -318,3 +318,6 @@ Branch-2802 Snapshot (Unreleased)
HDFS-4767. If a directory is snapshottable, do not replace it when clearing
quota. (Jing Zhao via szetszwo)
HDFS-4578. Restrict snapshot IDs to 24-bit wide. (Arpit Agarwal via
szetszwo)

View File

@@ -54,6 +54,7 @@ import com.google.common.base.Preconditions;
*/
public class SnapshotManager implements SnapshotStats {
private final FSDirectory fsdir;
private static final int SNAPSHOT_ID_BIT_WIDTH = 24;
private final AtomicInteger numSnapshots = new AtomicInteger();
@@ -128,8 +129,26 @@ public class SnapshotManager implements SnapshotStats {
removeSnapshottable(s);
}
/**
* Find the source root directory where the snapshot will be taken
* for a given path.
*
* @param path The directory path where the snapshot will be taken.
* @return Snapshottable directory.
* @throws IOException
* Throw IOException when the given path does not lead to an
* existing snapshottable directory.
*/
public INodeDirectorySnapshottable getSnapshottableRoot(final String path
) throws IOException {
final INodesInPath i = fsdir.getINodesInPath4Write(path);
return INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
}
/**
* Create a snapshot of the given path.
* It is assumed that the caller will perform synchronization.
*
* @param path
* The directory path where the snapshot will be taken.
* @param snapshotName
@@ -142,10 +161,17 @@ public class SnapshotManager implements SnapshotStats {
*/
public String createSnapshot(final String path, String snapshotName
) throws IOException {
// Find the source root directory path where the snapshot is taken.
final INodesInPath i = fsdir.getINodesInPath4Write(path);
final INodeDirectorySnapshottable srcRoot
= INodeDirectorySnapshottable.valueOf(i.getLastINode(), path);
INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
if (snapshotCounter == getMaxSnapshotID()) {
// We have reached the maximum allowable snapshot ID and since we don't
// handle rollover we will fail all subsequent snapshot creation
// requests.
//
throw new SnapshotException(
"Failed to create the snapshot. The FileSystem has run out of " +
"snapshot IDs and ID rollover is not supported.");
}
srcRoot.addSnapshot(snapshotCounter, snapshotName);
@@ -166,14 +192,10 @@ public class SnapshotManager implements SnapshotStats {
BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes)
throws IOException {
// parse the path, and check if the path is a snapshot path
INodesInPath inodesInPath = fsdir.getINodesInPath4Write(path.toString());
// transfer the inode for path to an INodeDirectorySnapshottable.
// the INodeDirectorySnapshottable#valueOf method will throw Exception
// if the path is not for a snapshottable directory
INodeDirectorySnapshottable dir = INodeDirectorySnapshottable.valueOf(
inodesInPath.getLastINode(), path.toString());
dir.removeSnapshot(snapshotName, collectedBlocks, removedINodes);
INodeDirectorySnapshottable srcRoot = getSnapshottableRoot(path);
srcRoot.removeSnapshot(snapshotName, collectedBlocks, removedINodes);
numSnapshots.getAndDecrement();
}
@@ -298,4 +320,14 @@ public class SnapshotManager implements SnapshotStats {
return snapshotRoot.computeDiff(from, to);
}
/**
* Returns the maximum allowable snapshot ID based on the bit width of the
* snapshot ID.
*
* @return maximum allowable snapshot ID.
*/
public int getMaxSnapshotID() {
return ((1 << SNAPSHOT_ID_BIT_WIDTH) - 1);
}
}
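
Two things about the SnapshotManager changes above are worth spelling out. The rollover guard compares snapshotCounter, which only moves forward, against getMaxSnapshotID(); deleteSnapshot() decrements numSnapshots but does not touch the counter, so once the maximum is reached every later createSnapshot() call fails, which is exactly what the new unit test at the end of this commit checks. As a minimal sketch of that pattern under assumed names (IdAllocator is not an HDFS class, and IllegalStateException stands in for SnapshotException):

// Sketch of a fixed-width, no-rollover ID allocator mirroring the guard added
// to createSnapshot(); illustrative code, not part of the patch.
public class IdAllocator {
  private static final int ID_BIT_WIDTH = 24;  // same width the patch uses
  private int counter = 0;                     // analogous to snapshotCounter

  public int getMaxId() {
    return (1 << ID_BIT_WIDTH) - 1;
  }

  // Returns the next ID, or fails once the ID space is exhausted. IDs are never
  // reused, so releasing one does not make room for another.
  public int allocate() {
    if (counter == getMaxId()) {
      throw new IllegalStateException(
          "ID space exhausted; rollover is not supported");
    }
    return counter++;
  }
}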

View File

@@ -140,6 +140,10 @@ public class TestNestedSnapshots {
}
}
/**
* Test the snapshot limit of a single snapshottable directory.
* @throws Exception
*/
@Test (timeout=300000)
public void testSnapshotLimit() throws Exception {
final int step = 1000;
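
Only the javadoc and the first line of testSnapshotLimit appear in this hunk. As a rough, hypothetical sketch of the shape a per-directory limit test like this takes (the limit value, the paths, and the hdfs handle, assumed to be a DistributedFileSystem backed by a MiniDFSCluster, are illustrative assumptions rather than the actual test body):

// Hypothetical sketch only; not the body of TestNestedSnapshots#testSnapshotLimit.
@Test (timeout=300000)
public void snapshotLimitSketch() throws Exception {
  final int step = 1000;
  final int assumedLimit = 1 << 16;              // assumed per-directory cap
  final Path dir = new Path("/snapshottableDir");
  hdfs.mkdirs(dir);
  hdfs.allowSnapshot(dir);
  for (int i = 0; i < assumedLimit; i++) {
    hdfs.createSnapshot(dir, "s" + i);
    if (i % step == 0) {
      hdfs.mkdirs(new Path(dir, "sub" + i));     // change the tree between snapshots
    }
  }
  try {
    hdfs.createSnapshot(dir, "overTheLimit");
    Assert.fail("Expected snapshot creation to fail once the limit is reached");
  } catch (IOException ioe) {
    // expected: the directory has reached its snapshot limit
  }
}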

View File

@@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.util.ArrayList;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.junit.*;
import static org.mockito.Mockito.*;
/**
* Testing snapshot manager functionality.
*/
public class TestSnapshotManager {
private static final int testMaxSnapshotLimit = 7;
/**
* Test that the global limit on snapshots is honored.
*/
@Test (timeout=10000)
public void testSnapshotLimits() throws Exception {
// Setup mock objects for SnapshotManager.createSnapshot.
//
INodeDirectorySnapshottable ids = mock(INodeDirectorySnapshottable.class);
FSDirectory fsdir = mock(FSDirectory.class);
SnapshotManager sm = spy(new SnapshotManager(fsdir));
doReturn(ids).when(sm).getSnapshottableRoot(anyString());
doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();
// Create testMaxSnapshotLimit snapshots. These should all succeed.
//
for (Integer i = 0; i < testMaxSnapshotLimit; ++i) {
sm.createSnapshot("dummy", i.toString());
}
// Attempt to create one more snapshot. This should fail due to snapshot
// ID rollover.
//
try {
sm.createSnapshot("dummy", "shouldFailSnapshot");
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(
se.getMessage().toLowerCase().contains("rollover"));
}
// Delete a snapshot to free up a slot.
//
sm.deleteSnapshot("", "", mock(INode.BlocksMapUpdateInfo.class), new ArrayList<INode>());
// Attempt to create a snapshot again. It should still fail due
// to snapshot ID rollover.
//
try {
sm.createSnapshot("dummy", "shouldFailSnapshot2");
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(
se.getMessage().toLowerCase().contains("rollover"));
}
}
}
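
A note on the design choice in this new test: spy() wraps a real SnapshotManager, so createSnapshot() runs its real guard logic, while the stubbed getSnapshottableRoot() and getMaxSnapshotID() avoid standing up a real namesystem and shrink the limit to 7 instead of the full 24-bit range. The second failure expectation, after deleteSnapshot(), holds because the delete lowers numSnapshots but leaves snapshotCounter untouched, so the ID space stays exhausted. The standalone sketch below shows the same spy/doReturn pattern with hypothetical classes (Limits and SpySketch are not HDFS code):

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

class Limits {
  int max() { return (1 << 24) - 1; }
  boolean exhausted(int counter) { return counter >= max(); }  // real logic under test
}

public class SpySketch {
  public static void main(String[] args) {
    Limits limits = spy(new Limits());
    doReturn(7).when(limits).max();           // shrink the limit, as the test above does
    // exhausted() is not stubbed, so its real code runs and calls the stubbed max().
    System.out.println(limits.exhausted(7));  // prints "true"
  }
}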