HDFS-5102. Snapshot names should not be allowed to contain slash characters. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514797 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2013-08-16 17:14:34 +00:00
parent 45694cce8e
commit 8d21926c26
4 changed files with 76 additions and 0 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -350,6 +350,9 @@ Release 2.1.1-beta - UNRELEASED
    HDFS-5103. TestDirectoryScanner fails on Windows. (Chuan Liu via cnauroth)

    HDFS-5102. Snapshot names should not be allowed to contain slash characters.
    (jing9)

Release 2.1.0-beta - 2013-08-22

  INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -2093,6 +2093,10 @@ private void verifyQuotaForRename(INode[] src, INode[] dst)
  /** Verify if the snapshot name is legal. */
  void verifySnapshotName(String snapshotName, String path)
      throws PathComponentTooLongException {
    if (snapshotName.contains(Path.SEPARATOR)) {
      throw new HadoopIllegalArgumentException(
          "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"");
    }
    final byte[] bytes = DFSUtil.string2Bytes(snapshotName);
    verifyINodeName(bytes);
    verifyMaxComponentLength(bytes, path, 0);
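
For context only (not part of this patch): a minimal, hedged sketch of how the new check surfaces to a client. verifySnapshotName runs on the NameNode, so the HadoopIllegalArgumentException reaches the caller wrapped in a RemoteException, which is also what the tests below expect. The cluster setup, class name, and /dir path here are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.RemoteException;

public class SnapshotNameCheckSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path dir = new Path("/dir");
      hdfs.mkdirs(dir);
      hdfs.allowSnapshot(dir);           // make /dir snapshottable
      hdfs.createSnapshot(dir, "s1");    // legal name, accepted
      try {
        hdfs.createSnapshot(dir, "foo/bar");   // slash in name, now rejected
      } catch (RemoteException e) {
        // Expected message: Snapshot name cannot contain "/"
        System.out.println("Rejected as expected: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}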

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java

@@ -44,6 +44,7 @@
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
@@ -54,6 +55,7 @@
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.XmlImageVisitor;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
@@ -341,6 +343,37 @@ public void testUpdateDirectory() throws Exception {
    assertEquals(oldStatus.getAccessTime(), snapshotStatus.getAccessTime());
  }

  /**
   * Test creating a snapshot with an illegal name.
   */
  @Test
  public void testCreateSnapshotWithIllegalName() throws Exception {
    final Path dir = new Path("/dir");
    hdfs.mkdirs(dir);

    final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
    try {
      hdfs.createSnapshot(dir, name1);
      fail("Exception expected when an illegal name is given");
    } catch (RemoteException e) {
      String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR
          + "\" is a reserved name.";
      GenericTestUtils.assertExceptionContains(errorMsg, e);
    }

    String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"";
    final String[] badNames = new String[] { "foo" + Path.SEPARATOR,
        Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
    for (String badName : badNames) {
      try {
        hdfs.createSnapshot(dir, badName);
        fail("Exception expected when an illegal name is given");
      } catch (RemoteException e) {
        GenericTestUtils.assertExceptionContains(errorMsg, e);
      }
    }
  }

  /**
   * Creating snapshots for a directory that is not snapshottable must fail.
   */

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java

@@ -20,6 +20,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.List;
@@ -29,11 +30,14 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -190,4 +194,36 @@ public void testRenameToExistingSnapshot() throws Exception {
    exception.expectMessage(error);
    hdfs.renameSnapshot(sub1, "s1", "s2");
  }

  /**
   * Test renaming a snapshot with an illegal name.
   */
  @Test
  public void testRenameWithIllegalName() throws Exception {
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
    // Create snapshots for sub1
    SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");

    final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
    try {
      hdfs.renameSnapshot(sub1, "s1", name1);
      fail("Exception expected when an illegal name is given for rename");
    } catch (RemoteException e) {
      String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR
          + "\" is a reserved name.";
      GenericTestUtils.assertExceptionContains(errorMsg, e);
    }

    String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"";
    final String[] badNames = new String[] { "foo" + Path.SEPARATOR,
        Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
    for (String badName : badNames) {
      try {
        hdfs.renameSnapshot(sub1, "s1", badName);
        fail("Exception expected when an illegal name is given");
      } catch (RemoteException e) {
        GenericTestUtils.assertExceptionContains(errorMsg, e);
      }
    }
  }
}
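
Similarly, for the rename path (again a sketch, not part of this patch; the /sub1 path and class name are illustrative assumptions): a new snapshot name containing Path.SEPARATOR is rejected before anything is modified, so the existing snapshot stays intact.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.RemoteException;

public class RenameSnapshotSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path sub1 = new Path("/sub1");
      hdfs.mkdirs(sub1);
      hdfs.allowSnapshot(sub1);
      hdfs.createSnapshot(sub1, "s1");
      try {
        hdfs.renameSnapshot(sub1, "s1", "foo/bar");   // slash in new name
      } catch (RemoteException e) {
        // Expected: rename is refused and snapshot "s1" still exists
        System.out.println("Rename rejected: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}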