HDFS-3626. Creating file with invalid path can corrupt edit log. Contributed by Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1365801 13f79535-47bb-0310-9956-ffa450edef68
commit 972953bd77
parent 56b0912b6c

@@ -611,7 +611,7 @@ public class FileUtil {
    */
   public static int symLink(String target, String linkname) throws IOException{
     String cmd = "ln -s " + target + " " + linkname;
-    Process p = Runtime.getRuntime().exec(cmd, null);
+    Process p = Runtime.getRuntime().exec(new String[]{"ln","-s",target,linkname}, null);
     int returnVal = -1;
     try{
       returnVal = p.waitFor();
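
Why the patch switches exec() forms: Runtime.exec(String) tokenizes the
command on whitespace before running it, so a target or link name with an
embedded space is split into multiple arguments before "ln" ever sees it;
the String[] overload passes each element through as a single argument. A
minimal standalone sketch, not part of the patch (class name and paths are
hypothetical, and it needs a Unix-like system with "ln" on the PATH):

import java.io.IOException;

public class ExecFormDemo {
  public static void main(String[] args)
      throws IOException, InterruptedException {
    String target = "my file";           // hypothetical target with a space
    String linkname = "/tmp/demo-link";  // hypothetical link path

    // String form: tokenized on whitespace, so "my" and "file" arrive as
    // two separate arguments and the link ends up pointing at "my".
    Process bad = Runtime.getRuntime().exec("ln -s " + target + " " + linkname);
    bad.waitFor();

    // Array form (what the patch adopts): each element is exactly one
    // argument, so the embedded space survives intact.
    Process good = Runtime.getRuntime().exec(
        new String[] { "ln", "-s", target, linkname + "2" });
    good.waitFor();
  }
}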

@@ -139,7 +139,7 @@ public class Path implements Comparable {
    * Construct a path from a URI
    */
   public Path(URI aUri) {
-    uri = aUri;
+    uri = aUri.normalize();
   }

   /** Construct a Path from components. */
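
The one-line normalize() change is what makes the new TestPath assertion
below pass: java.net.URI.normalize() removes "." segments, resolves
"name/.." pairs, and collapses the empty segments left by duplicate
slashes. A quick plain-JDK illustration (hypothetical class name):

import java.net.URI;

public class NormalizeDemo {
  public static void main(String[] args) {
    // Duplicate slashes between components collapse to single slashes,
    // matching the assertion added to TestPath in this commit.
    System.out.println(URI.create("hdfs://foo//foo2///bar/baz///").normalize());
    // prints: hdfs://foo/foo2/bar/baz/

    // Dot segments are resolved away as well.
    System.out.println(URI.create("hdfs://nn/a/./b/../c").normalize());
    // prints: hdfs://nn/a/c
  }
}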

@@ -61,7 +61,7 @@ public class TestPath extends TestCase {
     assertEquals(pathString, new Path(pathString).toString());
   }

-  public void testNormalize() {
+  public void testNormalize() throws URISyntaxException {
     assertEquals("", new Path(".").toString());
     assertEquals("..", new Path("..").toString());
     assertEquals("/", new Path("/").toString());

@@ -75,6 +75,8 @@ public class TestPath extends TestCase {
     assertEquals("foo", new Path("foo/").toString());
     assertEquals("foo", new Path("foo//").toString());
     assertEquals("foo/bar", new Path("foo//bar").toString());
+    assertEquals("hdfs://foo/foo2/bar/baz/",
+        new Path(new URI("hdfs://foo//foo2///bar/baz///")).toString());
     if (Path.WINDOWS) {
       assertEquals("c:/a/b", new Path("c:\\a\\b").toString());
     }

@@ -531,6 +531,8 @@ Branch-2 ( Unreleased changes )

     HDFS-3720. hdfs.h must get packaged. (Colin Patrick McCabe via atm)

+    HDFS-3626. Creating file with invalid path can corrupt edit log (todd)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS

     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)

@@ -56,6 +56,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;

@@ -118,7 +119,7 @@ public class DFSUtil {

   /**
    * Whether the pathname is valid.  Currently prohibits relative paths,
-   * and names which contain a ":" or "/"
+   * names which contain a ":" or "//", or other non-canonical paths.
    */
   public static boolean isValidName(String src) {
     // Path must be absolute.

@@ -127,15 +128,22 @@ public class DFSUtil {
     }

     // Check for ".." "." ":" "/"
-    StringTokenizer tokens = new StringTokenizer(src, Path.SEPARATOR);
-    while(tokens.hasMoreTokens()) {
-      String element = tokens.nextToken();
+    String[] components = StringUtils.split(src, '/');
+    for (int i = 0; i < components.length; i++) {
+      String element = components[i];
       if (element.equals("..") ||
           element.equals(".")  ||
           (element.indexOf(":") >= 0)  ||
           (element.indexOf("/") >= 0)) {
         return false;
       }
+
+      // The string may start or end with a /, but not have
+      // "//" in the middle.
+      if (element.isEmpty() && i != components.length - 1 &&
+          i != 0) {
+        return false;
+      }
     }
     return true;
   }
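
Restated outside Hadoop, the rule the rewritten loop enforces is: every
component of an absolute path must be non-empty and must not be ".", "..",
or contain ":", except that an empty component is tolerated at position 0
(the leading "/") and at the very end (a trailing "/"). A standalone sketch
of the same logic, using java.lang.String.split as a stand-in for Hadoop's
StringUtils.split (class name hypothetical, not the HDFS source):

public class ValidNameSketch {
  static boolean isValidName(String src) {
    if (!src.startsWith("/")) {
      return false; // path must be absolute
    }
    // limit -1 keeps trailing empty strings, so a trailing "/" shows up
    // as an empty final component just like a leading "/" does at index 0
    String[] components = src.split("/", -1);
    for (int i = 0; i < components.length; i++) {
      String element = components[i];
      if (element.equals("..") || element.equals(".")
          || element.contains(":")) {
        return false;
      }
      // The string may start or end with a "/", but "//" must not
      // appear in the middle.
      if (element.isEmpty() && i != 0 && i != components.length - 1) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Mirrors the assertions in the new DFSUtil test below.
    System.out.println(isValidName("/foo/../bar")); // false
    System.out.println(isValidName("/foo//bar"));   // false
    System.out.println(isValidName("/"));           // true
    System.out.println(isValidName("/bar/"));       // true
  }
}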

@@ -230,8 +230,15 @@ public class FSDirectory implements Closeable {

     // Always do an implicit mkdirs for parent directory tree.
     long modTime = now();
-    if (!mkdirs(new Path(path).getParent().toString(), permissions, true,
-        modTime)) {
+
+    Path parent = new Path(path).getParent();
+    if (parent == null) {
+      // Trying to add "/" as a file - this path has no
+      // parent -- avoids an NPE below.
+      return null;
+    }
+
+    if (!mkdirs(parent.toString(), permissions, true, modTime)) {
       return null;
     }
     INodeFileUnderConstruction newNode = new INodeFileUnderConstruction(
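
The new null check covers the root case: Path.getParent() returns null when
the path is "/", so the old chained call
new Path(path).getParent().toString() would throw a NullPointerException if
a client asked to create "/" itself as a file. A two-line illustration
(assumes hadoop-common on the classpath; class name hypothetical):

import org.apache.hadoop.fs.Path;

public class ParentDemo {
  public static void main(String[] args) {
    System.out.println(new Path("/a/b").getParent()); // prints: /a
    System.out.println(new Path("/").getParent());    // prints: null
  }
}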

@@ -17,8 +17,7 @@
  */
 package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;

 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;

@@ -26,33 +25,38 @@ import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;

 /**
- * This class tests that the DFS command mkdirs cannot create subdirectories
- * from a file when passed an illegal path.  HADOOP-281.
+ * This class tests that the DFS command mkdirs only creates valid
+ * directories, and generally behaves as expected.
  */
 public class TestDFSMkdirs {
+  private Configuration conf = new HdfsConfiguration();
+
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+      "//test1",
+      "/test2/..",
+      "/test2//bar",
+      "/test2/../test4",
+      "/test5/."
+  };

-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    DataOutputStream stm = fileSys.create(name);
-    stm.writeBytes("wchien");
-    stm.close();
-  }
-
   /**
    * Tests mkdirs can create a directory that does not exist and will
-   * not create a subdirectory off a file.
+   * not create a subdirectory off a file. Regression test for HADOOP-281.
    */
   @Test
   public void testDFSMkdirs() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       // First create a new directory with mkdirs

@@ -63,7 +67,7 @@ public class TestDFSMkdirs {

       // Second, create a file in that directory.
       Path myFile = new Path("/test/mkdirs/myFile");
-      writeFile(fileSys, myFile);
+      DFSTestUtil.writeFile(fileSys, myFile, "hello world");

       // Third, use mkdir to create a subdirectory off of that file,
       // and check that it fails.

@@ -99,7 +103,7 @@ public class TestDFSMkdirs {
       // Create a dir when parent dir exists as a file, should fail
       IOException expectedException = null;
       String filePath = "/mkdir-file-" + Time.now();
-      writeFile(dfs, new Path(filePath));
+      DFSTestUtil.writeFile(dfs, new Path(filePath), "hello world");
       try {
         dfs.mkdir(new Path(filePath + "/mkdir"), FsPermission.getDefault());
       } catch (IOException e) {

@@ -126,4 +130,29 @@ public class TestDFSMkdirs {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a file using a non-canonical path
+   * (i.e. with extra slashes between components) and makes sure that the NN
+   * rejects it.
+   */
+  @Test
+  public void testMkdirRpcNonCanonicalPath() throws IOException {
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    try {
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        try {
+          nnrpc.mkdirs(pathStr, new FsPermission((short)0755), true);
+          fail("Did not fail when called with a non-canonicalized path: "
+              + pathStr);
+        } catch (InvalidPathException ipe) {
+          // expected
+        }
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }

@@ -620,4 +620,12 @@ public class TestDFSUtil {
     assertEquals(1, uris.size());
     assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
   }
+
+  @Test
+  public void testIsValidName() {
+    assertFalse(DFSUtil.isValidName("/foo/../bar"));
+    assertFalse(DFSUtil.isValidName("/foo//bar"));
+    assertTrue(DFSUtil.isValidName("/"));
+    assertTrue(DFSUtil.isValidName("/bar/"));
+  }
 }

@@ -41,6 +41,8 @@ import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
+import java.net.URISyntaxException;
 import java.net.UnknownHostException;
 import java.util.EnumSet;

@@ -53,6 +55,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;

@@ -69,7 +72,10 @@ import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Ignore;

@@ -93,6 +99,15 @@ public class TestFileCreation {
   static final int numBlocks = 2;
   static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
+
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+      "//foo",
+      "///foo2",
+      "//dir//file",
+      "////test2/file",
+      "/dir/./file2",
+      "/dir/../file3"
+  };

   // creates a file but does not close it
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)

@@ -988,4 +1003,93 @@ public class TestFileCreation {
       }
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a file using a non-canonical path
+   * (i.e. with extra slashes between components) and makes sure that the NN
+   * can properly restart.
+   *
+   * This test RPCs directly to the NN, to ensure that even an old client
+   * which passes an invalid path won't cause corrupt edits.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
+    doCreateTest(CreationMethod.DIRECT_NN_RPC);
+  }
+
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a string object.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartFromString()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_STRING);
+  }
+
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a URI object.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartFromUri()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_URI);
+  }
+
+  private static enum CreationMethod {
+    DIRECT_NN_RPC,
+    PATH_FROM_URI,
+    PATH_FROM_STRING
+  };
+
+  private void doCreateTest(CreationMethod method) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        System.out.println("Creating " + pathStr + " by " + method);
+        switch (method) {
+        case DIRECT_NN_RPC:
+          try {
+            nnrpc.create(pathStr, new FsPermission((short)0755), "client",
+                new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
+                true, (short)1, 128*1024*1024L);
+            fail("Should have thrown exception when creating '"
+                + pathStr + "'" + " by " + method);
+          } catch (InvalidPathException ipe) {
+            // When we create by direct NN RPC, the NN just rejects the
+            // non-canonical paths, rather than trying to normalize them.
+            // So, we expect all of them to fail.
+          }
+          break;
+
+        case PATH_FROM_URI:
+        case PATH_FROM_STRING:
+          // Unlike the above direct-to-NN case, we expect these to succeed,
+          // since the Path constructor should normalize the path.
+          Path p;
+          if (method == CreationMethod.PATH_FROM_URI) {
+            p = new Path(new URI(fs.getUri() + pathStr));
+          } else {
+            p = new Path(fs.getUri() + pathStr);
+          }
+          FSDataOutputStream stm = fs.create(p);
+          IOUtils.closeStream(stm);
+          break;
+        default:
+          throw new AssertionError("bad method: " + method);
+        }
+      }
+
+      cluster.restartNameNode();
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }
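
Taken together, the tests pin down the two layers of defense this commit
adds: Path constructors normalize non-canonical strings on the client
before they reach the wire, while the NameNode independently rejects any
non-canonical path arriving over raw RPC (as an old or buggy client might
send one), throwing InvalidPathException instead of writing a corrupt
edit. A condensed sketch of the client-side half (assumes hadoop-common on
the classpath; class name and the "nn" authority are hypothetical):

import org.apache.hadoop.fs.Path;

public class ClientNormalizationDemo {
  public static void main(String[] args) {
    // The Path(String) constructor collapses duplicate slashes, so a
    // current client only ever sends the canonical form to the NameNode.
    System.out.println(new Path("hdfs://nn//dir//file"));
    // prints: hdfs://nn/dir/file
  }
}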