From e363417e7b7abdd5d149f303f729ecf3e95ef8f3 Mon Sep 17 00:00:00 2001
From: Uma Mahesh
Date: Thu, 10 Dec 2015 23:55:29 -0800
Subject: [PATCH] HDFS-9472. concat() API does not give proper exception
 messages on ./reserved relative path (Rakesh R via umamahesh)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../hdfs/server/namenode/FSDirConcatOp.java  | 24 +++++++++++++++----
 .../hdfs/server/namenode/TestHDFSConcat.java | 19 +++++++++++++++
 3 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7fe585041b3..1696053ab9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1737,6 +1737,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9527. The return type of FSNamesystem.getBlockCollection should be
     changed to INodeFile. (szetszwo)
 
+    HDFS-9472. concat() API does not give proper exception messages on ./reserved
+    relative path (Rakesh R via umamahesh)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 83782d5a9ca..5310b947ff3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -49,14 +49,11 @@ class FSDirConcatOp {
 
   static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
     boolean logRetryCache) throws IOException {
-    Preconditions.checkArgument(!target.isEmpty(), "Target file name is empty");
-    Preconditions.checkArgument(srcs != null && srcs.length > 0,
-      "No sources given");
+    validatePath(target, srcs);
     assert srcs != null;
     if (FSDirectory.LOG.isDebugEnabled()) {
       FSDirectory.LOG.debug("concat {} to {}", Arrays.toString(srcs), target);
     }
-
     final INodesInPath targetIIP = fsd.getINodesInPath4Write(target);
     // write permission for the target
     FSPermissionChecker pc = null;
@@ -86,6 +83,25 @@ class FSDirConcatOp {
     return fsd.getAuditFileInfo(targetIIP);
   }
 
+  private static void validatePath(String target, String[] srcs)
+      throws IOException {
+    Preconditions.checkArgument(!target.isEmpty(), "Target file name is empty");
+    Preconditions.checkArgument(srcs != null && srcs.length > 0,
+        "No sources given");
+    if (FSDirectory.isReservedRawName(target)
+        || FSDirectory.isReservedInodesName(target)) {
+      throw new IOException("Concat operation doesn't support "
+          + FSDirectory.DOT_RESERVED_STRING + " relative path : " + target);
+    }
+    for (String srcPath : srcs) {
+      if (FSDirectory.isReservedRawName(srcPath)
+          || FSDirectory.isReservedInodesName(srcPath)) {
+        throw new IOException("Concat operation doesn't support "
+            + FSDirectory.DOT_RESERVED_STRING + " relative path : " + srcPath);
+      }
+    }
+  }
+
   private static void verifyTargetFile(FSDirectory fsd, final String target,
       final INodesInPath targetIIP) throws IOException {
     // check the target
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index ded00317ae0..6dc0782ab47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -503,4 +503,23 @@ public class TestHDFSConcat {
     assertEquals(blockSize * 2, dfs.getFileStatus(trg).getLen());
     assertFalse(dfs.exists(src));
   }
+
+  @Test(timeout = 30000)
+  public void testConcatReservedRelativePaths() throws IOException {
+    String testPathDir = "/.reserved/raw/ezone";
+    Path dir = new Path(testPathDir);
+    dfs.mkdirs(dir);
+    Path trg = new Path(testPathDir, "trg");
+    Path src = new Path(testPathDir, "src");
+    DFSTestUtil.createFile(dfs, trg, blockSize, REPL_FACTOR, 1);
+    DFSTestUtil.createFile(dfs, src, blockSize, REPL_FACTOR, 1);
+    try {
+      dfs.concat(trg, new Path[] { src });
+      Assert.fail("Must throw Exception!");
+    } catch (IOException e) {
+      String errMsg = "Concat operation doesn't support "
+          + FSDirectory.DOT_RESERVED_STRING + " relative path : " + trg;
+      GenericTestUtils.assertExceptionContains(errMsg, e);
+    }
+  }
 }
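
Note: for reference, below is a minimal client-side sketch of the behavior this
patch enforces. It mirrors the new test rather than extending it; the
MiniDFSCluster setup, the class name, and the /.reserved/raw/ezone path are
illustrative choices and not part of the patch itself.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ConcatReservedPathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Spin up a single-node test cluster, as TestHDFSConcat does.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();
      DistributedFileSystem dfs = cluster.getFileSystem();
      // Any path given through /.reserved is rejected by concat after this patch.
      Path dir = new Path("/.reserved/raw/ezone");
      dfs.mkdirs(dir);
      Path trg = new Path(dir, "trg");
      Path src = new Path(dir, "src");
      DFSTestUtil.createFile(dfs, trg, 1024, (short) 1, 1);
      DFSTestUtil.createFile(dfs, src, 1024, (short) 1, 1);
      try {
        dfs.concat(trg, new Path[] { src });
      } catch (IOException e) {
        // The NameNode now fails fast with a descriptive message instead of an
        // opaque error, e.g.:
        // "Concat operation doesn't support .reserved relative path : <target>"
        System.out.println(e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}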