From 74ec3c001def5fe38347fa41890c67cda275cbc0 Mon Sep 17 00:00:00 2001
From: Akira Ajisaka
Date: Tue, 28 Apr 2020 06:16:42 +0900
Subject: [PATCH] HDFS-15286. Concat on a same file deleting the file. Contributed by hemanthboyina.

(cherry picked from commit 5e0eda5d5f696aba7fc209874d232baf2a50d547)
---
 .../hdfs/server/namenode/FSDirConcatOp.java   |  2 +-
 .../hdfs/server/namenode/TestHDFSConcat.java  | 39 +++++++++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index b92c4140b4a..ebd7b6020ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -150,7 +150,7 @@ class FSDirConcatOp {
             + " is referred by some other reference in some snapshot.");
       }
       // source file cannot be the same with the target file
-      if (srcINode == targetINode) {
+      if (srcINode.equals(targetINode)) {
         throw new HadoopIllegalArgumentException("concat: the src file " + src
             + " is the same with the target file " + targetIIP.getPath());
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index b5e0efe0649..cbb516415c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -524,4 +525,42 @@ public class TestHDFSConcat {
       GenericTestUtils.assertExceptionContains(errMsg, e);
     }
   }
+
+  /**
+   * Test concat on same source and target file which is a inode reference.
+   */
+  @Test
+  public void testConcatOnSameFile() throws Exception {
+    String dir = "/dir1";
+    Path trgDir = new Path(dir);
+    dfs.mkdirs(new Path(dir));
+
+    // create a source file
+    String dir2 = "/dir2";
+    Path srcDir = new Path(dir2);
+    dfs.mkdirs(srcDir);
+    dfs.allowSnapshot(srcDir);
+    Path src = new Path(srcDir, "file1");
+    DFSTestUtil.createFile(dfs, src, 512, (short) 2, 0);
+
+    // make the file as an Inode reference and delete the reference
+    dfs.createSnapshot(srcDir, "s1");
+    dfs.rename(src, trgDir);
+    dfs.deleteSnapshot(srcDir, "s1");
+    Path[] srcs = new Path[1];
+    srcs[0] = new Path(dir, "file1");
+
+    // perform concat
+    LambdaTestUtils.intercept(RemoteException.class,
+        "concat: the src file /dir1/file1 is the same with the target"
+            + " file /dir1/file1",
+        () -> dfs.concat(srcs[0], srcs));
+
+    // the file should exists and read the file
+    byte[] buff = new byte[1080];
+    FSDataInputStream stream = dfs.open(srcs[0]);
+    stream.readFully(0, buff, 0, 512);
+
+    assertEquals(1, dfs.getContentSummary(new Path(dir)).getFileCount());
+  }
 }
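
Why the small change in FSDirConcatOp matters: `==` tests Java object identity, not file identity. When a file captured in a snapshot is renamed (as the new test does with /dir2/file1 -> /dir1/file1), path resolution hands back an INodeReference that wraps the underlying INodeFile, so resolving the same file through the source path and the target path can yield two distinct objects. The `srcINode == targetINode` guard then never fires, the concat proceeds, and removing the "source" deletes the file itself. INode#equals compares inode IDs, and an INodeReference reports the id of its referred inode, so the `equals` check sees through the wrapper. Below is a minimal, self-contained sketch of the identity-vs-equality pitfall; the classes are simplified stand-ins invented for illustration, not the real HDFS types.

// Simplified stand-ins for INodeFile and INodeReference, written only to
// illustrate the identity-vs-equality distinction; not Hadoop code.
class SketchInode {
  private final long id;
  SketchInode(long id) { this.id = id; }
  long getId() { return id; }
  @Override public boolean equals(Object o) {
    // Mirrors the idea behind INode#equals: compare inode IDs, not identity.
    return o instanceof SketchInode && ((SketchInode) o).getId() == getId();
  }
  @Override public int hashCode() { return Long.hashCode(id); }
}

// A reference wraps an existing inode and reports the referred inode's id,
// the way an INodeReference delegates getId() to its referred INode.
class SketchInodeReference extends SketchInode {
  SketchInodeReference(SketchInode referred) { super(referred.getId()); }
}

public class ConcatIdentitySketch {
  public static void main(String[] args) {
    SketchInode target = new SketchInode(1001);         // file resolved via the target path
    SketchInode src = new SketchInodeReference(target); // same file resolved after the rename
    System.out.println(src == target);      // false: the old check let the concat through
    System.out.println(src.equals(target)); // true:  the fixed check rejects it
  }
}

Running the sketch prints false then true: reference equality misses the wrapped inode, while id-based equality does not, which is exactly the case the new testConcatOnSameFile exercises.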