From d483ba25d7e90ec140a86c526c7e60cc6015f210 Mon Sep 17 00:00:00 2001
From: yliu <yliu@apache.org>
Date: Tue, 27 Jan 2015 23:47:52 +0800
Subject: [PATCH] HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  2 ++
 .../hadoop/hdfs/DistributedFileSystem.java         | 14 ++++++++-
 .../server/namenode/TestFileTruncate.java          | 30 +++++++++++++++++++
 3 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7ebc31e7634..2c73143c340 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -514,6 +514,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7566. Remove obsolete entries from hdfs-default.xml (Ray Chiang
     via aw)
 
+    HDFS-7677. DistributedFileSystem#truncate should resolve symlinks. (yliu)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index c7f8b7f4274..97ef2f50eb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -630,7 +630,19 @@ public class DistributedFileSystem extends FileSystem {
   @Override
   public boolean truncate(Path f, final long newLength) throws IOException {
     statistics.incrementWriteOps(1);
-    return dfs.truncate(getPathName(f), newLength);
+    Path absF = fixRelativePart(f);
+    return new FileSystemLinkResolver<Boolean>() {
+      @Override
+      public Boolean doCall(final Path p)
+          throws IOException, UnresolvedLinkException {
+        return dfs.truncate(getPathName(p), newLength);
+      }
+      @Override
+      public Boolean next(final FileSystem fs, final Path p)
+          throws IOException {
+        return fs.truncate(p, newLength);
+      }
+    }.resolve(this, absF);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index e8250a21d09..579e718906b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -759,6 +759,36 @@ public class TestFileTruncate {
     }
   }
 
+  @Test
+  public void testTruncate4Symlink() throws IOException {
+    final int fileLength = 3 * BLOCK_SIZE;
+
+    final Path parent = new Path("/test");
+    fs.mkdirs(parent);
+    final byte[] contents = AppendTestUtil.initBuffer(fileLength);
+    final Path file = new Path(parent, "testTruncate4Symlink");
+    writeContents(contents, fileLength, file);
+
+    final Path link = new Path(parent, "link");
+    fs.createSymlink(file, link, false);
+
+    final int newLength = fileLength/3;
+    boolean isReady = fs.truncate(link, newLength);
+
+    assertTrue("Recovery is not expected.", isReady);
+
+    FileStatus fileStatus = fs.getFileStatus(file);
+    assertThat(fileStatus.getLen(), is((long) newLength));
+
+    ContentSummary cs = fs.getContentSummary(parent);
+    assertEquals("Bad disk space usage",
+        cs.getSpaceConsumed(), newLength * REPLICATION);
+    // validate the file content
+    checkFullFile(file, newLength, contents);
+
+    fs.delete(parent, true);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path p)
       throws IOException {
     FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,