diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 08f705a2238..cca755ebf44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -143,6 +143,9 @@ Trunk (Unreleased)
     HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple
     threads (cmccabe)
 
+    HDFS-7659. truncate should check negative value of the new length.
+    (Yi Liu via shv)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 1bb7f4a787b..21f75a586a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1984,6 +1984,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public boolean truncate(String src, long newLength) throws IOException {
     checkOpen();
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+    }
     TraceScope scope = getPathTraceScope("truncate", src);
     try {
       return namenode.truncate(src, newLength, clientName);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6a8f57452de..fae1641e0f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1911,6 +1911,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src=" + src
           + " newLength=" + newLength);
     }
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+    }
     HdfsFileStatus stat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 5498b128c2a..1612a24cf5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -34,6 +34,7 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -443,6 +444,14 @@ public class TestFileTruncate {
     } catch(IOException expected) {}
     out.close();
 
+    try {
+      fs.truncate(p, -1);
+      fail("Truncate must fail for a negative new length.");
+    } catch (HadoopIllegalArgumentException expected) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot truncate to a negative file size", expected);
+    }
+
     cluster.shutdownDataNodes();
     NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
         .setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);