diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 47ead307577..192cfc82150 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -497,6 +497,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7643. Test case to ensure lazy persist files cannot be truncated.
     (Yi Liu via Arpit Agarwal)
 
+    HDFS-7659. truncate should check negative value of the new length.
+    (Yi Liu via shv)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index cf76913d3f2..c130d2f6fd6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1991,6 +1991,10 @@ public void rename(String src, String dst, Options.Rename... options)
    */
   public boolean truncate(String src, long newLength) throws IOException {
     checkOpen();
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+    }
     try {
       return namenode.truncate(src, newLength, clientName);
     } catch (RemoteException re) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 96672c2cefd..3f5cc4a96a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1965,6 +1965,10 @@ boolean truncateInt(String srcArg, long newLength,
       NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
           + src + " newLength=" + newLength);
     }
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+    }
     HdfsFileStatus stat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 5498b128c2a..1612a24cf5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -34,6 +34,7 @@
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -443,6 +444,14 @@ public void testTruncateFailure() throws IOException {
     } catch(IOException expected) {}
     out.close();
 
+    try {
+      fs.truncate(p, -1);
+      fail("Truncate must fail for a negative new length.");
+    } catch (HadoopIllegalArgumentException expected) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot truncate to a negative file size", expected);
+    }
+
     cluster.shutdownDataNodes();
     NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
         .setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);
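Note (not part of the patch): a minimal sketch of the behavior this change introduces, as seen from a FileSystem client. It assumes a running HDFS cluster reachable via fs.defaultFS and an illustrative path /tmp/truncate-example.txt; class and path names here are hypothetical. Because the check is now performed in DFSClient#truncate, a negative newLength is rejected with HadoopIllegalArgumentException before any RPC reaches the NameNode, and FSNamesystem applies the same guard for callers that bypass DFSClient.

import java.io.IOException;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NegativeTruncateExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at a running HDFS (2.7.0+) cluster.
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/truncate-example.txt");  // hypothetical path
    try {
      fs.truncate(file, -1);  // negative new length is rejected client-side
    } catch (HadoopIllegalArgumentException e) {
      // Expected message: "Cannot truncate to a negative file size: -1."
      System.out.println("Rejected as expected: " + e.getMessage());
    } finally {
      fs.close();
    }
  }
}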