HDFS-7659. truncate should check negative value of the new length. Contributed by Yi Liu.

yliu 2015-01-24 15:41:06 -08:00 committed by Konstantin V Shvachko
parent 8f26d5a8a1
commit e9fd46ddbf
4 changed files with 20 additions and 0 deletions
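The patch adds the same negative-length guard in two places: on the client in DFSClient#truncate and on the NameNode in FSNamesystem#truncateInt, so a bad length is rejected early even for callers that reach the NameNode without going through DFSClient. As a rough sketch of the caller-visible behavior after this change (illustrative only, not part of the commit; fs is assumed to be an initialized DistributedFileSystem and the path an existing file):

    // HadoopIllegalArgumentException extends IllegalArgumentException,
    // so the failure is unchecked and surfaces directly to the caller.
    try {
      fs.truncate(new Path("/test/file"), -1);
    } catch (HadoopIllegalArgumentException e) {
      // e.getMessage(): "Cannot truncate to a negative file size: -1."
    }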

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -143,6 +143,9 @@ Trunk (Unreleased)
     HDFS-7430. Rewrite the BlockScanner to use O(1) memory and use multiple
     threads (cmccabe)
 
+    HDFS-7659. truncate should check negative value of the new length.
+    (Yi Liu via shv)
+
   OPTIMIZATIONS
 
   BUG FIXES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -1984,6 +1984,10 @@ public void rename(String src, String dst, Options.Rename... options)
    */
   public boolean truncate(String src, long newLength) throws IOException {
     checkOpen();
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+    }
     TraceScope scope = getPathTraceScope("truncate", src);
     try {
       return namenode.truncate(src, newLength, clientName);
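A note on the placement: checking in DFSClient fails fast, before a trace scope is opened and before the truncate RPC goes out, so an obviously invalid length never costs a round trip to the NameNode.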

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1911,6 +1911,10 @@ boolean truncateInt(String srcArg, long newLength,
       NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
           + src + " newLength=" + newLength);
     }
+    if (newLength < 0) {
+      throw new HadoopIllegalArgumentException(
+          "Cannot truncate to a negative file size: " + newLength + ".");
+    }
     HdfsFileStatus stat = null;
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.WRITE);
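The NameNode-side copy of the guard is defense in depth: it also covers clients that speak ClientProtocol directly rather than through DFSClient, and it fires before any permission checking or write processing begins. The two copies are identical; if they were ever factored out, a shared helper would look roughly like this (hypothetical refactoring, not part of the commit):

    // Hypothetical helper; the commit inlines this check in both
    // DFSClient#truncate and FSNamesystem#truncateInt.
    static void checkTruncateLength(long newLength) {
      if (newLength < 0) {
        throw new HadoopIllegalArgumentException(
            "Cannot truncate to a negative file size: " + newLength + ".");
      }
    }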

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java

@@ -34,6 +34,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -443,6 +444,14 @@ public void testTruncateFailure() throws IOException {
     } catch(IOException expected) {}
     out.close();
 
+    try {
+      fs.truncate(p, -1);
+      fail("Truncate must fail for a negative new length.");
+    } catch (HadoopIllegalArgumentException expected) {
+      GenericTestUtils.assertExceptionContains(
+          "Cannot truncate to a negative file size", expected);
+    }
+
     cluster.shutdownDataNodes();
     NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
         .setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);
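The test pins down both the exception type and its message, via GenericTestUtils.assertExceptionContains, so a regression in either is caught. It should be runnable on its own with standard Surefire test selection (for example, mvn test -Dtest=TestFileTruncate from the hadoop-hdfs module, assuming a typical Maven setup).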