HDFS-7659. truncate should check negative value of the new length. Contributed by Yi Liu.

This commit is contained in:
yliu 2015-01-24 17:09:55 -08:00 committed by Konstantin V Shvachko
parent de66227a57
commit 5f70b7ecf0
4 changed files with 20 additions and 0 deletions

View File

@@ -497,6 +497,9 @@ Release 2.7.0 - UNRELEASED
HDFS-7643. Test case to ensure lazy persist files cannot be truncated.
(Yi Liu via Arpit Agarwal)
HDFS-7659. truncate should check negative value of the new length.
(Yi Liu via shv)
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@@ -1991,6 +1991,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public boolean truncate(String src, long newLength) throws IOException {
checkOpen();
if (newLength < 0) {
throw new HadoopIllegalArgumentException(
"Cannot truncate to a negative file size: " + newLength + ".");
}
try {
return namenode.truncate(src, newLength, clientName);
} catch (RemoteException re) {

View File

@@ -1965,6 +1965,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
NameNode.stateChangeLog.debug("DIR* NameSystem.truncate: src="
+ src + " newLength=" + newLength);
}
if (newLength < 0) {
throw new HadoopIllegalArgumentException(
"Cannot truncate to a negative file size: " + newLength + ".");
}
HdfsFileStatus stat = null;
FSPermissionChecker pc = getPermissionChecker();
checkOperation(OperationCategory.WRITE);

View File

@@ -34,6 +34,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -443,6 +444,14 @@ public class TestFileTruncate {
} catch(IOException expected) {}
out.close();
try {
fs.truncate(p, -1);
fail("Truncate must fail for a negative new length.");
} catch (HadoopIllegalArgumentException expected) {
GenericTestUtils.assertExceptionContains(
"Cannot truncate to a negative file size", expected);
}
cluster.shutdownDataNodes();
NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
.setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);