diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 8d1047f3d0b..a61c3490ba0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -31,6 +31,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11045. Introducing a tool to detect flaky tests of hadoop jenkins testing
job. (Yongjun Zhang and Todd Lipcon via ozawa)
+ HADOOP-11510. Expose truncate API via FileContext. (yliu)
+
IMPROVEMENTS
HADOOP-11483. HardLink.java should use the jdk7 createLink method (aajisaka)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 92d4ecae166..975cc3cc87a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -637,6 +637,15 @@ public abstract class AbstractFileSystem {
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
+ /**
+ * The specification of this method matches that of
+ * {@link FileContext#truncate(Path, long)} except that Path f must be for
+ * this file system.
+ */
+ public abstract boolean truncate(Path f, long newLength)
+ throws AccessControlException, FileNotFoundException,
+ UnresolvedLinkException, IOException;
+
/**
* The specification of this method matches that of
* {@link FileContext#setReplication(Path, short)} except that Path f must be
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index ab5cd13e0c3..7dc4a809cd9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -297,6 +297,11 @@ public abstract class ChecksumFs extends FilterFs {
}
+ @Override
+ public boolean truncate(Path f, long newLength) throws IOException {
+ throw new IOException("Not supported");
+ }
+
/**
* Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
index 1cdcb277604..09707c6c079 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
@@ -169,6 +169,12 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
return fsImpl.open(f, bufferSize);
}
+ @Override
+ public boolean truncate(Path f, long newLength) throws IOException {
+ checkPath(f);
+ return fsImpl.truncate(f, newLength);
+ }
+
@Override
@SuppressWarnings("deprecation") // call to rename
public void renameInternal(Path src, Path dst) throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 3c5e9ab8c57..705a8d8eab1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -828,6 +828,49 @@ public class FileContext {
}.resolve(this, absF);
}
+ /**
+ * Truncate the file in the indicated path to the indicated size.
+ *
+ * - Fails if path is a directory.
+ *
+ * - Fails if path does not exist.
+ *
+ * - Fails if path is not closed.
+ *
+ * - Fails if new size is greater than current size.
+ *
+ * @param f The path to the file to be truncated
+ * @param newLength The size the file is to be truncated to
+ *
+ * @return <code>true</code> if the file has been truncated to the desired
+ * <code>newLength</code> and is immediately available to be reused for
+ * write operations such as append, or
+ * <code>false</code> if a background process of adjusting the length of
+ * the last block has been started, and clients should wait for it to
+ * complete before proceeding with further file updates.
+ *
+ * @throws AccessControlException If access is denied
+ * @throws FileNotFoundException If file <code>f</code> does not exist
+ * @throws UnsupportedFileSystemException If file system for <code>f</code> is
+ * not supported
+ * @throws IOException If an I/O error occurred
+ *
+ * Exceptions applicable to file systems accessed over RPC:
+ * @throws RpcClientException If an exception occurred in the RPC client
+ * @throws RpcServerException If an exception occurred in the RPC server
+ * @throws UnexpectedServerException If server implementation throws
+ * undeclared exception to RPC server
+ */
+ public boolean truncate(final Path f, final long newLength)
+ throws AccessControlException, FileNotFoundException,
+ UnsupportedFileSystemException, IOException {
+ final Path absF = fixRelativePart(f);
+ return new FSLinkResolver<Boolean>() {
+ @Override
+ public Boolean next(final AbstractFileSystem fs, final Path p)
+ throws IOException, UnresolvedLinkException {
+ return fs.truncate(p, newLength);
+ }
+ }.resolve(this, absF);
+ }
+
/**
* Set replication for an existing file.
*
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index b6e1d96e038..4f28a9a6b2d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -212,6 +212,14 @@ public abstract class FilterFs extends AbstractFileSystem {
return myFs.open(f, bufferSize);
}
+ @Override
+ public boolean truncate(Path f, long newLength)
+ throws AccessControlException, FileNotFoundException,
+ UnresolvedLinkException, IOException {
+ checkPath(f);
+ return myFs.truncate(f, newLength);
+ }
+
@Override
public void renameInternal(Path src, Path dst)
throws IOException, UnresolvedLinkException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index 9569e1089bb..68e756a81b5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -247,6 +247,12 @@ class ChRootedFs extends AbstractFileSystem {
return myFs.open(fullPath(f), bufferSize);
}
+ @Override
+ public boolean truncate(final Path f, final long newLength)
+ throws IOException, UnresolvedLinkException {
+ return myFs.truncate(fullPath(f), newLength);
+ }
+
@Override
public void renameInternal(final Path src, final Path dst)
throws IOException, UnresolvedLinkException {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
index 014f4881275..975496aa42b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
@@ -452,7 +452,15 @@ public class ViewFs extends AbstractFileSystem {
return res.targetFileSystem.open(res.remainingPath, bufferSize);
}
-
+ @Override
+ public boolean truncate(final Path f, final long newLength)
+ throws AccessControlException, FileNotFoundException,
+ UnresolvedLinkException, IOException {
+ InodeTree.ResolveResult<AbstractFileSystem> res =
+ fsState.resolve(getUriPath(f), true);
+ return res.targetFileSystem.truncate(res.remainingPath, newLength);
+ }
+
@Override
public void renameInternal(final Path src, final Path dst,
final boolean overwrite) throws IOException, UnresolvedLinkException {
@@ -877,6 +885,13 @@ public class ViewFs extends AbstractFileSystem {
throw new FileNotFoundException("Path points to dir not a file");
}
+ @Override
+ public boolean truncate(final Path f, final long newLength)
+ throws FileNotFoundException, IOException {
+ checkPathIsSlash(f);
+ throw readOnlyMountTable("truncate", f);
+ }
+
@Override
public void renameInternal(final Path src, final Path dst)
throws AccessControlException, IOException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
index 3bd14f1495a..6b9378d90aa 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestAfsCheckPath.java
@@ -140,6 +140,12 @@ public class TestAfsCheckPath {
return null;
}
+ @Override
+ public boolean truncate(Path f, long newLength) throws IOException {
+ // deliberately empty
+ return false;
+ }
+
@Override
public void renameInternal(Path src, Path dst) throws IOException {
// deliberately empty
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 1b9f515deb8..8c09193c880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -317,6 +317,12 @@ public class Hdfs extends AbstractFileSystem {
return dfs.createWrappedInputStream(dfsis);
}
+ @Override
+ public boolean truncate(Path f, long newLength)
+ throws IOException, UnresolvedLinkException {
+ return dfs.truncate(getUriPath(f), newLength);
+ }
+
@Override
public void renameInternal(Path src, Path dst)
throws IOException, UnresolvedLinkException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
index 8a215b0cee8..94fb0fba930 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
@@ -28,6 +28,7 @@ import java.net.URISyntaxException;
import javax.security.auth.login.LoginException;
import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -114,7 +115,36 @@ public class TestHDFSFileContextMainOperations extends
private Path getTestRootPath(FileContext fc, String path) {
return fileContextTestHelper.getTestRootPath(fc, path);
}
-
+
+ @Test
+ public void testTruncate() throws Exception {
+ final short repl = 3;
+ final int blockSize = 1024;
+ final int numOfBlocks = 2;
+ DistributedFileSystem fs = cluster.getFileSystem();
+ Path dir = getTestRootPath(fc, "test/hadoop");
+ Path file = getTestRootPath(fc, "test/hadoop/file");
+
+ final byte[] data = FileSystemTestHelper.getFileData(
+ numOfBlocks, blockSize);
+ FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
+
+ final int newLength = blockSize;
+
+ boolean isReady = fc.truncate(file, newLength);
+
+ Assert.assertTrue("Recovery is not expected.", isReady);
+
+ FileStatus fileStatus = fc.getFileStatus(file);
+ Assert.assertEquals(fileStatus.getLen(), newLength);
+ AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
+
+ ContentSummary cs = fs.getContentSummary(dir);
+ Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
+ newLength * repl);
+ Assert.assertTrue(fs.delete(dir, true));
+ }
+
@Test
public void testOldRenameWithQuota() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem();