HADOOP-11510. Expose truncate API via FileContext. (yliu)

This commit is contained in:
yliu 2015-02-10 01:43:08 +08:00
parent 4d4442cb39
commit ae316705bb
11 changed files with 138 additions and 2 deletions

View File

@ -31,6 +31,8 @@ Release 2.7.0 - UNRELEASED
HADOOP-11045. Introducing a tool to detect flaky tests of hadoop jenkins testing
job. (Yongjun Zhang and Todd Lipcon via ozawa)
HADOOP-11510. Expose truncate API via FileContext. (yliu)
IMPROVEMENTS
HADOOP-11483. HardLink.java should use the jdk7 createLink method (aajisaka)

View File

@ -637,6 +637,15 @@ public abstract class AbstractFileSystem {
throws AccessControlException, FileNotFoundException, throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException; UnresolvedLinkException, IOException;
/**
 * Truncate the file denoted by {@code f} to {@code newLength} bytes.
 * The specification of this method matches that of
 * {@link FileContext#truncate(Path, long)} except that Path f must be for
 * this file system.
 *
 * @param f path to the file to truncate, relative to this file system
 * @param newLength the length the file is truncated to
 * @return {@code true} if the file is immediately available for reuse,
 *         {@code false} if truncation completes in the background
 * @throws AccessControlException if permission is denied
 * @throws FileNotFoundException if {@code f} does not exist
 * @throws UnresolvedLinkException if {@code f} contains a symlink
 * @throws IOException on other I/O errors
 */
public abstract boolean truncate(Path f, long newLength)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException;
/** /**
* The specification of this method matches that of * The specification of this method matches that of
* {@link FileContext#setReplication(Path, short)} except that Path f must be * {@link FileContext#setReplication(Path, short)} except that Path f must be

View File

@ -297,6 +297,11 @@ public abstract class ChecksumFs extends FilterFs {
} }
// Truncate is rejected for checksummed files.
// NOTE(review): presumably because shortening the data file would leave the
// stored per-chunk checksum file inconsistent -- confirm against ChecksumFs.
@Override
public boolean truncate(Path f, long newLength) throws IOException {
throw new IOException("Not supported");
}
/** /**
* Opens an FSDataInputStream at the indicated Path. * Opens an FSDataInputStream at the indicated Path.
* @param f the file name to open * @param f the file name to open

View File

@ -169,6 +169,12 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
return fsImpl.open(f, bufferSize); return fsImpl.open(f, bufferSize);
} }
/**
 * Truncate the file at {@code f} to {@code newLength} by delegating to the
 * wrapped {@code FileSystem} implementation.
 */
@Override
public boolean truncate(Path f, long newLength) throws IOException {
  // Reject paths that do not belong to this file system before delegating.
  checkPath(f);
  final boolean immediatelyAvailable = fsImpl.truncate(f, newLength);
  return immediatelyAvailable;
}
@Override @Override
@SuppressWarnings("deprecation") // call to rename @SuppressWarnings("deprecation") // call to rename
public void renameInternal(Path src, Path dst) throws IOException { public void renameInternal(Path src, Path dst) throws IOException {

View File

@ -828,6 +828,49 @@ public class FileContext {
}.resolve(this, absF); }.resolve(this, absF);
} }
/**
 * Truncate the file at the given path down to the given size.
 * <ul>
 * <li>Fails if the path names a directory.
 * <li>Fails if the path does not exist.
 * <li>Fails if the file is not closed.
 * <li>Fails if {@code newLength} exceeds the current file size.
 * </ul>
 * @param f The path to the file to be truncated
 * @param newLength The size the file is to be truncated to
 *
 * @return <code>true</code> if the file has been truncated to the desired
 * <code>newLength</code> and is immediately available to be reused for
 * write operations such as <code>append</code>, or
 * <code>false</code> if a background process of adjusting the length of
 * the last block has been started, and clients should wait for it to
 * complete before proceeding with further file updates.
 *
 * @throws AccessControlException If access is denied
 * @throws FileNotFoundException If file <code>f</code> does not exist
 * @throws UnsupportedFileSystemException If file system for <code>f</code> is
 * not supported
 * @throws IOException If an I/O error occurred
 *
 * Exceptions applicable to file systems accessed over RPC:
 * @throws RpcClientException If an exception occurred in the RPC client
 * @throws RpcServerException If an exception occurred in the RPC server
 * @throws UnexpectedServerException If server implementation throws
 * undeclared exception to RPC server
 */
public boolean truncate(final Path f, final long newLength)
    throws AccessControlException, FileNotFoundException,
    UnsupportedFileSystemException, IOException {
  // Normalize relative paths against the working directory first, then let
  // the link resolver walk any symlinks down to the owning file system.
  final Path absolutePath = fixRelativePart(f);
  FSLinkResolver<Boolean> truncateResolver = new FSLinkResolver<Boolean>() {
    @Override
    public Boolean next(final AbstractFileSystem fs, final Path p)
        throws IOException, UnresolvedLinkException {
      return fs.truncate(p, newLength);
    }
  };
  return truncateResolver.resolve(this, absolutePath);
}
/** /**
* Set replication for an existing file. * Set replication for an existing file.
* *

View File

@ -212,6 +212,14 @@ public abstract class FilterFs extends AbstractFileSystem {
return myFs.open(f, bufferSize); return myFs.open(f, bufferSize);
} }
/**
 * Truncate the file at {@code f} to {@code newLength}, forwarding the call
 * to the wrapped file system.
 */
@Override
public boolean truncate(Path f, long newLength)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  // Verify the path is addressed to this file system before forwarding.
  checkPath(f);
  final boolean result = myFs.truncate(f, newLength);
  return result;
}
@Override @Override
public void renameInternal(Path src, Path dst) public void renameInternal(Path src, Path dst)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {

View File

@ -247,6 +247,12 @@ class ChRootedFs extends AbstractFileSystem {
return myFs.open(fullPath(f), bufferSize); return myFs.open(fullPath(f), bufferSize);
} }
/**
 * Truncate the file at {@code f}, translating the chroot-relative path to
 * the full path in the underlying file system first.
 */
@Override
public boolean truncate(final Path f, final long newLength)
    throws IOException, UnresolvedLinkException {
  final Path fullTarget = fullPath(f);
  return myFs.truncate(fullTarget, newLength);
}
@Override @Override
public void renameInternal(final Path src, final Path dst) public void renameInternal(final Path src, final Path dst)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {

View File

@ -452,6 +452,14 @@ public class ViewFs extends AbstractFileSystem {
return res.targetFileSystem.open(res.remainingPath, bufferSize); return res.targetFileSystem.open(res.remainingPath, bufferSize);
} }
/**
 * Truncate the file at {@code f} by resolving the view path through the
 * mount table and forwarding to the target file system.
 */
@Override
public boolean truncate(final Path f, final long newLength)
    throws AccessControlException, FileNotFoundException,
    UnresolvedLinkException, IOException {
  // Resolve the mount point (resolveLastComponent = true) to find which
  // backing file system owns this path.
  InodeTree.ResolveResult<AbstractFileSystem> resolved =
      fsState.resolve(getUriPath(f), true);
  return resolved.targetFileSystem.truncate(resolved.remainingPath, newLength);
}
@Override @Override
public void renameInternal(final Path src, final Path dst, public void renameInternal(final Path src, final Path dst,
@ -877,6 +885,13 @@ public class ViewFs extends AbstractFileSystem {
throw new FileNotFoundException("Path points to dir not a file"); throw new FileNotFoundException("Path points to dir not a file");
} }
// The internal directory of the mount table is read-only: after validating
// that the path is the mount-table root, always reject the truncate.
@Override
public boolean truncate(final Path f, final long newLength)
throws FileNotFoundException, IOException {
checkPathIsSlash(f);
throw readOnlyMountTable("truncate", f);
}
@Override @Override
public void renameInternal(final Path src, final Path dst) public void renameInternal(final Path src, final Path dst)
throws AccessControlException, IOException { throws AccessControlException, IOException {

View File

@ -140,6 +140,12 @@ public class TestAfsCheckPath {
return null; return null;
} }
// Test stub: performs no truncation. Returns false (the "truncation still in
// progress" value) purely to satisfy the abstract signature; this class only
// exercises path checking.
@Override
public boolean truncate(Path f, long newLength) throws IOException {
// deliberately empty
return false;
}
@Override @Override
public void renameInternal(Path src, Path dst) throws IOException { public void renameInternal(Path src, Path dst) throws IOException {
// deliberately empty // deliberately empty

View File

@ -317,6 +317,12 @@ public class Hdfs extends AbstractFileSystem {
return dfs.createWrappedInputStream(dfsis); return dfs.createWrappedInputStream(dfsis);
} }
/**
 * Truncate the HDFS file at {@code f} to {@code newLength} via the
 * underlying DFS client.
 */
@Override
public boolean truncate(Path f, long newLength)
    throws IOException, UnresolvedLinkException {
  // The DFS client works on the URI path string, not the Path object.
  final String uriPath = getUriPath(f);
  return dfs.truncate(uriPath, newLength);
}
@Override @Override
public void renameInternal(Path src, Path dst) public void renameInternal(Path src, Path dst)
throws IOException, UnresolvedLinkException { throws IOException, UnresolvedLinkException {

View File

@ -28,6 +28,7 @@ import java.net.URISyntaxException;
import javax.security.auth.login.LoginException; import javax.security.auth.login.LoginException;
import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
@ -115,6 +116,35 @@ public class TestHDFSFileContextMainOperations extends
return fileContextTestHelper.getTestRootPath(fc, path); return fileContextTestHelper.getTestRootPath(fc, path);
} }
/**
 * End-to-end check of {@link FileContext#truncate(Path, long)} against a
 * mini DFS cluster: create a two-block file, truncate it to one block, and
 * verify length, content, and quota accounting.
 */
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  DistributedFileSystem fs = cluster.getFileSystem();
  Path dir = getTestRootPath(fc, "test/hadoop");
  Path file = getTestRootPath(fc, "test/hadoop/file");

  final byte[] data = FileSystemTestHelper.getFileData(
      numOfBlocks, blockSize);
  FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);

  final int newLength = blockSize;
  boolean isReady = fc.truncate(file, newLength);
  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fc.getFileStatus(file);
  // JUnit convention: expected value first, actual second (was reversed,
  // which made failure messages report the values backwards).
  Assert.assertEquals(newLength, fileStatus.getLen());

  AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());

  ContentSummary cs = fs.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage",
      newLength * repl, cs.getSpaceConsumed());
  Assert.assertTrue(fs.delete(dir, true));
}
@Test @Test
public void testOldRenameWithQuota() throws Exception { public void testOldRenameWithQuota() throws Exception {
DistributedFileSystem fs = cluster.getFileSystem(); DistributedFileSystem fs = cluster.getFileSystem();