diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 74e5be81e2b..358f0e491de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -109,12 +109,15 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_VALUE_PARAM = "xattr.value";
   public static final String XATTR_SET_FLAG_PARAM = "flag";
   public static final String XATTR_ENCODING_PARAM = "encoding";
+  public static final String NEW_LENGTH_PARAM = "newlength";
 
   public static final Short DEFAULT_PERMISSION = 0755;
   public static final String ACLSPEC_DEFAULT = "";
 
   public static final String RENAME_JSON = "boolean";
 
+  public static final String TRUNCATE_JSON = "boolean";
+
   public static final String DELETE_JSON = "boolean";
 
   public static final String MKDIRS_JSON = "boolean";
@@ -191,7 +194,7 @@ public static enum Operation {
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
     GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET),
     INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
-    APPEND(HTTP_POST), CONCAT(HTTP_POST),
+    APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
     CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
     SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
     MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
@@ -568,6 +571,25 @@ public FSDataOutputStream append(Path f, int bufferSize,
         HttpURLConnection.HTTP_OK);
   }
 
+  /**
+   * Truncate a file.
+   *
+   * @param f the file to be truncated.
+   * @param newLength The size the file is to be truncated to.
+   *
+   * @throws IOException
+   */
+  @Override
+  public boolean truncate(Path f, long newLength) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.TRUNCATE.toString());
+    params.put(NEW_LENGTH_PARAM, Long.toString(newLength));
+    HttpURLConnection conn = getConnection(Operation.TRUNCATE.getMethod(),
+        params, f, true);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return (Boolean) json.get(TRUNCATE_JSON);
+  }
+
   /**
    * Concat existing files together.
    * @param f the path to the target destination.
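For reference, once this client method is in place, applications reach truncate through the ordinary FileSystem API; the method above turns the call into an HTTP POST carrying op=TRUNCATE and the newlength parameter defined in this patch. A minimal sketch, assuming an HttpFS gateway on the default port 14000; the host, port, and file path are illustrative, not part of the patch:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint; HttpFS serves the webhdfs-compatible REST API,
    // by default on port 14000.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://httpfs-host:14000"), new Configuration());
    Path file = new Path("/tmp/foo.txt");  // illustrative path
    // Issues roughly POST .../webhdfs/v1/tmp/foo.txt?op=TRUNCATE&newlength=1024
    // and unwraps the {"boolean": ...} JSON response.
    boolean isReady = fs.truncate(file, 1024L);
    System.out.println("file already at new length: " + isReady);
    fs.close();
  }
}
```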
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index e7d92f59588..633589cd468 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -363,7 +363,7 @@ public Void execute(FileSystem fs) throws IOException {
   }
 
   /**
-   * Executor that performs an append FileSystemAccess files system operation.
+   * Executor that performs a concat FileSystemAccess file system operation.
    */
   @InterfaceAudience.Private
   public static class FSConcat implements FileSystemAccess.FileSystemExecutor<Void> {
@@ -403,6 +403,47 @@ public Void execute(FileSystem fs) throws IOException {
 
   }
 
+  /**
+   * Executor that performs a truncate FileSystemAccess file system operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSTruncate implements
+      FileSystemAccess.FileSystemExecutor<JSONObject> {
+    private Path path;
+    private long newLength;
+
+    /**
+     * Creates a Truncate executor.
+     *
+     * @param path target path to truncate to.
+     * @param newLength The size the file is to be truncated to.
+     */
+    public FSTruncate(String path, long newLength) {
+      this.path = new Path(path);
+      this.newLength = newLength;
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return true if the file has been truncated to the desired
+     * newLength, false if a background process of adjusting the
+     * length of the last block has been started, and clients should
+     * wait for it to complete before proceeding with further file
+     * updates.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public JSONObject execute(FileSystem fs) throws IOException {
+      boolean result = fs.truncate(path, newLength);
+      return toJSON(HttpFSFileSystem.TRUNCATE_JSON.toLowerCase(), result);
+    }
+
+  }
+
   /**
    * Executor that performs a content-summary FileSystemAccess files system operation.
    */
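The @return contract above follows FileSystem#truncate: false means the new length fell inside a block, last-block recovery is still running, and the file must not be written to until it completes. A hedged sketch of one way a caller could wait, polling the reported length the way HDFS's own truncate tests do; truncateAndWait is a hypothetical helper, not part of this patch:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TruncateHelper {
  private TruncateHelper() {}

  /**
   * Hypothetical helper: truncate and, if block recovery was scheduled,
   * poll until the file reports the requested length.
   */
  public static void truncateAndWait(FileSystem fs, Path f, long newLength)
      throws IOException, InterruptedException {
    if (!fs.truncate(f, newLength)) {
      // false: recovery of the last block was started; the new length
      // becomes visible only once it finishes.
      while (fs.getFileStatus(f).getLen() != newLength) {
        Thread.sleep(100);
      }
    }
  }
}
```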
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 9b0be9bfc0f..271f3d9f00a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -62,6 +62,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{});
     PARAMS_DEF.put(Operation.APPEND, new Class[]{DataParam.class});
     PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
+    PARAMS_DEF.put(Operation.TRUNCATE, new Class[]{NewLengthParam.class});
     PARAMS_DEF.put(Operation.CREATE,
         new Class[]{PermissionParam.class, OverwriteParam.class,
                     ReplicationParam.class, BlockSizeParam.class, DataParam.class});
@@ -287,6 +288,25 @@ public OffsetParam() {
     }
   }
 
+  /**
+   * Class for newlength parameter.
+   */
+  @InterfaceAudience.Private
+  public static class NewLengthParam extends LongParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.NEW_LENGTH_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public NewLengthParam() {
+      super(NAME, 0l);
+    }
+  }
+
   /**
    * Class for overwrite parameter.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 91037183461..1f903ba1546 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.NewLengthParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
@@ -427,6 +428,15 @@ public Response post(InputStream is,
         response = Response.ok().build();
         break;
       }
+      case TRUNCATE: {
+        Long newLength = params.get(NewLengthParam.NAME, NewLengthParam.class);
+        FSOperations.FSTruncate command =
+            new FSOperations.FSTruncate(path, newLength);
+        JSONObject json = fsExecute(user, command);
+        AUDIT_LOG.info("Truncate [{}] to length [{}]", path, newLength);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
       default: {
         throw new IOException(
           MessageFormat.format("Invalid HTTP POST operation [{0}]",
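At the wire level, the new POST branch accepts the request shape this patch defines (op=TRUNCATE plus newlength) under HttpFS's /webhdfs/v1 REST root. A sketch of hitting the endpoint directly; the host, path, and user.name value are illustrative assumptions, and a secured cluster would need real authentication instead:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RawTruncateRequest {
  public static void main(String[] args) throws Exception {
    // Hypothetical HttpFS server; /webhdfs/v1 is the REST root it serves.
    URL url = new URL("http://httpfs-host:14000/webhdfs/v1/tmp/foo.txt"
        + "?op=TRUNCATE&newlength=1024&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("POST");
    // On success the handler above returns HTTP 200 with a JSON body
    // such as {"boolean": true}.
    System.out.println("status: " + conn.getResponseCode());
    try (BufferedReader in = new BufferedReader(new InputStreamReader(
        conn.getInputStream(), StandardCharsets.UTF_8))) {
      System.out.println(in.readLine());
    }
    conn.disconnect();
  }
}
```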
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index f063e33f777..2cc67d417c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -24,12 +24,14 @@
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -192,7 +194,7 @@ private void testCreate() throws Exception {
       Assert.fail("the create should have failed because the file exists " +
         "and override is FALSE");
     } catch (IOException ex) {
-System.out.println("#");
+      System.out.println("#");
     } catch (Exception ex) {
       Assert.fail(ex.toString());
     }
@@ -222,6 +224,31 @@ private void testAppend() throws Exception {
     }
   }
 
+  private void testTruncate() throws Exception {
+    if (!isLocalFS()) {
+      final short repl = 3;
+      final int blockSize = 1024;
+      final int numOfBlocks = 2;
+      FileSystem fs = FileSystem.get(getProxiedFSConf());
+      fs.mkdirs(getProxiedFSTestDir());
+      Path file = new Path(getProxiedFSTestDir(), "foo.txt");
+      final byte[] data = FileSystemTestHelper.getFileData(
+          numOfBlocks, blockSize);
+      FileSystemTestHelper.createFile(fs, file, data, blockSize, repl);
+
+      final int newLength = blockSize;
+
+      boolean isReady = fs.truncate(file, newLength);
+      Assert.assertTrue("Recovery is not expected.", isReady);
+
+      FileStatus fileStatus = fs.getFileStatus(file);
+      Assert.assertEquals(fileStatus.getLen(), newLength);
+      AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
+
+      fs.close();
+    }
+  }
+
   private void testConcat() throws Exception {
     Configuration config = getProxiedFSConf();
     config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
@@ -784,9 +811,10 @@ private void testDirAcls() throws Exception {
   }
 
   protected enum Operation {
-    GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
-    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY,
-    FILEACLS, DIRACLS, SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
+    GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
+    WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
+    SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
+    GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
   }
@@ -803,8 +831,12 @@ private void operation(Operation op) throws Exception {
       case APPEND:
         testAppend();
         break;
+      case TRUNCATE:
+        testTruncate();
+        break;
       case CONCAT:
         testConcat();
+        break;
       case RENAME:
         testRename();
         break;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9c152d9c789..40196b39501 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -35,6 +35,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7584. Enable Quota Support for Storage Types (See breakdown of
       tasks below)
 
+    HDFS-7656. Expose truncate API for HDFS httpfs. (yliu)
+
   IMPROVEMENTS
 
     HDFS-7055. Add tracing to DFSInputStream (cmccabe)