diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index d156b1d9898..5d95df8f516 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.DelegationTokenRenewer;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -188,6 +189,7 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String FILE_STATUSES_JSON = "FileStatuses";
   public static final String FILE_STATUS_JSON = "FileStatus";
+  public static final String FS_STATUS_JSON = "FsStatus";
   public static final String PATH_SUFFIX_JSON = "pathSuffix";
   public static final String TYPE_JSON = "type";
   public static final String LENGTH_JSON = "length";
@@ -208,6 +210,9 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTRNAMES_JSON = "XAttrNames";
   public static final String ECPOLICY_JSON = "ecPolicyObj";
   public static final String SYMLINK_JSON = "symlink";
+  public static final String CAPACITY_JSON = "capacity";
+  public static final String USED_JSON = "used";
+  public static final String REMAINING_JSON = "remaining";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -278,6 +283,7 @@ public class HttpFSFileSystem extends FileSystem
     CHECKACCESS(HTTP_GET), SETECPOLICY(HTTP_PUT), GETECPOLICY(HTTP_GET), UNSETECPOLICY(
         HTTP_POST), SATISFYSTORAGEPOLICY(HTTP_PUT), GETSNAPSHOTDIFFLISTING(HTTP_GET),
     GETFILELINKSTATUS(HTTP_GET),
+    GETSTATUS(HTTP_GET),
     GET_BLOCK_LOCATIONS(HTTP_GET);
 
     private String httpMethod;
@@ -1756,6 +1762,17 @@ public class HttpFSFileSystem extends FileSystem
     return status.makeQualified(getUri(), path);
   }
 
+  @Override
+  public FsStatus getStatus(final Path path) throws IOException {
+    Map<String, String> params = new HashMap<>();
+    params.put(OP_PARAM, Operation.GETSTATUS.toString());
+    HttpURLConnection conn =
+        getConnection(Operation.GETSTATUS.getMethod(), params, path, true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return JsonUtilClient.toFsStatus(json);
+  }
+
   @VisibleForTesting
   static BlockLocation[] toBlockLocations(JSONObject json) throws IOException {
     ObjectMapper mapper = new ObjectMapper();
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index cffd92d5e17..d32c19ec9e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -424,6 +425,23 @@ public final class FSOperations {
     return json;
   }
 
+  /**
+   * Converts a FsStatus object into its JSON representation.
+   *
+   * @param fsStatus a FsStatus object
+   * @return JSON map suitable for wire transport
+   */
+  @SuppressWarnings("unchecked")
+  private static Map toJson(FsStatus fsStatus) {
+    Map json = new LinkedHashMap<>();
+    JSONObject statusJson = new JSONObject();
+    statusJson.put(HttpFSFileSystem.USED_JSON, fsStatus.getUsed());
+    statusJson.put(HttpFSFileSystem.REMAINING_JSON, fsStatus.getRemaining());
+    statusJson.put(HttpFSFileSystem.CAPACITY_JSON, fsStatus.getCapacity());
+    json.put(HttpFSFileSystem.FS_STATUS_JSON, statusJson);
+    return json;
+  }
+
   /**
    * Executor that performs an append FileSystemAccess files system operation.
    */
@@ -2300,4 +2318,28 @@ public final class FSOperations {
       return toJson(status);
     }
   }
+
+  /**
+   * Executor that performs a getFsStatus operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSStatus implements FileSystemAccess.FileSystemExecutor<Map> {
+    final private Path path;
+
+    /**
+     * Creates a FsStatus executor.
+     *
+     * @param path the path to retrieve the status.
+     */
+    public FSStatus(String path) {
+      this.path = new Path(path);
+    }
+
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      FsStatus fsStatus = fs.getStatus(path);
+      HttpFSServerWebApp.get().getMetrics().incrOpsStatus();
+      return toJson(fsStatus);
+    }
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 8175de03767..1d319516c2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -129,6 +129,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.UNSETECPOLICY, new Class[] {});
     PARAMS_DEF.put(Operation.SATISFYSTORAGEPOLICY, new Class[] {});
     PARAMS_DEF.put(Operation.GETFILELINKSTATUS, new Class[]{});
+    PARAMS_DEF.put(Operation.GETSTATUS, new Class[]{});
     PARAMS_DEF.put(Operation.GET_BLOCK_LOCATIONS,
         new Class[] {OffsetParam.class, LenParam.class});
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index 2dd46c221bb..8d5921411ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -554,6 +554,12 @@ public class HttpFSServer {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETSTATUS: {
+      FSOperations.FSStatus command = new FSOperations.FSStatus(path);
+      @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
+      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     default: {
       throw new IOException(
           MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
index 524ec09290a..6f2c484addb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/metrics/HttpFSServerMetrics.java
@@ -64,6 +64,7 @@ public class HttpFSServerMetrics {
   private @Metric MutableCounterLong opsListing;
   private @Metric MutableCounterLong opsStat;
   private @Metric MutableCounterLong opsCheckAccess;
+  private @Metric MutableCounterLong opsStatus;
 
   private final MetricsRegistry registry = new MetricsRegistry("httpfsserver");
   private final String name;
@@ -160,4 +161,8 @@ public class HttpFSServerMetrics {
   public long getOpsStat() {
     return opsStat.value();
   }
+
+  public void incrOpsStatus() {
+    opsStatus.incr();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 6d131a7cd1d..e61431250b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -1216,7 +1217,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
     GET_SNAPSHOT_LIST, GET_SERVERDEFAULTS, CHECKACCESS, SETECPOLICY,
     SATISFYSTORAGEPOLICY, GET_SNAPSHOT_DIFF_LISTING, GETFILEBLOCKLOCATIONS,
-    GETFILELINKSTATUS
+    GETFILELINKSTATUS, GETSTATUS
   }
 
   private void operation(Operation op) throws Exception {
@@ -1362,6 +1363,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     case GETFILELINKSTATUS:
       testGetFileLinkStatus();
      break;
+    case GETSTATUS:
+      testGetStatus();
+      break;
     }
   }
 
@@ -2081,6 +2085,32 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     assertTrue(fs.getFileLinkStatus(linkToFile).isSymlink());
   }
 
+  private void testGetStatus() throws Exception {
+    if (isLocalFS()) {
+      // do not test the getStatus for local FS.
+      return;
+    }
+    final Path path = new Path("/foo");
+    FileSystem fs = FileSystem.get(path.toUri(), this.getProxiedFSConf());
+    if (fs instanceof DistributedFileSystem) {
+      DistributedFileSystem dfs =
+          (DistributedFileSystem) FileSystem.get(path.toUri(), this.getProxiedFSConf());
+      FileSystem httpFs = this.getHttpFSFileSystem();
+
+      FsStatus dfsFsStatus = dfs.getStatus(path);
+      FsStatus httpFsStatus = httpFs.getStatus(path);
+
+      // Validate that used, remaining and capacity are the same as for the DistributedFileSystem.
+      assertEquals(dfsFsStatus.getUsed(), httpFsStatus.getUsed());
+      assertEquals(dfsFsStatus.getRemaining(), httpFsStatus.getRemaining());
+      assertEquals(dfsFsStatus.getCapacity(), httpFsStatus.getCapacity());
+      httpFs.close();
+      dfs.close();
+    } else {
+      Assert.fail(fs.getClass().getSimpleName() + " is not of type DistributedFileSystem.");
+    }
+  }
+
   private void assertHttpFsReportListingWithDfsClient(SnapshotDiffReportListing diffReportListing,
       SnapshotDiffReportListing dfsDiffReportListing) {
     Assert.assertEquals(diffReportListing.getCreateList().size(),
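
For reference, and not part of the patch itself: a minimal client-side sketch of how the new GETSTATUS round trip could be exercised through the generic FileSystem API. The gateway host and port in the URI are placeholders, and the sketch assumes the webhdfs:// URI resolves to a client that speaks the HttpFS REST API (such as the HttpFSFileSystem changed above). On the wire, the server-side toJson above produces a body of the form {"FsStatus":{"used":...,"remaining":...,"capacity":...}}.

// Hedged example (not part of the patch): endpoint URI and scheme mapping are assumptions.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;

public class GetStatusClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder HttpFS gateway address; point this at a real HttpFS endpoint.
    URI httpFsUri = new URI("webhdfs://httpfs-host:14000");
    try (FileSystem fs = FileSystem.get(httpFsUri, conf)) {
      // getStatus() is what the patched HttpFSFileSystem maps to op=GETSTATUS.
      FsStatus status = fs.getStatus(new Path("/"));
      System.out.println("capacity=" + status.getCapacity()
          + " used=" + status.getUsed()
          + " remaining=" + status.getRemaining());
    }
  }
}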