diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 0e5aae894d4..dd6faf0e72a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
 import org.apache.hadoop.fs.QuotaUsage;
@@ -253,7 +254,7 @@ public enum Operation {
     ALLOWSNAPSHOT(HTTP_PUT), DISALLOWSNAPSHOT(HTTP_PUT),
     CREATESNAPSHOT(HTTP_PUT), DELETESNAPSHOT(HTTP_DELETE),
     RENAMESNAPSHOT(HTTP_PUT), GETSNAPSHOTDIFF(HTTP_GET),
-    GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET);
+    GETSNAPSHOTTABLEDIRECTORYLIST(HTTP_GET), GETSERVERDEFAULTS(HTTP_GET);
 
     private String httpMethod;
 
@@ -1591,4 +1592,21 @@ public boolean hasPathCapability(final Path path, final String capability)
       return super.hasPathCapability(p, capability);
     }
   }
+
+  @Override
+  public FsServerDefaults getServerDefaults() throws IOException {
+    Map<String, String> params = new HashMap<>();
+    params.put(OP_PARAM, Operation.GETSERVERDEFAULTS.toString());
+    HttpURLConnection conn =
+        getConnection(Operation.GETSERVERDEFAULTS.getMethod(), params,
+            new Path(getUri().toString(), "/"), true);
+    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    return JsonUtilClient.toFsServerDefaults(json);
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults(Path p) throws IOException {
+    return getServerDefaults();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 043f3e1a6a6..ff55054e47d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FilterFileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.GlobFilter;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
@@ -1821,4 +1822,38 @@ public String execute(FileSystem fs) throws IOException {
       return JsonUtil.toJsonString(sds);
     }
   }
+
+  /**
+   * Executor that performs a getServerDefaults operation.
+   */
+  @InterfaceAudience.Private
+  public static class FSGetServerDefaults
+      implements FileSystemAccess.FileSystemExecutor<String> {
+
+    /**
+     * Creates a getServerDefaults executor.
+     */
+    public FSGetServerDefaults() {
+    }
+
+    /**
+     * Executes the filesystem operation.
+     * @param fs filesystem instance to use.
+     * @return A JSON string.
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public String execute(FileSystem fs) throws IOException {
+      FsServerDefaults sds = null;
+      if (fs instanceof DistributedFileSystem) {
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+        sds = dfs.getServerDefaults();
+      } else {
+        throw new UnsupportedOperationException("getServerDefaults is " +
+            "not supported for HttpFs on " + fs.getClass() +
+            ". Please check your fs.defaultFS configuration");
+      }
+      return JsonUtil.toJsonString(sds);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
index 857ec94fa12..45d69ec4e0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -117,6 +117,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
         new Class[] {OldSnapshotNameParam.class, SnapshotNameParam.class});
     PARAMS_DEF.put(Operation.GETSNAPSHOTTABLEDIRECTORYLIST,
         new Class[] {});
+    PARAMS_DEF.put(Operation.GETSERVERDEFAULTS, new Class[] {});
   }
 
   public HttpFSParametersProvider() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index f2ef811dfee..3105e49c363 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -418,6 +418,14 @@ public InputStream run() throws Exception {
       response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       break;
     }
+    case GETSERVERDEFAULTS: {
+      FSOperations.FSGetServerDefaults command =
+          new FSOperations.FSGetServerDefaults();
+      String js = fsExecute(user, command);
+      AUDIT_LOG.info("[{}]", "/");
+      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      break;
+    }
     default: {
       throw new IOException(
         MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 6380c412905..36b409fd4d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -27,6 +27,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -1141,7 +1142,8 @@ protected enum Operation {
     LIST_STATUS_BATCH, GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
     CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT, ALLOW_SNAPSHOT,
     DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION,
-    FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST
+    FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST,
+    GET_SERVERDEFAULTS
   }
 
   private void operation(Operation op) throws Exception {
@@ -1262,7 +1264,11 @@ private void operation(Operation op) throws Exception {
     case GET_SNAPSHOTTABLE_DIRECTORY_LIST:
       testGetSnapshottableDirListing();
       break;
+    case GET_SERVERDEFAULTS:
+      testGetServerDefaults();
+      break;
     }
+
   }
 
   @Parameterized.Parameters
@@ -1702,4 +1708,33 @@ private void testFileAclsCustomizedUserAndGroupNames() throws Exception {
     // Clean up
     proxyFs.delete(new Path(dir), true);
   }
+
+  private void verifyGetServerDefaults(FileSystem fs, DistributedFileSystem dfs)
+      throws Exception {
+    FsServerDefaults sds = null;
+    if (fs instanceof HttpFSFileSystem) {
+      HttpFSFileSystem httpFS = (HttpFSFileSystem) fs;
+      sds = httpFS.getServerDefaults();
+    } else if (fs instanceof WebHdfsFileSystem) {
+      WebHdfsFileSystem webHdfsFileSystem = (WebHdfsFileSystem) fs;
+      sds = webHdfsFileSystem.getServerDefaults();
+    } else {
+      Assert.fail(
+          fs.getClass().getSimpleName() + " doesn't support getServerDefaults");
+    }
+    // Verify result with DFS
+    FsServerDefaults dfssds = dfs.getServerDefaults();
+    Assert.assertEquals(JsonUtil.toJsonString(sds),
+        JsonUtil.toJsonString(dfssds));
+  }
+
+  private void testGetServerDefaults() throws Exception {
+    if (!this.isLocalFS()) {
+      FileSystem fs = this.getHttpFSFileSystem();
+      Path path1 = new Path("/");
+      DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+          .get(path1.toUri(), this.getProxiedFSConf());
+      verifyGetServerDefaults(fs, dfs);
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index b25061f58d4..d24cd628192 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -54,6 +54,7 @@
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
@@ -1636,4 +1637,34 @@ public void testNoRedirect() throws Exception {
     Assert.assertEquals(28L, checksum.get("length"));
     Assert.assertEquals("MD5-of-0MD5-of-512CRC32C", checksum.get("algorithm"));
   }
+
+  private void verifyGetServerDefaults(DistributedFileSystem dfs)
+      throws Exception {
+    // Send a request
+    HttpURLConnection conn =
+        sendRequestToHttpFSServer("/", "GETSERVERDEFAULTS", "");
+    // Should return HTTP_OK
+    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
+    // Verify the response
+    BufferedReader reader =
+        new BufferedReader(new InputStreamReader(conn.getInputStream()));
+    // The response should be a one-line JSON string.
+    String dirLst = reader.readLine();
+    FsServerDefaults dfsDirLst = dfs.getServerDefaults();
+    Assert.assertNotNull(dfsDirLst);
+    Assert.assertEquals(dirLst, JsonUtil.toJsonString(dfsDirLst));
+  }
+
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testGetServerDefaults() throws Exception {
+    createHttpFSServer(false, false);
+    String pathStr1 = "/";
+    Path path1 = new Path(pathStr1);
+    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
+        .get(path1.toUri(), TestHdfsHelper.getHdfsConf());
+    verifyGetServerDefaults(dfs);
+  }
 }
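Usage sketch (not part of the patch): with this change applied, a client can read the NameNode's FsServerDefaults through HttpFS either via the new HttpFSFileSystem#getServerDefaults() API or via the raw REST form exercised by the server-side test above (an HTTP GET against "/" with op=GETSERVERDEFAULTS). The snippet below is a minimal, hypothetical client under stated assumptions: the httpfs-host name and port 14000 (the HttpFS default) are placeholders, not values taken from the patch.

    // Hypothetical standalone client for the new GETSERVERDEFAULTS operation.
    // Assumes an HttpFS server reachable at httpfs-host:14000 (placeholder).
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsServerDefaults;
    import org.apache.hadoop.fs.http.client.HttpFSFileSystem;

    public class GetServerDefaultsClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HttpFSFileSystem fs = new HttpFSFileSystem();
        // HttpFSFileSystem speaks the webhdfs REST dialect; initialize()
        // points it at the HttpFS endpoint.
        fs.initialize(new URI("webhdfs://httpfs-host:14000"), conf);
        try {
          FsServerDefaults d = fs.getServerDefaults();
          // The getters expose values parsed from the one-line JSON response
          // that the server builds with JsonUtil.toJsonString(FsServerDefaults).
          System.out.println("blockSize=" + d.getBlockSize()
              + " replication=" + d.getReplication());
        } finally {
          fs.close();
        }
      }
    }

Note that the server-side executor only answers when the backing filesystem is a DistributedFileSystem; against any other fs.defaultFS the operation fails with the UnsupportedOperationException added in FSOperations above.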