HDFS-17001. Support getStatus API in WebHDFS (#5628). Contributed by Hualong Zhang.
Reviewed-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
parent ad1e3a0f5b
commit 0c77629849
@@ -2056,7 +2056,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  private long getStateAtIndex(long[] states, int index) {
+  public static long getStateAtIndex(long[] states, int index) {
     return states.length > index ? states[index] : -1;
   }
 
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
@@ -822,6 +823,18 @@ public class JsonUtilClient {
         diffList);
   }
 
+  public static FsStatus toFsStatus(Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    Map<?, ?> m =
+        (Map<?, ?>) json.get(FsStatus.class.getSimpleName());
+    long capacity = getLong(m, "capacity", Long.MAX_VALUE);
+    long used = getLong(m, "used", 0);
+    long remaining = getLong(m, "remaining", Long.MAX_VALUE);
+    return new FsStatus(capacity, used, remaining);
+  }
+
   private static List<SnapshotDiffReport.DiffReportEntry> toDiffList(
       List<?> objs) {
     if (objs == null) {
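For reference, here is a minimal, self-contained sketch (not part of this change) of how the new `JsonUtilClient.toFsStatus` helper consumes a parsed `GETSTATUS` response body. The class name and the numeric values are illustrative only, and the sketch assumes `hadoop-hdfs-client` is on the classpath:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.hdfs.web.JsonUtilClient;

public class FsStatusParseSketch {
  public static void main(String[] args) {
    // Parsed shape of a WebHDFS GETSTATUS response body:
    // {"FsStatus": {"capacity": ..., "used": ..., "remaining": ...}}
    Map<String, Object> inner = new HashMap<>();
    inner.put("capacity", 322122547200L);
    inner.put("used", 29229154304L);
    inner.put("remaining", 292893392896L);
    Map<String, Object> json = new HashMap<>();
    json.put(FsStatus.class.getSimpleName(), inner);

    FsStatus status = JsonUtilClient.toFsStatus(json);
    System.out.println("capacity=" + status.getCapacity()
        + " used=" + status.getUsed()
        + " remaining=" + status.getRemaining());
  }
}
```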
@@ -72,6 +72,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.MultipartUploaderBuilder;
@@ -2178,6 +2179,19 @@ public class WebHdfsFileSystem extends FileSystem
     return status.makeQualified(getUri(), f);
   }
 
+  @Override
+  public FsStatus getStatus(Path path) throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_STATUS);
+    final GetOpParam.Op op = GetOpParam.Op.GETSTATUS;
+    return new FsPathResponseRunner<FsStatus>(op, path) {
+      @Override
+      FsStatus decodeResponse(Map<?, ?> json) {
+        return JsonUtilClient.toFsStatus(json);
+      }
+    }.run();
+  }
+
   @VisibleForTesting
   InetSocketAddress[] getResolvedNNAddr() {
     return nnAddrs;
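With the `WebHdfsFileSystem` override above, `getStatus` becomes reachable through the ordinary `FileSystem` API. A minimal usage sketch (not part of the commit; the NameNode host and port in the URI are placeholders to adjust for the target cluster):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;

public class WebHdfsGetStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "namenode:9870" is a placeholder for the NameNode HTTP address.
    try (FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:9870"), conf)) {
      FsStatus status = fs.getStatus(new Path("/"));
      System.out.printf("capacity=%d used=%d remaining=%d%n",
          status.getCapacity(), status.getUsed(), status.getRemaining());
    }
  }
}
```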
@@ -66,6 +66,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK),
     GETLINKTARGET(false, HttpURLConnection.HTTP_OK),
     GETFILELINKSTATUS(false, HttpURLConnection.HTTP_OK),
+    GETSTATUS(false, HttpURLConnection.HTTP_OK),
     GETSNAPSHOTLIST(false, HttpURLConnection.HTTP_OK);
 
     final boolean redirect;
@@ -387,6 +387,7 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
     case CHECKACCESS:
     case GETLINKTARGET:
     case GETFILELINKSTATUS:
+    case GETSTATUS:
     {
       return super.get(ugi, delegation, username, doAsUser, fullpath, op,
           offset, length, renewer, bufferSize, xattrNames, xattrEncoding,
@@ -73,9 +73,11 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -1396,6 +1398,15 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETSTATUS: {
+      long[] states = cp.getStats();
+      FsStatus status = new FsStatus(
+          DFSClient.getStateAtIndex(states, 0),
+          DFSClient.getStateAtIndex(states, 1),
+          DFSClient.getStateAtIndex(states, 2));
+      final String js = JsonUtil.toJsonString(status);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
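The `GETSTATUS` case above relies on the slot layout of `ClientProtocol#getStats()`, whose first three entries carry capacity, used and remaining. The sketch below (not part of the change) spells out the same mapping against the named index constants; it assumes the `ClientProtocol.GET_STATS_*_IDX` constants, which should be verified against the Hadoop version in use:

```java
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class GetStatsIndexSketch {
  // Mirrors the GETSTATUS case: slots 0, 1 and 2 of getStats() carry
  // capacity, used and remaining; absent slots fall back to -1 via
  // DFSClient.getStateAtIndex.
  static FsStatus toFsStatus(long[] states) {
    return new FsStatus(
        DFSClient.getStateAtIndex(states, ClientProtocol.GET_STATS_CAPACITY_IDX),
        DFSClient.getStateAtIndex(states, ClientProtocol.GET_STATS_USED_IDX),
        DFSClient.getStateAtIndex(states, ClientProtocol.GET_STATS_REMAINING_IDX));
  }
}
```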
@@ -1535,6 +1546,7 @@ public class NamenodeWebHdfsMethods {
     };
   }
 
+
   /** Handle HTTP DELETE request for the root. */
   @DELETE
   @Path("/")
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
@@ -725,4 +726,19 @@ public class JsonUtil {
     m.put(BlockLocation.class.getSimpleName(), blockLocations);
     return m;
   }
+
+  public static String toJsonString(FsStatus status) {
+    return toJsonString(FsStatus.class, toJsonMap(status));
+  }
+
+  public static Map<String, Object> toJsonMap(FsStatus status) {
+    if (status == null) {
+      return null;
+    }
+    final Map<String, Object> m = new HashMap<>();
+    m.put("capacity", status.getCapacity());
+    m.put("used", status.getUsed());
+    m.put("remaining", status.getRemaining());
+    return m;
+  }
 }
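On the server side, the new `JsonUtil` helpers produce the response body documented in the WebHDFS changes below. A small sketch (not part of the change; the class name and values are illustrative) of the expected serialization:

```java
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.hdfs.web.JsonUtil;

public class FsStatusJsonSketch {
  public static void main(String[] args) {
    // FsStatus(capacity, used, remaining), serialized the same way the
    // NameNode does for op=GETSTATUS.
    FsStatus status = new FsStatus(322122547200L, 29229154304L, 292893392896L);
    // Expected shape: {"FsStatus":{"capacity":...,"used":...,"remaining":...}}
    System.out.println(JsonUtil.toJsonString(status));
  }
}
```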
@@ -60,6 +60,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`GETSERVERDEFAULTS`](#Get_Server_Defaults) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getServerDefaults)
     * [`GETLINKTARGET`](#Get_Link_Target) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getLinkTarget)
     * [`GETFILELINKSTATUS`](#Get_File_Link_Status) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileLinkStatus)
+    * [`GETSTATUS`](#Get_Status) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStatus)
 * HTTP PUT
     * [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
     * [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1190,6 +1191,28 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getLinkTa
 
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileLinkInfo
 
+### Get Status
+
+* Submit a HTTP GET request.
+
+        curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETSTATUS"
+
+    The client receives a response with a [`FsStatus` JSON object](#FsStatus_JSON_Schema):
+
+        HTTP/1.1 200 OK
+        Content-Type: application/json
+        Transfer-Encoding: chunked
+
+        {
+          "FsStatus": {
+            "used": 29229154304,
+            "remaining": 292893392896,
+            "capacity":322122547200
+          }
+        }
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStatus
+
 Storage Policy Operations
 -------------------------
 
@@ -3141,6 +3164,17 @@ var blockLocationProperties =
 }
 }
 ```
+### FsStatus JSON Schema
+
+```json
+{
+  "FsStatus": {
+    "used": 29229154304,
+    "remaining": 292893392896,
+    "capacity": 322122547200
+  }
+}
+```
 
 HTTP Query Parameter Dictionary
 -------------------------------
@@ -88,6 +88,7 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -2255,6 +2256,41 @@ public class TestWebHDFS {
     }
   }
 
+  @Test
+  public void testFsStatus() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+
+      final WebHdfsFileSystem webHdfs =
+          WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+              WebHdfsConstants.WEBHDFS_SCHEME);
+
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final String path = "/foo";
+      try (OutputStream os = webHdfs.create(new Path(path))) {
+        os.write(new byte[1024]);
+      }
+
+      FsStatus webHdfsFsStatus = webHdfs.getStatus(new Path("/"));
+      Assert.assertNotNull(webHdfsFsStatus);
+
+      FsStatus dfsFsStatus = dfs.getStatus(new Path("/"));
+      Assert.assertNotNull(dfsFsStatus);
+
+      // Validate that used, remaining and capacity match DistributedFileSystem.
+      Assert.assertEquals(webHdfsFsStatus.getUsed(), dfsFsStatus.getUsed());
+      Assert.assertEquals(webHdfsFsStatus.getRemaining(),
+          dfsFsStatus.getRemaining());
+      Assert.assertEquals(webHdfsFsStatus.getCapacity(),
+          dfsFsStatus.getCapacity());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Get FileStatus JSONObject from ListStatus response.
    */