HDFS-17029. Support getECPolicies API in WebHDFS (#5698). Contributed by Hualong Zhang.

Reviewed-by: Shilun Fan <slfan1989@apache.org>
Reviewed-by: Tao Li <tomscut@apache.org>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
Author: zhtttylz
Committed: 2023-06-05 20:03:37 +08:00 (via GitHub)
Parent: 5d6ca13c5c
Commit: d9980ab40f
7 changed files with 171 additions and 0 deletions

JsonUtilClient.java

@@ -42,6 +42,8 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyState;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing.DiffReportListingEntry;
@@ -835,6 +837,36 @@ public class JsonUtilClient {
return new FsStatus(capacity, used, remaining);
}
public static Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies(
Map<?, ?> json) {
Map<?, ?> erasureCodingPoliciesJson = (Map<?, ?>) json.get("ErasureCodingPolicies");
if (erasureCodingPoliciesJson != null) {
List<?> objs = (List<?>) erasureCodingPoliciesJson.get(ErasureCodingPolicyInfo.class
.getSimpleName());
if (objs != null) {
ErasureCodingPolicyInfo[] erasureCodingPolicies = new ErasureCodingPolicyInfo[objs
.size()];
for (int i = 0; i < objs.size(); i++) {
final Map<?, ?> m = (Map<?, ?>) objs.get(i);
ErasureCodingPolicyInfo erasureCodingPolicyInfo = toECPolicyInfo(m);
erasureCodingPolicies[i] = erasureCodingPolicyInfo;
}
return Arrays.asList(erasureCodingPolicies);
}
}
return new ArrayList<ErasureCodingPolicyInfo>(0);
}
public static ErasureCodingPolicyInfo toECPolicyInfo(Map<?, ?> m) {
if (m == null) {
return null;
}
ErasureCodingPolicy ecPolicy = toECPolicy((Map<?, ?>) m.get("policy"));
String state = getString(m, "state", "DISABLE");
final ErasureCodingPolicyState ecPolicyState = ErasureCodingPolicyState.valueOf(state);
return new ErasureCodingPolicyInfo(ecPolicy, ecPolicyState);
}
private static List<SnapshotDiffReport.DiffReportEntry> toDiffList(
List<?> objs) {
if (objs == null) {
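
As a reference for the parsing above, here is a minimal, self-contained sketch (not Hadoop code; the class name and literal values are illustrative) of the nested map layout that getAllErasureCodingPolicies() expects once the op=GETECPOLICIES response has been decoded into java.util maps, and of the two-level traversal it performs:

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class EcPoliciesResponseShape {
  public static void main(String[] args) {
    // Decoded JSON: {"ErasureCodingPolicies": {"ErasureCodingPolicyInfo": [{"state": ..., "policy": {...}}]}}
    Map<String, Object> policyEntry = new HashMap<>();
    policyEntry.put("state", "ENABLED");
    policyEntry.put("policy", Collections.singletonMap("name", "RS-6-3-1024k"));

    Map<String, Object> wrapper = new HashMap<>();
    // The list key matches ErasureCodingPolicyInfo.class.getSimpleName() used by the parser.
    wrapper.put("ErasureCodingPolicyInfo", Collections.singletonList(policyEntry));

    Map<String, Object> json = new HashMap<>();
    json.put("ErasureCodingPolicies", wrapper);

    // Same two lookups as getAllErasureCodingPolicies(): outer object, then the list of policy maps.
    Map<?, ?> outer = (Map<?, ?>) json.get("ErasureCodingPolicies");
    List<?> infos = (List<?>) outer.get("ErasureCodingPolicyInfo");
    for (Object o : infos) {
      Map<?, ?> m = (Map<?, ?>) o;
      System.out.println(m.get("state") + " -> " + ((Map<?, ?>) m.get("policy")).get("name"));
    }
  }
}
```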

WebHdfsFileSystem.java

@@ -100,6 +100,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -2192,6 +2193,19 @@ public class WebHdfsFileSystem extends FileSystem
}.run();
}
public Collection<ErasureCodingPolicyInfo> getAllErasureCodingPolicies()
throws IOException {
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_EC_POLICIES);
final GetOpParam.Op op = GetOpParam.Op.GETECPOLICIES;
return new FsPathResponseRunner<Collection<ErasureCodingPolicyInfo>>(op, null) {
@Override
Collection<ErasureCodingPolicyInfo> decodeResponse(Map<?, ?> json) {
return JsonUtilClient.getAllErasureCodingPolicies(json);
}
}.run();
}
@VisibleForTesting
InetSocketAddress[] getResolvedNNAddr() {
return nnAddrs;
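
A hedged usage sketch for the new client-side method follows (the webhdfs URI, host name, and port are placeholder assumptions, not part of this change):

```java
import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class ListEcPoliciesOverWebHdfs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "namenode.example.com:9870" is a placeholder for the NameNode HTTP address.
    try (WebHdfsFileSystem fs = (WebHdfsFileSystem) FileSystem.get(
        URI.create("webhdfs://namenode.example.com:9870"), conf)) {
      Collection<ErasureCodingPolicyInfo> policies = fs.getAllErasureCodingPolicies();
      for (ErasureCodingPolicyInfo info : policies) {
        // Print each policy name together with its enabled/disabled state.
        System.out.println(info.getPolicy().getName() + " : " + info.getState());
      }
    }
  }
}
```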

GetOpParam.java

@@ -67,6 +67,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
GETLINKTARGET(false, HttpURLConnection.HTTP_OK),
GETFILELINKSTATUS(false, HttpURLConnection.HTTP_OK),
GETSTATUS(false, HttpURLConnection.HTTP_OK),
GETECPOLICIES(false, HttpURLConnection.HTTP_OK),
GETSNAPSHOTLIST(false, HttpURLConnection.HTTP_OK);
final boolean redirect;

NamenodeWebHdfsMethods.java

@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1407,6 +1408,11 @@ public class NamenodeWebHdfsMethods {
final String js = JsonUtil.toJsonString(status);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETECPOLICIES: {
ErasureCodingPolicyInfo[] ecPolicyInfos = cp.getErasureCodingPolicies();
final String js = JsonUtil.toJsonString(ecPolicyInfos);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}

JsonUtil.java

@@ -741,4 +741,27 @@ public class JsonUtil {
m.put("remaining", status.getRemaining());
return m;
}
public static Map<String, Object> toJsonMap(ErasureCodingPolicyInfo ecPolicyInfo) {
if (ecPolicyInfo == null) {
return null;
}
Map<String, Object> m = new HashMap<>();
m.put("policy", ecPolicyInfo.getPolicy());
m.put("state", ecPolicyInfo.getState());
return m;
}
public static String toJsonString(ErasureCodingPolicyInfo[] ecPolicyInfos) {
final Map<String, Object> erasureCodingPolicies = new HashMap<>();
Object[] erasureCodingPolicyInfos = null;
if (ecPolicyInfos != null && ecPolicyInfos.length > 0) {
erasureCodingPolicyInfos = new Object[ecPolicyInfos.length];
for (int i = 0; i < ecPolicyInfos.length; i++) {
erasureCodingPolicyInfos[i] = toJsonMap(ecPolicyInfos[i]);
}
}
erasureCodingPolicies.put("ErasureCodingPolicyInfo", erasureCodingPolicyInfos);
return toJsonString("ErasureCodingPolicies", erasureCodingPolicies);
}
}
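
For illustration only, a standalone sketch of the wrapping performed by toJsonString(ErasureCodingPolicyInfo[]) above; it uses Jackson directly, which is an assumption made for the sketch rather than a statement about what JsonUtil delegates to:

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;

public class EcPoliciesJsonShape {
  public static void main(String[] args) throws Exception {
    // One map per policy, mirroring toJsonMap(ErasureCodingPolicyInfo) above.
    Map<String, Object> info = new HashMap<>();
    info.put("state", "ENABLED");
    info.put("policy", Collections.singletonMap("name", "RS-6-3-1024k"));

    // The array of policy maps is keyed by "ErasureCodingPolicyInfo" ...
    Map<String, Object> wrapper = new HashMap<>();
    wrapper.put("ErasureCodingPolicyInfo", new Object[] {info});

    // ... and the whole map is wrapped under the top-level "ErasureCodingPolicies" key.
    Map<String, Object> top = new HashMap<>();
    top.put("ErasureCodingPolicies", wrapper);

    // Prints (field order may vary):
    // {"ErasureCodingPolicies":{"ErasureCodingPolicyInfo":[{"state":"ENABLED","policy":{"name":"RS-6-3-1024k"}}]}}
    System.out.println(new ObjectMapper().writeValueAsString(top));
  }
}
```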

WebHDFS.md

@@ -61,6 +61,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`GETLINKTARGET`](#Get_Link_Target) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getLinkTarget)
* [`GETFILELINKSTATUS`](#Get_File_Link_Status) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileLinkStatus)
* [`GETSTATUS`](#Get_Status) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getStatus)
* [`GETECPOLICIES`](#Get_EC_Policies)
* HTTP PUT
* [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
* [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -1191,6 +1192,44 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getLinkTa
See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileLinkInfo

### Get EC Policies

* Submit a HTTP GET request.

        curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETECPOLICIES"

    The client receives a response with an [`ECPolicies` JSON object](#EC_Policies_JSON_Schema):

        HTTP/1.1 200 OK
        Content-Type: application/json
        Transfer-Encoding: chunked

        {
          "ErasureCodingPolicies": {
            "ErasureCodingPolicyInfo": [
              {
                "state": "ENABLED",
                "policy": {
                  "name": "RS-6-3-1024k",
                  "schema": {
                    "codecName": "rs",
                    "numDataUnits": 6,
                    "numParityUnits": 3,
                    "extraOptions": {}
                  },
                  "cellSize": 1048576,
                  "id": 1,
                  "replicationPolicy": false,
                  "codecName": "rs",
                  "numDataUnits": 6,
                  "numParityUnits": 3,
                  "systemPolicy": true
                }
              }
            ]
          }
        }
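
The same request can also be issued programmatically. The following standalone Java snippet is only an illustration (the host, port, and path values are placeholders for `<HOST>`, `<PORT>`, and `<PATH>`, and the class is not part of Hadoop):

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class GetEcPoliciesOverRest {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode HTTP address and path; substitute real values.
    String host = "namenode.example.com";
    int port = 9870;
    String path = "/";

    URL url = new URL("http://" + host + ":" + port + "/webhdfs/v1" + path + "?op=GETECPOLICIES");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        // Each line is part of the ErasureCodingPolicies JSON document shown above.
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}
```
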
### Get Status

* Submit a HTTP GET request.
@@ -3175,6 +3214,35 @@ var blockLocationProperties =
}
}
```

### EC Policies JSON Schema

```json
{
  "ErasureCodingPolicies": {
    "ErasureCodingPolicyInfo": [
      {
        "state": "ENABLED",
        "policy": {
          "name": "RS-6-3-1024k",
          "schema": {
            "codecName": "rs",
            "numDataUnits": 6,
            "numParityUnits": 3,
            "extraOptions": {}
          },
          "cellSize": 1048576,
          "id": 1,
          "replicationPolicy": false,
          "codecName": "rs",
          "numDataUnits": 6,
          "numParityUnits": 3,
          "systemPolicy": true
        }
      }
    ]
  }
}
```

HTTP Query Parameter Dictionary
-------------------------------

TestWebHDFS.java

@@ -2291,6 +2291,33 @@ public class TestWebHDFS {
}
}
@Test
public void testGetErasureCodingPolicies() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitActive();
final WebHdfsFileSystem webHdfs =
WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
final DistributedFileSystem dfs = cluster.getFileSystem();
Collection<ErasureCodingPolicyInfo> webHdfsEcPolicyInfos =
webHdfs.getAllErasureCodingPolicies();
Collection<ErasureCodingPolicyInfo> dfsEcPolicyInfos =
dfs.getAllErasureCodingPolicies();
// Validate that the erasure coding policies returned over WebHDFS match those from DistributedFileSystem
assertEquals(dfsEcPolicyInfos.size(), webHdfsEcPolicyInfos.size());
assertTrue(dfsEcPolicyInfos.containsAll(webHdfsEcPolicyInfos));
} finally {
cluster.shutdown();
}
}
/**
* Get FileStatus JSONObject from ListStatus response.
*/