From abe14d32d41b4a2bf630e8a89d794d729cee119b Mon Sep 17 00:00:00 2001
From: Vinayakumar B
Date: Tue, 11 Dec 2018 17:59:04 +0530
Subject: [PATCH] HDFS-14124. EC : Support EC Commands (set/get/unset EcPolicy)
 via WebHdfs. Contributed by Ayush Saxena.

(cherry picked from commit 39dc7345b80e27ba8bd1ff4c19ca241aef5ac0fc)
---
 .../hadoop/hdfs/web/JsonUtilClient.java       | 11 +++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 33 +++++++
 .../hadoop/hdfs/web/resources/GetOpParam.java |  2 +
 .../hdfs/web/resources/PostOpParam.java       |  2 +
 .../hadoop/hdfs/web/resources/PutOpParam.java |  1 +
 .../web/resources/NamenodeWebHdfsMethods.java | 15 +++-
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md  | 89 ++++++++++++++++++-
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 18 +++-
 8 files changed, 164 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index 95ccb4b6497..3889326e823 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -675,6 +675,17 @@ public class JsonUtilClient {
         replicationFallbacks, copyOnCreateFile.booleanValue());
   }
 
+  public static ErasureCodingPolicy toECPolicy(Map<?, ?> m) {
+    byte id = ((Number) m.get("id")).byteValue();
+    String name = (String) m.get("name");
+    String codec = (String) m.get("codecName");
+    int cellsize = ((Number) m.get("cellSize")).intValue();
+    int dataunits = ((Number) m.get("numDataUnits")).intValue();
+    int parityunits = ((Number) m.get("numParityUnits")).intValue();
+    ECSchema ecs = new ECSchema(codec, dataunits, parityunits);
+    return new ErasureCodingPolicy(name, ecs, cellsize, id);
+  }
+
   private static StorageType[] toStorageTypes(List<?> list) {
     if (list == null) {
       return null;
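
For reference, a minimal standalone sketch of the decoding that the new JsonUtilClient.toECPolicy() performs, driven by a hand-built map using the field names documented in the WebHDFS.md changes further below. The driver class and the literal values are illustrative assumptions, not part of the patch:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.web.JsonUtilClient;

    // Hypothetical driver: builds the kind of map a parsed GETECPOLICY
    // response would yield and feeds it to the new decoder.
    public class ToECPolicySketch {
      public static void main(String[] args) {
        Map<String, Object> m = new HashMap<>();
        m.put("id", 5);                  // RS-10-4-1024k is system policy id 5
        m.put("name", "RS-10-4-1024k");
        m.put("codecName", "rs");
        m.put("cellSize", 1048576);
        m.put("numDataUnits", 10);
        m.put("numParityUnits", 4);
        // Decode the map into an ErasureCodingPolicy and print its key fields.
        ErasureCodingPolicy policy = JsonUtilClient.toECPolicy(m);
        System.out.println(policy.getName() + ", cellSize=" + policy.getCellSize());
      }
    }
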
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 51d4442206f..4f7261359a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.HdfsKMSUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -1311,15 +1312,47 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   public void enableECPolicy(String policyName) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.ENABLE_EC_POLICY);
     final HttpOpParam.Op op = PutOpParam.Op.ENABLEECPOLICY;
     new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
   }
 
   public void disableECPolicy(String policyName) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.DISABLE_EC_POLICY);
     final HttpOpParam.Op op = PutOpParam.Op.DISABLEECPOLICY;
     new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
   }
 
+  public void setErasureCodingPolicy(Path p, String policyName)
+      throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_EC_POLICY);
+    final HttpOpParam.Op op = PutOpParam.Op.SETECPOLICY;
+    new FsPathRunner(op, p, new ECPolicyParam(policyName)).run();
+  }
+
+  public void unsetErasureCodingPolicy(Path p) throws IOException {
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.UNSET_EC_POLICY);
+    final HttpOpParam.Op op = PostOpParam.Op.UNSETECPOLICY;
+    new FsPathRunner(op, p).run();
+  }
+
+  public ErasureCodingPolicy getErasureCodingPolicy(Path p)
+      throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.GET_EC_POLICY);
+    final HttpOpParam.Op op = GetOpParam.Op.GETECPOLICY;
+    return new FsPathResponseRunner<ErasureCodingPolicy>(op, p) {
+      @Override
+      ErasureCodingPolicy decodeResponse(Map<?, ?> json) throws IOException {
+        return JsonUtilClient.toECPolicy((Map<?, ?>) json);
+      }
+    }.run();
+  }
+
   @Override
   public Path createSnapshot(final Path path, final String snapshotName)
       throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index 3f4f14627a5..cefa12fabca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -53,6 +53,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETALLSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
     GETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
 
+    GETECPOLICY(false, HttpURLConnection.HTTP_OK),
+
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
 
     CHECKACCESS(false, HttpURLConnection.HTTP_OK),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
index 305db46e9c2..cda1c11a967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
@@ -29,6 +29,8 @@ public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
 
     TRUNCATE(false, HttpURLConnection.HTTP_OK),
 
+    UNSETECPOLICY(false, HttpURLConnection.HTTP_OK),
+
     UNSETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
 
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
index 7bbd361633f..75b1899668a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -48,6 +48,7 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
 
     ENABLEECPOLICY(false, HttpURLConnection.HTTP_OK),
     DISABLEECPOLICY(false, HttpURLConnection.HTTP_OK),
+    SETECPOLICY(false, HttpURLConnection.HTTP_OK),
 
     ALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
     DISALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
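
With the client-side pieces above in place, the new WebHdfsFileSystem methods can be exercised like any other FileSystem call. A minimal usage sketch follows; the NameNode URI, the directory, and the assumption that the caller has the permissions required for enableECPolicy are illustrative, not part of the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    // Hypothetical usage against a NameNode at namenode.example.com:9870.
    public class WebHdfsEcPolicyUsage {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path root = new Path("webhdfs://namenode.example.com:9870/");
        WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) root.getFileSystem(conf);

        Path dir = new Path("/data/ec");
        webhdfs.mkdirs(dir);
        webhdfs.enableECPolicy("RS-10-4-1024k");              // PUT  op=ENABLEECPOLICY
        webhdfs.setErasureCodingPolicy(dir, "RS-10-4-1024k"); // PUT  op=SETECPOLICY
        System.out.println("EC policy on " + dir + ": "
            + webhdfs.getErasureCodingPolicy(dir).getName()); // GET  op=GETECPOLICY
        webhdfs.unsetErasureCodingPolicy(dir);                // POST op=UNSETECPOLICY
      }
    }
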
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 6940f2ca41e..e42b63256ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -804,12 +805,14 @@ public class NamenodeWebHdfsMethods {
       validateOpParams(op, ecpolicy);
       cp.enableErasureCodingPolicy(ecpolicy.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
-
     case DISABLEECPOLICY:
       validateOpParams(op, ecpolicy);
       cp.disableErasureCodingPolicy(ecpolicy.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
-
+    case SETECPOLICY:
+      validateOpParams(op, ecpolicy);
+      cp.setErasureCodingPolicy(fullpath, ecpolicy.getValue());
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
@@ -937,6 +940,9 @@ public class NamenodeWebHdfsMethods {
       cp.unsetStoragePolicy(fullpath);
       return Response.ok().build();
     }
+    case UNSETECPOLICY:
+      cp.unsetErasureCodingPolicy(fullpath);
+      return Response.ok().build();
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
@@ -1247,6 +1253,11 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(storagePolicy);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETECPOLICY: {
+      ErasureCodingPolicy ecpolicy = cp.getErasureCodingPolicy(fullpath);
+      final String js = JsonUtil.toJsonString(ecpolicy);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case GETSERVERDEFAULTS: {
       // Since none of the server defaults values are hot reloaded, we can
       // cache the output of serverDefaults.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 383eda09b16..1c696344ebe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -53,6 +53,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`GETSNAPSHOTDIFF`](#Get_Snapshot_Diff)
     * [`GETSNAPSHOTTABLEDIRECTORYLIST`](#Get_Snapshottable_Directory_List)
     * [`GETFILEBLOCKLOCATIONS`](#Get_File_Block_Locations) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileBlockLocations)
+    * [`GETECPOLICY`](#Get_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).getErasureCodingPolicy)
 * HTTP PUT
     * [`CREATE`](#Create_and_Write_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).create)
     * [`MKDIRS`](#Make_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).mkdirs)
@@ -71,11 +72,13 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`SETSTORAGEPOLICY`](#Set_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setStoragePolicy)
     * [`ENABLEECPOLICY`](#Enable_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy)
     * [`DISABLEECPOLICY`](#Disable_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy)
+    * [`SETECPOLICY`](#Set_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).setErasureCodingPolicy)
 * HTTP POST
     * [`APPEND`](#Append_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).append)
     * [`CONCAT`](#Concat_Files) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
     * [`TRUNCATE`](#Truncate_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).truncate)
     * [`UNSETSTORAGEPOLICY`](#Unset_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).unsetStoragePolicy)
+    * [`UNSETECPOLICY`](#Unset_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).unsetErasureCodingPolicy)
 * HTTP DELETE
     * [`DELETE`](#Delete_a_FileDirectory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).delete)
     * [`DELETESNAPSHOT`](#Delete_Snapshot) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).deleteSnapshot)
@@ -1283,7 +1286,7 @@ Erasure Coding Operations
         HTTP/1.1 200 OK
         Content-Length: 0
 
-See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy)
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy
 
 ### Disable EC Policy
 
@@ -1297,7 +1300,68 @@ See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).
         HTTP/1.1 200 OK
         Content-Length: 0
 
-See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy)
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy
+
+### Set EC Policy
+
+* Submit a HTTP PUT request.
+
+        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETECPOLICY
+                              &ecpolicy=<policy>"
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).setErasureCodingPolicy
+
+### Get EC Policy
+
+* Submit a HTTP GET request.
+
+        curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETECPOLICY
+                              "
+
+    The client receives a response with a [`ECPolicy` JSON object](#ECPolicy_JSON_Schema):
+
+
+        {
+            "name": "RS-10-4-1024k",
+            "schema":
+            {
+                "codecName": "rs",
+                "numDataUnits": 10,
+                "numParityUnits": 4,
+                "extraOptions": {}
+            },
+            "cellSize": 1048576,
+            "id": 5,
+            "codecname": "rs",
+            "numDataUnits": 10,
+            "numParityUnits": 4,
+            "replicationpolicy": false,
+            "systemPolicy": true
+
+        }
+
+
+
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).getErasureCodingPolicy
+
+### Unset EC Policy
+
+* Submit a HTTP POST request.
+
+        curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=UNSETECPOLICY
+                              "
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).unsetErasureCodingPolicy
 
 Snapshot Operations
 -------------------
@@ -2149,6 +2213,26 @@ var blockStoragePolicyProperties =
   }
 };
 ```
+### ECPolicy JSON Schema
+
+```json
+{
+  "name": "RS-10-4-1024k",
+  "schema": {
+    "codecName": "rs",
+    "numDataUnits": 10,
+    "numParityUnits": 4,
+    "extraOptions": {}
+  },
+  "cellSize": 1048576,
+  "id": 5,
+  "codecname": "rs",
+  "numDataUnits": 10,
+  "numParityUnits": 4,
+  "replicationpolicy": false,
+  "systemPolicy": true
+}
+```
 
 ### BlockStoragePolicies JSON Schema
 
@@ -2220,6 +2304,7 @@ A `BlockStoragePolicies` JSON object represents an array of `BlockStoragePolicy`
 }
 ```
 
+
 #### DiffReport Entries
 
 JavaScript syntax is used to define `diffReportEntries` so that it can be referred in `SnapshotDiffReport` JSON schema.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 8173bca0e1c..40176573ec9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -1623,7 +1623,7 @@ public class TestWebHDFS {
 
   // Test For Enable/Disable EC Policy in DFS.
   @Test
-  public void testEnableDisableECPolicy() throws Exception {
+  public void testECPolicyCommands() throws Exception {
     Configuration conf = new HdfsConfiguration();
     try (MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
@@ -1632,12 +1632,24 @@ public class TestWebHDFS {
       final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
           .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
       String policy = "RS-10-4-1024k";
-      // Check for Enable EC policy via WEBHDFS.
       dfs.disableErasureCodingPolicy(policy);
       checkECPolicyState(dfs.getAllErasureCodingPolicies(), policy, "disable");
-      webHdfs.enableECPolicy("RS-10-4-1024k");
+      webHdfs.enableECPolicy(policy);
       checkECPolicyState(dfs.getAllErasureCodingPolicies(), policy, "enable");
 
+      Path dir = new Path("/tmp");
+      dfs.mkdirs(dir);
+      // Check for Set EC policy via WEBHDFS
+      assertNull(dfs.getErasureCodingPolicy(dir));
+      webHdfs.setErasureCodingPolicy(dir, policy);
+      assertEquals(policy, dfs.getErasureCodingPolicy(dir).getName());
+
+      // Check for Get EC policy via WEBHDFS
+      assertEquals(policy, webHdfs.getErasureCodingPolicy(dir).getName());
+
+      // Check for Unset EC policy via WEBHDFS
+      webHdfs.unsetErasureCodingPolicy(dir);
+      assertNull(dfs.getErasureCodingPolicy(dir));
       // Check for Disable EC policy via WEBHDFS.
       webHdfs.disableECPolicy(policy);
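
Beyond the Java client, the REST operations documented in the WebHDFS.md changes above can be exercised directly over HTTP. The sketch below walks the three new calls with plain HttpURLConnection; the NameNode address, target path, and user.name value are placeholders, and simple (non-Kerberos) authentication is assumed:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    // Hypothetical raw-HTTP walk-through of SETECPOLICY, GETECPOLICY and
    // UNSETECPOLICY; these ops are answered by the NameNode itself, so no
    // DataNode redirect handling is needed.
    public class WebHdfsEcRestSketch {

      private static void call(String method, String urlSpec) throws Exception {
        HttpURLConnection conn =
            (HttpURLConnection) new URL(urlSpec).openConnection();
        conn.setRequestMethod(method);
        int code = conn.getResponseCode();
        System.out.println(method + " " + urlSpec + " -> HTTP " + code);
        if ("GET".equals(method) && code == HttpURLConnection.HTTP_OK) {
          // GETECPOLICY returns the ECPolicy JSON object documented above.
          try (BufferedReader in = new BufferedReader(new InputStreamReader(
              conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
              System.out.println(line);
            }
          }
        }
        conn.disconnect();
      }

      public static void main(String[] args) throws Exception {
        String base = "http://namenode.example.com:9870/webhdfs/v1/data/ec";
        String auth = "&user.name=hdfs";
        call("PUT", base + "?op=SETECPOLICY&ecpolicy=RS-10-4-1024k" + auth);
        call("GET", base + "?op=GETECPOLICY" + auth);
        call("POST", base + "?op=UNSETECPOLICY" + auth);
      }
    }
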