HDFS-14064. WEBHDFS: Support Enable/Disable EC Policy. Contributed by Ayush Saxena.

(cherry picked from commit 15d8f592b51d08a658e88c6a7a7596e0edf5793b)
(cherry picked from commit 7e10dd03b5)

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
Author: Brahma Reddy Battula, 2018-11-22 00:06:22 +05:30; committed by Wei-Chiu Chuang
parent aeb3ae1c4c
commit ddea9d68a0
8 changed files with 180 additions and 7 deletions

File: org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -1308,6 +1308,16 @@ public class WebHdfsFileSystem extends FileSystem
    new FsPathRunner(op, p).run();
  }

  public void enableECPolicy(String policyName) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.ENABLEECPOLICY;
    new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
  }

  public void disableECPolicy(String policyName) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.DISABLEECPOLICY;
    new FsPathRunner(op, null, new ECPolicyParam(policyName)).run();
  }

  @Override
  public Path createSnapshot(final Path path, final String snapshotName)
      throws IOException {
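
A minimal client-side sketch (not part of the patch) of how these new methods would be invoked; the NameNode address and policy name below are illustrative:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class EcPolicyToggle {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "webhdfs" scheme resolves to WebHdfsFileSystem.
    try (WebHdfsFileSystem webHdfs = (WebHdfsFileSystem) FileSystem.get(
        URI.create("webhdfs://namenode.example.com:9870"), conf)) {
      // Issues PUT /webhdfs/v1/?op=ENABLEECPOLICY&ecpolicy=RS-6-3-1024k
      webHdfs.enableECPolicy("RS-6-3-1024k");
      // Issues PUT /webhdfs/v1/?op=DISABLEECPOLICY&ecpolicy=RS-6-3-1024k
      webHdfs.disableECPolicy("RS-6-3-1024k");
    }
  }
}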

File: org/apache/hadoop/hdfs/web/resources/ECPolicyParam.java (new file)

@@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
/** Erasure coding policy parameter. */
public class ECPolicyParam extends StringParam {
/** Parameter name. */
public static final String NAME = "ecpolicy";
/** Default parameter value. */
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
/**
* Constructor.
*
* @param str a string representation of the parameter value.
*/
public ECPolicyParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
}
@Override
public String getName() {
return NAME;
}
}
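
A quick illustration (not part of the patch) of the constructor's null/empty handling; the policy name is illustrative:

ECPolicyParam p = new ECPolicyParam("RS-6-3-1024k");
p.getValue();                        // "RS-6-3-1024k"
new ECPolicyParam("").getValue();    // null: the empty default counts as unset
new ECPolicyParam(null).getValue();  // null, so the client omits ecpolicy from the URL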

File: org/apache/hadoop/hdfs/web/resources/PutOpParam.java

@@ -46,6 +46,9 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
    SETXATTR(false, HttpURLConnection.HTTP_OK),
    REMOVEXATTR(false, HttpURLConnection.HTTP_OK),

    ENABLEECPOLICY(false, HttpURLConnection.HTTP_OK),
    DISABLEECPOLICY(false, HttpURLConnection.HTTP_OK),

    ALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
    DISALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
    CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
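For reference, the two enum constructor arguments indicate whether the operation streams a request body (and therefore redirects to a DataNode) and the expected HTTP response code; both new operations are metadata-only NameNode calls, hence false and HTTP_OK.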

File: org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.ECPolicyParam;
import org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam;
import org.apache.hadoop.hdfs.web.resources.FsActionParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
@@ -215,7 +216,8 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
      final ExcludeDatanodesParam exclDatanodes,
      final CreateFlagParam createFlagParam,
      final NoRedirectParam noredirectParam,
      final StoragePolicyParam policyName,
      final ECPolicyParam ecpolicy
      ) throws IOException, URISyntaxException {
    switch(op.getValue()) {
@@ -252,6 +254,8 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
    case RENAMESNAPSHOT:
    case DISALLOWSNAPSHOT:
    case SETSTORAGEPOLICY:
    case ENABLEECPOLICY:
    case DISABLEECPOLICY:
    {
      // Whitelist operations that can be handled by NamenodeWebHdfsMethods
      return super.put(ugi, delegation, username, doAsUser, fullpath, op,
@@ -260,7 +264,7 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
          accessTime, renameOptions, createParent, delegationTokenArgument,
          aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
          oldSnapshotName, exclDatanodes, createFlagParam, noredirectParam,
          policyName, ecpolicy);
    }
    default:
      throw new UnsupportedOperationException(op + " is not supported");

File: org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -500,14 +500,16 @@ public class NamenodeWebHdfsMethods {
      @QueryParam(NoRedirectParam.NAME) @DefaultValue(NoRedirectParam.DEFAULT)
          final NoRedirectParam noredirect,
      @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
          .DEFAULT) final StoragePolicyParam policyName,
      @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam
          .DEFAULT) final ECPolicyParam ecpolicy
      ) throws IOException, InterruptedException {
    return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
        owner, group, permission, unmaskedPermission, overwrite, bufferSize,
        replication, blockSize, modificationTime, accessTime, renameOptions,
        createParent, delegationTokenArgument, aclPermission, xattrName,
        xattrValue, xattrSetFlag, snapshotName, oldSnapshotName,
        excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy);
  }

  /** Validate all required params. */
/** Validate all required params. */ /** Validate all required params. */
@@ -587,7 +589,9 @@ public class NamenodeWebHdfsMethods {
      @QueryParam(NoRedirectParam.NAME) @DefaultValue(NoRedirectParam.DEFAULT)
          final NoRedirectParam noredirect,
      @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
          .DEFAULT) final StoragePolicyParam policyName,
      @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam.DEFAULT)
          final ECPolicyParam ecpolicy
      ) throws IOException, InterruptedException {

    init(ugi, delegation, username, doAsUser, path, op, destination, owner,
@@ -607,7 +611,7 @@ public class NamenodeWebHdfsMethods {
            renameOptions, createParent, delegationTokenArgument,
            aclPermission, xattrName, xattrValue, xattrSetFlag,
            snapshotName, oldSnapshotName, excludeDatanodes,
            createFlagParam, noredirect, policyName, ecpolicy);
      }
    });
  }
@@ -642,7 +646,8 @@ public class NamenodeWebHdfsMethods {
      final ExcludeDatanodesParam exclDatanodes,
      final CreateFlagParam createFlagParam,
      final NoRedirectParam noredirectParam,
      final StoragePolicyParam policyName,
      final ECPolicyParam ecpolicy
      ) throws IOException, URISyntaxException {
    final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
    final ClientProtocol cp = getRpcClientProtocol();
@@ -803,6 +808,16 @@ public class NamenodeWebHdfsMethods {
      cp.setStoragePolicy(fullpath, policyName.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    case ENABLEECPOLICY:
      validateOpParams(op, ecpolicy);
      cp.enableErasureCodingPolicy(ecpolicy.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    case DISABLEECPOLICY:
      validateOpParams(op, ecpolicy);
      cp.disableErasureCodingPolicy(ecpolicy.getValue());
      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
    default:
      throw new UnsupportedOperationException(op + " is not supported");
    }
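Note that validateOpParams rejects a request whose required parameter is null or empty, so a PUT with op=ENABLEECPOLICY but no ecpolicy value should fail fast (WebHDFS surfaces the resulting IllegalArgumentException as 400 Bad Request) rather than reaching the ClientProtocol call.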

File: WebHDFS.md (hadoop-hdfs site documentation)

@@ -69,6 +69,8 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
    * [`SETXATTR`](#Set_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setXAttr)
    * [`REMOVEXATTR`](#Remove_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).removeXAttr)
    * [`SETSTORAGEPOLICY`](#Set_Storage_Policy) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setStoragePolicy)
    * [`ENABLEECPOLICY`](#Enable_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy)
    * [`DISABLEECPOLICY`](#Disable_EC_Policy) (see [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy)
* HTTP POST
    * [`APPEND`](#Append_to_a_File) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).append)
    * [`CONCAT`](#Concat_Files) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).concat)
@@ -1316,6 +1318,37 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttrs

See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listXAttrs

Erasure Coding Operations
-------------------------

### Enable EC Policy

* Submit an HTTP PUT request.

        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/?op=ENABLEECPOLICY
                      &ecpolicy=<policy>"

    The client receives a response with zero content length:

        HTTP/1.1 200 OK
        Content-Length: 0

See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).enablePolicy

### Disable EC Policy

* Submit an HTTP PUT request.

        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/?op=DISABLEECPOLICY
                      &ecpolicy=<policy>"

    The client receives a response with zero content length:

        HTTP/1.1 200 OK
        Content-Length: 0

See also: [HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).disablePolicy

Snapshot Operations
-------------------
@@ -2851,6 +2884,18 @@ See also: [Create and Write to a File](#Create_and_Write_to_a_File)

See also: [`SETSTORAGEPOLICY`](#Set_Storage_Policy)

### Erasure Coding Policy

| Name | `ecpolicy` |
|:---- |:---- |
| Description | The name of the erasure coding policy. |
| Type | String |
| Default Value | \<empty\> |
| Valid Values | Any valid erasure coding policy name. |
| Syntax | Any string. |

See also: [`ENABLEECPOLICY`](#Enable_EC_Policy) or [`DISABLEECPOLICY`](#Disable_EC_Policy)

### Start After

| Name | `startAfter` |

File: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -47,6 +47,8 @@ import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.Random;

import com.google.common.collect.ImmutableList;
@@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@ -1549,6 +1552,49 @@ public class TestWebHDFS {
} }
} }
private void checkECPolicyState(Collection<ErasureCodingPolicyInfo> policies,
String ecpolicy, String state) {
Iterator<ErasureCodingPolicyInfo> itr = policies.iterator();
boolean found = false;
while (policies.iterator().hasNext()) {
ErasureCodingPolicyInfo policy = itr.next();
if (policy.getPolicy().getName().equals(ecpolicy)) {
found = true;
if (state.equals("disable")) {
Assert.assertTrue(policy.isDisabled());
} else if (state.equals("enable")) {
Assert.assertTrue(policy.isEnabled());
}
break;
}
}
Assert.assertTrue(found);
}
  // Tests enabling and disabling an EC policy through WebHDFS.
  @Test
  public void testEnableDisableECPolicy() throws Exception {
    Configuration conf = new HdfsConfiguration();
    try (MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(0).build()) {
      cluster.waitActive();
      final DistributedFileSystem dfs = cluster.getFileSystem();
      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);

      String policy = "RS-10-4-1024k";
      // Check enabling the EC policy via WebHDFS.
      dfs.disableErasureCodingPolicy(policy);
      checkECPolicyState(dfs.getAllErasureCodingPolicies(), policy, "disable");
      webHdfs.enableECPolicy(policy);
      checkECPolicyState(dfs.getAllErasureCodingPolicies(), policy, "enable");

      // Check disabling the EC policy via WebHDFS.
      webHdfs.disableECPolicy(policy);
      checkECPolicyState(dfs.getAllErasureCodingPolicies(), policy, "disable");
    }
  }
  @Test
  public void testWebHdfsAppend() throws Exception {
    MiniDFSCluster cluster = null;

File: org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -505,6 +505,14 @@ public class TestParam {
    Assert.assertEquals("COLD", p.getValue());
  }

  @Test
  public void testECPolicyParam() {
    ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);
    Assert.assertEquals(null, p.getValue());
    p = new ECPolicyParam("RS-6-3-1024k");
    Assert.assertEquals("RS-6-3-1024k", p.getValue());
  }

  @Test
  public void testHttpOpParams() {
    try {