HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar B)

Vinayakumar B 2015-04-15 16:38:22 +05:30 committed by Zhe Zhang
parent 2c277802c1
commit ceb3d1c170
18 changed files with 502 additions and 5 deletions

View File

@@ -111,6 +111,10 @@ public class FsShell extends Configured implements Tool {
return getTrash().getCurrentTrashDir();
}
protected String getUsagePrefix() {
return usagePrefix;
}
// NOTE: Usage/Help are inner classes to allow access to outer methods
// that access commandFactory
@@ -194,7 +198,7 @@ public class FsShell extends Configured implements Tool {
}
} else {
// display help or usage for all commands
out.println(usagePrefix);
out.println(getUsagePrefix());
// display list of short usages
ArrayList<Command> instances = new ArrayList<Command>();
@@ -218,7 +222,7 @@ public class FsShell extends Configured implements Tool {
}
private void printInstanceUsage(PrintStream out, Command instance) {
out.println(usagePrefix + " " + instance.getUsage());
out.println(getUsagePrefix() + " " + instance.getUsage());
}
private void printInstanceHelp(PrintStream out, Command instance) {

View File

@@ -79,4 +79,6 @@
operation fails. (Rakesh R via Zhe Zhang)
HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
separate erasurecoding proto file (Rakesh R via vinayakumarb)
HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)

View File

@@ -134,6 +134,11 @@ case ${COMMAND} in
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
erasurecode)
CLASS=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;
fetchdt)
CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
;;

View File

@@ -120,6 +120,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECInfo;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -3351,4 +3352,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
return scope;
}
/**
* Get the erasure coding zone information for the specified path.
*
* @param src path to get the information for
* @return the zone information if the path is in an EC zone, null otherwise
* @throws IOException
*/
public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
checkOpen();
try {
return namenode.getErasureCodingZoneInfo(src);
} catch (RemoteException re) {
throw re.unwrapRemoteException(FileNotFoundException.class,
AccessControlException.class, UnresolvedPathException.class);
}
}
}

View File

@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -2306,4 +2307,35 @@ public class DistributedFileSystem extends FileSystem {
}
}.resolve(this, absF);
}
/**
* Get erasure coding zone information for the specified path.
*
* @param path the path to get the zone information for
* @return the zone information if the path is in an EC zone, null otherwise
* @throws IOException
*/
public ECZoneInfo getErasureCodingZoneInfo(final Path path)
throws IOException {
Path absF = fixRelativePart(path);
return new FileSystemLinkResolver<ECZoneInfo>() {
@Override
public ECZoneInfo doCall(final Path p) throws IOException,
UnresolvedLinkException {
return dfs.getErasureCodingZoneInfo(getPathName(p));
}
@Override
public ECZoneInfo next(final FileSystem fs, final Path p)
throws IOException {
if (fs instanceof DistributedFileSystem) {
DistributedFileSystem myDfs = (DistributedFileSystem) fs;
return myDfs.getErasureCodingZoneInfo(p);
}
throw new UnsupportedOperationException(
"Cannot getErasureCodingZoneInfo through a symlink to a "
+ "non-DistributedFileSystem: " + path + " -> " + p);
}
}.resolve(this, absF);
}
}
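For context, a minimal client-side sketch of how the new getErasureCodingZoneInfo API could be exercised. The ECZoneInfoExample class and the /ecdata path are hypothetical illustrations, not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;

public class ECZoneInfoExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      System.err.println("Not running against HDFS");
      return;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // getErasureCodingZoneInfo returns null when the path is not in an EC zone.
    ECZoneInfo info = dfs.getErasureCodingZoneInfo(new Path("/ecdata"));
    System.out.println(info == null ? "Not in an EC zone" : info.toString());
  }
}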

View File

@@ -1484,4 +1484,13 @@ public interface ClientProtocol {
*/
@Idempotent
public ECSchema[] getECSchemas() throws IOException;
/**
* Get the information about the EC zone for the given path.
*
* @param src path to get the info for
* @return the zone information if the path is in an EC zone, null otherwise
* @throws IOException
*/
@Idempotent
public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException;
}

View File

@@ -0,0 +1,56 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.io.erasurecode.ECSchema;
/**
* Information about the EC Zone at the specified path.
*/
public class ECZoneInfo {
private String dir;
private ECSchema schema;
public ECZoneInfo(String dir, ECSchema schema) {
this.dir = dir;
this.schema = schema;
}
/**
* Get the directory of the EC zone.
*
* @return the directory path of the EC zone
*/
public String getDir() {
return dir;
}
/**
* Get the schema for the EC zone.
*
* @return the erasure coding schema of the zone
*/
public ECSchema getSchema() {
return schema;
}
@Override
public String toString() {
return "Dir: " + getDir() + ", Schema: " + schema;
}
}

View File

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECInfo;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -201,6 +202,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptio
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -1551,4 +1554,19 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e);
}
}
@Override
public GetECZoneInfoResponseProto getErasureCodingZoneInfo(RpcController controller,
GetECZoneInfoRequestProto request) throws ServiceException {
try {
ECZoneInfo ecZoneInfo = server.getErasureCodingZoneInfo(request.getSrc());
GetECZoneInfoResponseProto.Builder builder = GetECZoneInfoResponseProto.newBuilder();
if (ecZoneInfo != null) {
builder.setECZoneInfo(PBHelper.convertECZoneInfo(ecZoneInfo));
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
}

View File

@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECInfo;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -167,6 +168,8 @@ import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathR
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECSchemasResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetECZoneInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CreateErasureCodingZoneRequestProto;
@@ -1576,4 +1579,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
GetECZoneInfoRequestProto req = GetECZoneInfoRequestProto.newBuilder()
.setSrc(src).build();
try {
GetECZoneInfoResponseProto response = rpcProxy.getErasureCodingZoneInfo(
null, req);
if (response.hasECZoneInfo()) {
return PBHelper.convertECZoneInfo(response.getECZoneInfo());
}
return null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}

View File

@@ -76,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -134,6 +135,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportC
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECSchemaOptionEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECSchemaProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECZoneInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -3138,4 +3140,14 @@ public class PBHelper {
}
return builder.build();
}
public static ECZoneInfoProto convertECZoneInfo(ECZoneInfo ecZoneInfo) {
return ECZoneInfoProto.newBuilder().setDir(ecZoneInfo.getDir())
.setSchema(convertECSchema(ecZoneInfo.getSchema())).build();
}
public static ECZoneInfo convertECZoneInfo(ECZoneInfoProto ecZoneInfoProto) {
return new ECZoneInfo(ecZoneInfoProto.getDir(),
convertECSchema(ecZoneInfoProto.getSchema()));
}
}

View File

@@ -22,6 +22,7 @@ import com.google.common.collect.Lists;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.ECSchemaProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.io.erasurecode.ECSchema;
@@ -57,7 +58,12 @@ public class ErasureCodingZoneManager {
return getECSchema(iip) != null;
}
ECSchema getECSchema(INodesInPath iip) throws IOException{
ECSchema getECSchema(INodesInPath iip) throws IOException {
ECZoneInfo ecZoneInfo = getECZoneInfo(iip);
return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
}
ECZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
assert dir.hasReadLock();
Preconditions.checkNotNull(iip);
List<INode> inodes = iip.getReadOnlyINodes();
@@ -80,7 +86,8 @@
if (XATTR_ERASURECODING_ZONE.equals(XAttrHelper.getPrefixName(xAttr))) {
ECSchemaProto ecSchemaProto;
ecSchemaProto = ECSchemaProto.parseFrom(xAttr.getValue());
return PBHelper.convertECSchema(ecSchemaProto);
ECSchema schema = PBHelper.convertECSchema(ecSchemaProto);
return new ECZoneInfo(inode.getFullPathName(), schema);
}
}
}

View File

@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
@@ -1249,6 +1250,15 @@ public class FSDirectory implements Closeable {
}
}
ECZoneInfo getECZoneInfo(INodesInPath iip) throws IOException {
readLock();
try {
return ecZoneManager.getECZoneInfo(iip);
} finally {
readUnlock();
}
}
static INode resolveLastINode(INodesInPath iip) throws FileNotFoundException {
INode inode = iip.getLastINode();
if (inode == null) {

View File

@@ -181,6 +181,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECInfo;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -7593,6 +7594,29 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
return null;
}
/**
* Get the erasure coding zone information for the specified path.
*/
ECZoneInfo getErasureCodingZoneInfo(String src) throws AccessControlException,
UnresolvedLinkException, IOException {
checkOperation(OperationCategory.READ);
final byte[][] pathComponents = FSDirectory
.getPathComponentsForReservedPath(src);
final FSPermissionChecker pc = getPermissionChecker();
readLock();
try {
checkOperation(OperationCategory.READ);
src = dir.resolvePath(pc, src, pathComponents);
final INodesInPath iip = dir.getINodesInPath(src, true);
if (isPermissionEnabled) {
dir.checkPathAccess(pc, iip, FsAction.READ);
}
return dir.getECZoneInfo(iip);
} finally {
readUnlock();
}
}
/**
* Get available ECSchemas
*/

View File

@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ECInfo;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSLimitException;
@@ -2043,4 +2044,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
checkNNStartup();
return namesystem.getECSchemas();
}
@Override // ClientProtocol
public ECZoneInfo getErasureCodingZoneInfo(String src) throws IOException {
checkNNStartup();
return namesystem.getErasureCodingZoneInfo(src);
}
}

View File

@@ -0,0 +1,48 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.tools.erasurecode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.util.ToolRunner;
/**
* CLI for erasure coding operations.
*/
public class ECCli extends FsShell {
private final static String usagePrefix =
"Usage: hdfs erasurecode [generic options]";
@Override
protected String getUsagePrefix() {
return usagePrefix;
}
@Override
protected void registerCommands(CommandFactory factory) {
factory.registerCommands(ECCommand.class);
}
public static void main(String[] args) throws Exception {
Configuration conf = new HdfsConfiguration();
int res = ToolRunner.run(conf, new ECCli(), args);
System.exit(res);
}
}

View File

@@ -0,0 +1,209 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.hdfs.tools.erasurecode;
import java.io.IOException;
import java.util.LinkedList;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.PathData;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ECZoneInfo;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.util.StringUtils;
/**
* Erasure Coding CLI commands
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class ECCommand extends Command {
public static void registerCommands(CommandFactory factory) {
// Register all erasure coding commands, prefixing each command name
// with a '-'.
factory.addClass(CreateECZoneCommand.class, "-" + CreateECZoneCommand.NAME);
factory.addClass(GetECZoneInfoCommand.class, "-"
+ GetECZoneInfoCommand.NAME);
factory.addClass(ListECSchemas.class, "-" + ListECSchemas.NAME);
}
@Override
public String getCommandName() {
return getName();
}
@Override
protected void run(Path path) throws IOException {
throw new RuntimeException("Not supposed to get here");
}
@Deprecated
@Override
public int runAll() {
return run(args);
}
@Override
protected void processPath(PathData item) throws IOException {
if (!(item.fs instanceof DistributedFileSystem)) {
throw new UnsupportedActionException(
"Erasure commands are only supported for the HDFS paths");
}
}
/**
* Command to create an EC zone. A zone causes files under a specific
* directory to be encoded with a specific EC schema instead of the
* default one.
*/
static class CreateECZoneCommand extends ECCommand {
public static final String NAME = "createZone";
public static final String USAGE = "[-s <schemaName>] <path>";
public static final String DESCRIPTION =
"Create a zone to encode files using a specified schema\n"
+ "Options :\n"
+ " -s <schemaName> : EC schema name to encode files. "
+ "If not passed default schema will be used\n"
+ " <path> : Path to an empty directory. Under this directory "
+ "files will be encoded using specified schema";
private String schemaName;
private ECSchema schema = null;
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
schemaName = StringUtils.popOptionWithArgument("-s", args);
if (args.isEmpty()) {
throw new HadoopIllegalArgumentException("<path> is missing");
}
if (args.size() > 1) {
throw new HadoopIllegalArgumentException("Too many arguments");
}
}
@Override
protected void processPath(PathData item) throws IOException {
super.processPath(item);
DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
try {
if (schemaName != null) {
ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
for (ECSchema ecSchema : ecSchemas) {
if (schemaName.equals(ecSchema.getSchemaName())) {
schema = ecSchema;
break;
}
}
if (schema == null) {
StringBuilder sb = new StringBuilder();
sb.append("Schema '");
sb.append(schemaName);
sb.append("' does not match any of the supported schemas.");
sb.append("Please select any one of [");
for (ECSchema ecSchema : ecSchemas) {
sb.append(ecSchema.getSchemaName());
sb.append(", ");
}
throw new HadoopIllegalArgumentException(sb.toString());
}
}
dfs.createErasureCodingZone(item.path, schema);
out.println("EC Zone created successfully at " + item.path);
} catch (IOException e) {
throw new IOException("Unable to create EC zone for the path "
+ item.path, e);
}
}
}
/**
* Get information about the EC zone at the specified path.
*/
static class GetECZoneInfoCommand extends ECCommand {
public static final String NAME = "getZoneInfo";
public static final String USAGE = "<path>";
public static final String DESCRIPTION =
"Get information about the EC zone at specified path\n";
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
if (args.isEmpty()) {
throw new HadoopIllegalArgumentException("<path> is missing");
}
if (args.size() > 1) {
throw new HadoopIllegalArgumentException("Too many arguments");
}
}
@Override
protected void processPath(PathData item) throws IOException {
super.processPath(item);
DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
try {
ECZoneInfo ecZoneInfo = dfs.getErasureCodingZoneInfo(item.path);
// getErasureCodingZoneInfo returns null when the path is not in an EC zone.
if (ecZoneInfo == null) {
out.println("Path " + item.path + " is not in an EC zone");
} else {
out.println(ecZoneInfo.toString());
}
} catch (IOException e) {
throw new IOException("Unable to get EC zone info for the path "
+ item.path, e);
}
}
}
/**
* List all supported EC Schemas
*/
static class ListECSchemas extends ECCommand {
public static final String NAME = "listSchemas";
public static final String USAGE = "";
public static final String DESCRIPTION =
"Get the list of ECSchemas supported\n";
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
if (!args.isEmpty()) {
throw new HadoopIllegalArgumentException("Too many parameters");
}
FileSystem fs = FileSystem.get(getConf());
if (!(fs instanceof DistributedFileSystem)) {
throw new UnsupportedActionException(
"Erasure commands are only supported for HDFS");
}
DistributedFileSystem dfs = (DistributedFileSystem) fs;
ECSchema[] ecSchemas = dfs.getClient().getECSchemas();
StringBuilder sb = new StringBuilder();
for (int i = 0; i < ecSchemas.length; i++) {
sb.append(ecSchemas[i].getSchemaName());
if (i < ecSchemas.length - 1) {
sb.append(", ");
}
}
out.println(sb.toString());
}
}
}
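Together with the erasurecode subcommand added to the hdfs script above, the three registered commands can be invoked from the shell. A sketch of typical invocations; the schema name and paths are placeholders, not values defined by this commit:

hdfs erasurecode -listSchemas
hdfs erasurecode -createZone -s <schemaName> /path/to/emptyDir
hdfs erasurecode -getZoneInfo /path/to/emptyDir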

View File

@@ -867,4 +867,6 @@ service ClientNamenodeProtocol {
returns(GetErasureCodingInfoResponseProto);
rpc getECSchemas(GetECSchemasRequestProto)
returns(GetECSchemasResponseProto);
rpc getErasureCodingZoneInfo(GetECZoneInfoRequestProto)
returns(GetECZoneInfoResponseProto);
}

View File

@@ -49,6 +49,13 @@ message ECInfoProto {
required ECSchemaProto schema = 2;
}
/**
* ECZoneInfo
*/
message ECZoneInfoProto {
required string dir = 1;
required ECSchemaProto schema = 2;
}
message CreateErasureCodingZoneRequestProto {
required string src = 1;
@@ -72,3 +79,11 @@ message GetECSchemasRequestProto { // void request
message GetECSchemasResponseProto {
repeated ECSchemaProto schemas = 1;
}
message GetECZoneInfoRequestProto {
required string src = 1; // path to get the zone info
}
message GetECZoneInfoResponseProto {
optional ECZoneInfoProto ECZoneInfo = 1;
}