diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
index d53d598c932..fcf13492f7b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
@@ -127,28 +127,20 @@ public final class CodecRegistry {
   /**
    * Get all coder names of the given codec.
    * @param codecName the name of codec
-   * @return an array of all coder names
+   * @return an array of all coder names, or null if the codec does not exist
    */
   public String[] getCoderNames(String codecName) {
     String[] coderNames = coderNameMap.get(codecName);
-    if (coderNames == null) {
-      throw new IllegalArgumentException("No available raw coder factory for "
-          + codecName);
-    }
     return coderNames;
   }
 
   /**
    * Get all coder factories of the given codec.
    * @param codecName the name of codec
-   * @return a list of all coder factories
+   * @return a list of all coder factories, or null if the codec does not exist
    */
   public List<RawErasureCoderFactory> getCoders(String codecName) {
     List<RawErasureCoderFactory> coders = coderMap.get(codecName);
-    if (coders == null) {
-      throw new IllegalArgumentException("No available raw coder factory for "
-          + codecName);
-    }
     return coders;
   }
 
@@ -164,7 +156,7 @@ public final class CodecRegistry {
    * Get a specific coder factory defined by codec name and coder name.
    * @param codecName name of the codec
    * @param coderName name of the coder
-   * @return the specific coder
+   * @return the specific coder, or null if it does not exist
    */
   public RawErasureCoderFactory getCoderByName(
       String codecName, String coderName) {
@@ -176,10 +168,7 @@ public final class CodecRegistry {
         return coder;
       }
     }
-
-    // if not found, throw exception
-    throw new IllegalArgumentException("No implementation for coder "
-        + coderName + " of codec " + codecName);
+    return null;
   }
 
   /**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
index 6cdbb37a022..75b8fa546c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -153,6 +153,10 @@ public final class CodecUtil {
     return fact;
   }
 
+  public static boolean hasCodec(String codecName) {
+    return (CodecRegistry.getInstance().getCoderNames(codecName) != null);
+  }
+
   // Return a list of coder names
   private static String[] getRawCoderNames(
       Configuration conf, String codecName) {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRegistry.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRegistry.java
index eb365572f58..5f17024d210 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRegistry.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRegistry.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Set;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -67,10 +68,11 @@ public class TestCodecRegistry {
     assertTrue(coders.get(1) instanceof XORRawErasureCoderFactory);
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testGetCodersWrong() {
     List<RawErasureCoderFactory> coders =
         CodecRegistry.getInstance().getCoders("WRONG_CODEC");
+    assertNull(coders);
   }
 
   @Test
@@ -123,10 +125,11 @@ public class TestCodecRegistry {
     assertTrue(coder instanceof NativeXORRawErasureCoderFactory);
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testGetCoderByNameWrong() {
     RawErasureCoderFactory coder = CodecRegistry.getInstance().
         getCoderByName(ErasureCodeConstants.RS_CODEC_NAME, "WRONG_RS");
+    assertNull(coder);
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 2e7dc1e5c15..4fa7c5f7f31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -101,7 +101,7 @@ import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2770,7 +2770,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
-  public AddingECPolicyResponse[] addErasureCodingPolicies(
+  public AddECPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     checkOpen();
     return namenode.addErasureCodingPolicies(policies);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 718c0b797bf..b65f9c2c217 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -71,7 +71,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -2549,13 +2549,16 @@ public class DistributedFileSystem extends FileSystem {
   }
 
   /**
-   * Add Erasure coding policies to HDFS.
+   * Add Erasure coding policies to HDFS. For each policy input, schema and
+   * cellSize are required; name and id are ignored, as they will be
+   * automatically created and assigned by the Namenode once the policy is
+   * successfully added, and then returned in the response.
    *
    * @param policies The user defined ec policy list to add.
   * @return Return the response list of adding operations.
   * @throws IOException
   */
-  public AddingECPolicyResponse[] addErasureCodingPolicies(
+  public AddECPolicyResponse[] addErasureCodingPolicies(
      ErasureCodingPolicy[] policies) throws IOException {
    return dfs.addErasureCodingPolicies(policies);
  }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddingECPolicyResponse.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddECPolicyResponse.java
similarity index 86%
rename from hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddingECPolicyResponse.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddECPolicyResponse.java
index ab39f09383b..e7a8435f453 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddingECPolicyResponse.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/AddECPolicyResponse.java
@@ -18,26 +18,26 @@
 package org.apache.hadoop.hdfs.protocol;
 
 /**
- * A response of adding an ErasureCoding policy.
+ * The response of adding an ErasureCoding policy.
  */
-public class AddingECPolicyResponse {
+public class AddECPolicyResponse {
   private boolean succeed;
   private ErasureCodingPolicy policy;
   private String errorMsg;
 
-  public AddingECPolicyResponse(ErasureCodingPolicy policy) {
+  public AddECPolicyResponse(ErasureCodingPolicy policy) {
     this.policy = policy;
     this.succeed = true;
   }
 
-  public AddingECPolicyResponse(ErasureCodingPolicy policy,
+  public AddECPolicyResponse(ErasureCodingPolicy policy,
       String errorMsg) {
     this.policy = policy;
     this.errorMsg = errorMsg;
     this.succeed = false;
   }
 
-  public AddingECPolicyResponse(ErasureCodingPolicy policy,
+  public AddECPolicyResponse(ErasureCodingPolicy policy,
       IllegalECPolicyException e) {
     this(policy, e.getMessage());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 4b2b7d25c9b..bf8eb4ee382 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1526,14 +1526,17 @@ public interface ClientProtocol {
       throws IOException;
 
   /**
-   * Add Erasure coding policies.
+   * Add Erasure coding policies to HDFS. For each policy input, schema and
+   * cellSize are required; name and id are ignored, as they will be
+   * automatically created and assigned by the Namenode once the policy is
+   * successfully added, and then returned in the response.
    *
    * @param policies The user defined ec policy list to add.
   * @return Return the response list of adding operations.
   * @throws IOException
   */
  @AtMostOnce
-  AddingECPolicyResponse[] addErasureCodingPolicies(
+  AddECPolicyResponse[] addErasureCodingPolicies(
      ErasureCodingPolicy[] policies) throws IOException;
 
  /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
index 99bc4e6ba9d..b63d2c06e01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java
@@ -41,6 +41,8 @@ public final class ErasureCodingPolicy {
     Preconditions.checkNotNull(name);
     Preconditions.checkNotNull(schema);
     Preconditions.checkArgument(cellSize > 0, "cellSize must be positive");
+    Preconditions.checkArgument(cellSize % 1024 == 0,
+        "cellSize must be a multiple of 1024");
     this.name = name;
     this.schema = schema;
     this.cellSize = cellSize;
@@ -51,8 +53,13 @@ public final class ErasureCodingPolicy {
     this(composePolicyName(schema, cellSize), schema, cellSize, id);
   }
 
+  public ErasureCodingPolicy(ECSchema schema, int cellSize) {
+    this(composePolicyName(schema, cellSize), schema, cellSize, (byte) -1);
+  }
+
   public static String composePolicyName(ECSchema schema, int cellSize) {
-    assert cellSize % 1024 == 0;
+    Preconditions.checkArgument(cellSize % 1024 == 0,
+        "cellSize must be a multiple of 1024");
     return schema.getCodecName().toUpperCase() + "-" +
         schema.getNumDataUnits() + "-" + schema.getNumParityUnits() +
         "-" + cellSize / 1024 + "k";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 85bbc5f1ad5..19127d63eb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -1637,7 +1637,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public AddingECPolicyResponse[] addErasureCodingPolicies(
+  public AddECPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     List<ErasureCodingPolicyProto> protos = Arrays.stream(policies)
         .map(PBHelperClient::convertErasureCodingPolicy)
         .collect(Collectors.toList());
@@ -1648,9 +1648,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
       AddErasureCodingPoliciesResponseProto rep = rpcProxy
           .addErasureCodingPolicies(null, req);
-      AddingECPolicyResponse[] responses = rep.getResponsesList().stream()
-          .map(PBHelperClient::convertAddingECPolicyResponse)
-          .toArray(AddingECPolicyResponse[]::new);
+      AddECPolicyResponse[] responses = rep.getResponsesList().stream()
+          .map(PBHelperClient::convertAddECPolicyResponse)
+          .toArray(AddECPolicyResponse[]::new);
       return responses;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index dc9a1abbff9..141a05a4420 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.inotify.Event;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.BlockType;
@@ -124,7 +124,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AccessModeProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddingECPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.AddECPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
@@ -2709,10 +2709,10 @@ public class PBHelperClient {
     return builder.build();
   }
 
-  public static AddingECPolicyResponseProto convertAddingECPolicyResponse(
-      AddingECPolicyResponse response) {
-    AddingECPolicyResponseProto.Builder builder =
-        AddingECPolicyResponseProto.newBuilder()
+  public static AddECPolicyResponseProto convertAddECPolicyResponse(
+      AddECPolicyResponse response) {
+    AddECPolicyResponseProto.Builder builder =
+        AddECPolicyResponseProto.newBuilder()
         .setPolicy(convertErasureCodingPolicy(response.getPolicy()))
         .setSucceed(response.isSucceed());
     if (!response.isSucceed()) {
@@ -2721,13 +2721,13 @@ public class PBHelperClient {
     return builder.build();
   }
 
-  public static AddingECPolicyResponse convertAddingECPolicyResponse(
-      AddingECPolicyResponseProto proto) {
+  public static AddECPolicyResponse convertAddECPolicyResponse(
+      AddECPolicyResponseProto proto) {
     ErasureCodingPolicy policy = convertErasureCodingPolicy(proto.getPolicy());
     if (proto.getSucceed()) {
-      return new AddingECPolicyResponse(policy);
+      return new AddECPolicyResponse(policy);
     } else {
-      return new AddingECPolicyResponse(policy, proto.getErrorMsg());
+      return new AddECPolicyResponse(policy, proto.getErrorMsg());
     }
   }
 
@@ -2765,4 +2765,4 @@ public class PBHelperClient {
     }
     return ret;
   }
-}
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java
index 02ae25596bb..fb15926dce1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ECPolicyLoader.java
@@ -316,7 +316,7 @@ public class ECPolicyLoader {
     }
 
     if (schema != null && cellSize > 0) {
-      return new ErasureCodingPolicy(schema, cellSize, (byte) -1);
+      return new ErasureCodingPolicy(schema, cellSize);
     } else {
       throw new RuntimeException("Bad policy is found in"
           + " EC policy configuration file");
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
index 740d47d2d63..ea878ec9742 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
@@ -58,7 +58,7 @@ message AddErasureCodingPoliciesRequestProto {
 }
 
 message AddErasureCodingPoliciesResponseProto {
-  repeated AddingECPolicyResponseProto responses = 1;
+  repeated AddECPolicyResponseProto responses = 1;
 }
 
 message UnsetErasureCodingPolicyRequestProto {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
index 08ed3c8e6a6..10ea5d7d728 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
@@ -371,7 +371,7 @@ message ErasureCodingPolicyProto {
   required uint32 id = 4; // Actually a byte - only 8 bits used
 }
 
-message AddingECPolicyResponseProto {
+message AddECPolicyResponseProto {
   required ErasureCodingPolicyProto policy = 1;
   required bool succeed = 2;
   optional string errorMsg = 3;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
index 643bbe782d8..17fb01c8d2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestErasureCodingPolicy.java
@@ -56,11 +56,11 @@ public class TestErasureCodingPolicy {
   @Test
   public void testEqualsAndHashCode() {
     ErasureCodingPolicy[] policies = new ErasureCodingPolicy[]{
-        new ErasureCodingPolicy("one", SCHEMA_1, 321, (byte) 1),
-        new ErasureCodingPolicy("two", SCHEMA_1, 321, (byte) 1),
-        new ErasureCodingPolicy("one", SCHEMA_2, 321, (byte) 1),
-        new ErasureCodingPolicy("one", SCHEMA_1, 123, (byte) 1),
-        new ErasureCodingPolicy("one", SCHEMA_1, 321, (byte) 3),
+        new ErasureCodingPolicy("one", SCHEMA_1, 1024, (byte) 1),
+        new ErasureCodingPolicy("two", SCHEMA_1, 1024, (byte) 1),
+        new ErasureCodingPolicy("one", SCHEMA_2, 1024, (byte) 1),
+        new ErasureCodingPolicy("one", SCHEMA_1, 2048, (byte) 1),
+        new ErasureCodingPolicy("one", SCHEMA_1, 1024, (byte) 3),
     };
 
     for (int i = 0; i < policies.length; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/user_ec_policies.xml.template b/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/user_ec_policies.xml.template
index 66546bbb61d..8510b35aae6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/user_ec_policies.xml.template
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/conf/user_ec_policies.xml.template
@@ -61,6 +61,7 @@
     <schema>XORk2m1</schema>
+    <cellsize>131072</cellsize>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b95c7e6d32c..b3784b225b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -570,6 +570,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_KEY = "dfs.namenode.ec.policies.enabled";
   public static final String DFS_NAMENODE_EC_POLICIES_ENABLED_DEFAULT = "";
+  public static final String DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY = "dfs.namenode.ec.policies.max.cellsize";
+  public static final int DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT = 4 * 1024 * 1024;
   public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_KEY = "dfs.datanode.ec.reconstruction.stripedread.threads";
   public static final int DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_THREADS_DEFAULT = 20;
   public static final String DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY = "dfs.datanode.ec.reconstruction.stripedread.buffer.size";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index af4cd9257ad..f10ce44552d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -37,7 +37,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.QuotaUsage;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -1649,11 +1649,11 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       ErasureCodingPolicy[] policies = request.getEcPoliciesList().stream()
           .map(PBHelperClient::convertErasureCodingPolicy)
           .toArray(ErasureCodingPolicy[]::new);
-      AddingECPolicyResponse[] result = server
+      AddECPolicyResponse[] result = server
           .addErasureCodingPolicies(policies);
-      List<AddingECPolicyResponseProto> responseProtos = Arrays
-          .stream(result).map(PBHelperClient::convertAddingECPolicyResponse)
+      List<AddECPolicyResponseProto> responseProtos = Arrays
+          .stream(result).map(PBHelperClient::convertAddECPolicyResponse)
           .collect(Collectors.toList());
       AddErasureCodingPoliciesResponseProto response =
           AddErasureCodingPoliciesResponseProto.newBuilder()
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
index 4f27ed8a403..0feb79c2ef5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
@@ -25,7 +25,10 @@
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import java.util.List;
+import org.apache.hadoop.io.erasurecode.CodecUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.stream.Collectors;
@@ -40,7 +43,12 @@ import java.util.stream.Stream;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
 public final class ErasureCodingPolicyManager {
-  private static final byte USER_DEFINED_POLICY_START_ID = 32;
+
+  public static Logger LOG = LoggerFactory.getLogger(
+      ErasureCodingPolicyManager.class);
+  private static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
+  private int maxCellSize =
+      DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT;
 
   // Supported storage policies for striped EC files
   private static final byte[] SUITABLE_STORAGE_POLICIES_FOR_EC_STRIPED_MODE =
@@ -77,10 +85,6 @@ public final class ErasureCodingPolicyManager {
   private ErasureCodingPolicyManager() {}
 
   public void init(Configuration conf) {
-    this.loadPolicies(conf);
-  }
-
-  private void loadPolicies(Configuration conf) {
     // Populate the list of enabled policies from configuration
     final String[] policyNames = conf.getTrimmedStrings(
         DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
@@ -95,20 +99,30 @@ public final class ErasureCodingPolicyManager {
       ErasureCodingPolicy ecPolicy =
           SystemErasureCodingPolicies.getByName(policyName);
       if (ecPolicy == null) {
-        String sysPolicies = SystemErasureCodingPolicies.getPolicies().stream()
-            .map(ErasureCodingPolicy::getName)
-            .collect(Collectors.joining(", "));
-        String msg = String.format("EC policy '%s' specified at %s is not a " +
-            "valid policy. Please choose from list of available policies: " +
-            "[%s]",
-            policyName,
-            DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-            sysPolicies);
-        throw new IllegalArgumentException(msg);
+        ecPolicy = userPoliciesByName.get(policyName);
+        if (ecPolicy == null) {
+          String allPolicies = SystemErasureCodingPolicies.getPolicies()
+              .stream().map(ErasureCodingPolicy::getName)
+              .collect(Collectors.joining(", ")) + ", " +
+              userPoliciesByName.values().stream()
+              .map(ErasureCodingPolicy::getName)
+              .collect(Collectors.joining(", "));
+          String msg = String.format("EC policy '%s' specified at %s is not a " +
+              "valid policy. Please choose from the list of available " +
+              "policies: [%s]",
+              policyName,
+              DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+              allPolicies);
+          throw new IllegalArgumentException(msg);
+        }
       }
       enabledPoliciesByName.put(ecPolicy.getName(), ecPolicy);
     }
+    maxCellSize = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_KEY,
+        DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_MAX_CELLSIZE_DEFAULT);
+
     /**
      * TODO: HDFS-7859 persist into NameNode
      * load persistent policies from image and editlog, which is done only once
@@ -153,9 +167,10 @@ public final class ErasureCodingPolicyManager {
    * Get all system defined policies and user defined policies.
    * @return all policies
    */
-  public List<ErasureCodingPolicy> getPolicies() {
+  public ErasureCodingPolicy[] getPolicies() {
     return Stream.concat(SystemErasureCodingPolicies.getPolicies().stream(),
-        this.userPoliciesByID.values().stream()).collect(Collectors.toList());
+        userPoliciesByName.values().stream())
+        .toArray(ErasureCodingPolicy[]::new);
   }
 
   /**
@@ -190,24 +205,37 @@ public final class ErasureCodingPolicyManager {
     // This is a placeholder for HDFS-7337.
   }
 
-  public synchronized void addPolicy(ErasureCodingPolicy policy)
+  public synchronized ErasureCodingPolicy addPolicy(ErasureCodingPolicy policy)
       throws IllegalECPolicyException {
+    if (!CodecUtil.hasCodec(policy.getCodecName())) {
+      throw new IllegalECPolicyException("Codec name "
+          + policy.getCodecName() + " is not supported");
+    }
+
+    if (policy.getCellSize() > maxCellSize) {
+      throw new IllegalECPolicyException("Cell size " + policy.getCellSize()
+          + " should not exceed the maximum " + maxCellSize + " bytes");
+    }
+
     String assignedNewName = ErasureCodingPolicy.composePolicyName(
         policy.getSchema(), policy.getCellSize());
     for (ErasureCodingPolicy p : getPolicies()) {
       if (p.getName().equals(assignedNewName)) {
-        throw new IllegalECPolicyException("The policy name already exists");
+        throw new IllegalECPolicyException("The policy name " + assignedNewName
+            + " already exists");
       }
       if (p.getSchema().equals(policy.getSchema()) &&
           p.getCellSize() == policy.getCellSize()) {
-        throw new IllegalECPolicyException("A policy with same schema and " +
-            "cell size already exists");
+        throw new IllegalECPolicyException("A policy with the same schema "
+            + policy.getSchema().toString() + " and cell size "
+            + p.getCellSize() + " already exists");
       }
     }
     policy.setName(assignedNewName);
     policy.setId(getNextAvailablePolicyID());
     this.userPoliciesByName.put(policy.getName(), policy);
     this.userPoliciesByID.put(policy.getId(), policy);
+    return policy;
   }
 
   private byte getNextAvailablePolicyID() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 7b1cd2c7a75..a875e4b50a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -212,10 +212,10 @@ final class FSDirErasureCodingOp {
     return fsd.getAuditFileInfo(iip);
   }
 
-  static void addErasureCodePolicy(final FSNamesystem fsn,
+  static ErasureCodingPolicy addErasureCodePolicy(final FSNamesystem fsn,
       ErasureCodingPolicy policy) throws IllegalECPolicyException {
     Preconditions.checkNotNull(policy);
-    fsn.getErasureCodingPolicyManager().addPolicy(policy);
+    return fsn.getErasureCodingPolicyManager().addPolicy(policy);
   }
 
   private static List<XAttr> removeErasureCodingPolicyXAttr(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index c17488d561d..15e52b7ffd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -88,6 +88,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
 import static org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.*;
+
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 import static org.apache.hadoop.util.Time.now;
 import static org.apache.hadoop.util.Time.monotonicNow;
@@ -175,7 +176,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.UnknownCryptoProtocolVersionException;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockType;
@@ -6847,21 +6848,33 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @param policies The policies to add.
    * @return The according result of add operation.
    */
-  AddingECPolicyResponse[] addECPolicies(ErasureCodingPolicy[] policies)
+  AddECPolicyResponse[] addECPolicies(ErasureCodingPolicy[] policies)
       throws IOException {
+    final String operationName = "addECPolicies";
     checkOperation(OperationCategory.WRITE);
-    List<AddingECPolicyResponse> responses = new ArrayList<>();
+    List<AddECPolicyResponse> responses = new ArrayList<>();
+    boolean success = false;
     writeLock();
-    for (ErasureCodingPolicy policy : policies) {
-      try {
-        FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
-        responses.add(new AddingECPolicyResponse(policy));
-      } catch (IllegalECPolicyException e) {
-        responses.add(new AddingECPolicyResponse(policy, e));
+    try {
+      checkOperation(OperationCategory.WRITE);
+      for (ErasureCodingPolicy policy : policies) {
+        try {
+          ErasureCodingPolicy newPolicy =
+              FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
+          responses.add(new AddECPolicyResponse(newPolicy));
+        } catch (IllegalECPolicyException e) {
+          responses.add(new AddECPolicyResponse(policy, e));
+        }
       }
+      success = true;
+      return responses.toArray(new AddECPolicyResponse[0]);
+    } finally {
+      writeUnlock(operationName);
+      if (success) {
+        getEditLog().logSync();
+      }
+      logAuditEvent(success, operationName, null, null, null);
     }
-    writeUnlock("addECPolicies");
-    return responses.toArray(new AddingECPolicyResponse[0]);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index fba34271c61..4a4fe9d0b77 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -84,7 +84,7 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.inotify.EventBatch;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AclException;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -2270,9 +2270,10 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override
-  public AddingECPolicyResponse[] addErasureCodingPolicies(
+  public AddECPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     checkNNStartup();
+    namesystem.checkSuperuserPrivilege();
     return namesystem.addECPolicies(policies);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
index 6508ec0fb15..2041a050317 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.util.ECPolicyLoader;
 import org.apache.hadoop.tools.TableListing;
@@ -182,9 +182,9 @@ public class ECAdmin extends Configured implements Tool {
       List<ErasureCodingPolicy> policies =
           new ECPolicyLoader().loadPolicy(filePath);
       if (policies.size() > 0) {
-        AddingECPolicyResponse[] responses = dfs.addErasureCodingPolicies(
+        AddECPolicyResponse[] responses = dfs.addErasureCodingPolicies(
             policies.toArray(new ErasureCodingPolicy[policies.size()]));
-        for (AddingECPolicyResponse response : responses) {
+        for (AddECPolicyResponse response : responses) {
           System.out.println(response);
         }
       } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 9ddd343bbda..82090e6113f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2953,6 +2953,13 @@
 </property>
 
+<property>
+  <name>dfs.namenode.ec.policies.max.cellsize</name>
+  <value>4194304</value>
+  <description>The maximum cell size of erasure coding policy. Default is 4MB.
+  </description>
+</property>
+
 <property>
   <name>dfs.datanode.ec.reconstruction.stripedread.timeout.millis</name>
   <value>5000</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index fb2d56b2803..dcb56f86a6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -154,6 +154,7 @@ Deployment
          [-getPolicy -path <path>]
          [-unsetPolicy -path <path>]
          [-listPolicies]
+         [-addPolicies -policyFile <file>]
          [-listCodecs]
          [-usage [cmd ...]]
          [-help [cmd ...]]
@@ -182,7 +183,7 @@ Below are the details about each command.
 
 * `[-addPolicies -policyFile <file>]`
 
-  Add a list of erasure coding policies. Please refer etc/hadoop/user_ec_policies.xml.template for the example policy file.
+  Add a list of erasure coding policies. Please refer to etc/hadoop/user_ec_policies.xml.template for an example policy file. The maximum cell size is defined in property 'dfs.namenode.ec.policies.max.cellsize' with the default value 4MB.
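+
+  For example, the command below loads the policies defined in a local policy file, as documented above (the sample path is illustrative):
+
+        hdfs ec -addPolicies -policyFile /tmp/user_ec_policies.xml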
 
 * `[-listCodecs]`
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index f024fb6a5f2..e9af5949214 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -49,8 +49,6 @@ import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -77,18 +75,14 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
-import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ScriptBasedMapping;
@@ -1449,37 +1443,4 @@ public class TestDistributedFileSystem {
       }
     }
   }
-
-  @Test
-  public void testAddErasureCodingPolicies() throws Exception {
-    Configuration conf = getTestConfiguration();
-    MiniDFSCluster cluster = null;
-
-    try {
-      ErasureCodingPolicy policy1 =
-          SystemErasureCodingPolicies.getPolicies().get(0);
-      conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-          Stream.of(policy1).map(ErasureCodingPolicy::getName)
-              .collect(Collectors.joining(", ")));
-
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-      DistributedFileSystem fs = cluster.getFileSystem();
-
-      ECSchema toAddSchema = new ECSchema("testcodec", 3, 2);
-      ErasureCodingPolicy toAddPolicy =
-          new ErasureCodingPolicy(toAddSchema, 128 * 1024, (byte) 254);
-      ErasureCodingPolicy[] policies = new ErasureCodingPolicy[]{
-          policy1, toAddPolicy};
-      AddingECPolicyResponse[] responses =
-          fs.addErasureCodingPolicies(policies);
-      assertEquals(2, responses.length);
-      assertFalse(responses[0].isSucceed());
-      assertTrue(responses[1].isSucceed());
-      assertTrue(responses[1].getPolicy().getId() > 0);
-    } finally {
-      if (cluster != null) {
-        cluster.shutdown();
-      }
-    }
-  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
index 0e4bd29b065..77e6594753a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
@@ -23,10 +23,12 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
@@ -639,4 +641,56 @@ public class TestErasureCodingPolicies {
       }
     }
   }
+
+  @Test
+  public void testAddErasureCodingPolicies() throws Exception {
+    // Test nonexistent codec name
+    ECSchema toAddSchema = new ECSchema("testcodec", 3, 2);
+    ErasureCodingPolicy newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024);
+    ErasureCodingPolicy[] policyArray = new ErasureCodingPolicy[]{newPolicy};
+    AddECPolicyResponse[] responses =
+        fs.addErasureCodingPolicies(policyArray);
+    assertEquals(1, responses.length);
+    assertFalse(responses[0].isSucceed());
+
+    // Test too big cell size
+    toAddSchema = new ECSchema("rs", 3, 2);
+    newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024 * 1024);
+    policyArray = new ErasureCodingPolicy[]{newPolicy};
+    responses = fs.addErasureCodingPolicies(policyArray);
+    assertEquals(1, responses.length);
+    assertFalse(responses[0].isSucceed());
+
+    // Test other invalid cell sizes
+    toAddSchema = new ECSchema("rs", 3, 2);
+    int[] cellSizes = {0, -1, 1023};
+    for (int cellSize : cellSizes) {
+      try {
+        new ErasureCodingPolicy(toAddSchema, cellSize);
+        Assert.fail("Invalid cell size should be detected.");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("cellSize must be", e);
+      }
+    }
+
+    // Test duplicate policy
+    ErasureCodingPolicy policy0 =
+        SystemErasureCodingPolicies.getPolicies().get(0);
+    policyArray = new ErasureCodingPolicy[]{policy0};
+    responses = fs.addErasureCodingPolicies(policyArray);
+    assertEquals(1, responses.length);
+    assertFalse(responses[0].isSucceed());
+
+    // Test adding a policy successfully
+    newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 1 * 1024 * 1024);
+    policyArray = new ErasureCodingPolicy[]{newPolicy};
+    responses = fs.addErasureCodingPolicies(policyArray);
+    assertEquals(1, responses.length);
+    assertTrue(responses[0].isSucceed());
+    assertEquals(SystemErasureCodingPolicies.getPolicies().size() + 1,
+        ErasureCodingPolicyManager.getInstance().getPolicies().length);
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index 5d5260938bc..3559825556f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 import com.google.protobuf.UninitializedMessageException;
-import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
@@ -913,14 +913,14 @@ public class TestPBHelper {
     // Check conversion of the built-in policies.
     for (ErasureCodingPolicy policy :
         SystemErasureCodingPolicies.getPolicies()) {
-      AddingECPolicyResponse response = new AddingECPolicyResponse(policy);
-      HdfsProtos.AddingECPolicyResponseProto proto = PBHelperClient
-          .convertAddingECPolicyResponse(response);
+      AddECPolicyResponse response = new AddECPolicyResponse(policy);
+      HdfsProtos.AddECPolicyResponseProto proto = PBHelperClient
+          .convertAddECPolicyResponse(response);
       // Optional fields should not be set.
       assertFalse("Unnecessary field is set.", proto.hasErrorMsg());
       // Convert proto back to an object and check for equality.
-      AddingECPolicyResponse convertedResponse = PBHelperClient
-          .convertAddingECPolicyResponse(proto);
+      AddECPolicyResponse convertedResponse = PBHelperClient
+          .convertAddECPolicyResponse(proto);
       assertEquals("Converted policy not equal", response.getPolicy(),
           convertedResponse.getPolicy());
       assertEquals("Converted policy not equal", response.isSucceed(),
@@ -929,13 +929,13 @@ public class TestPBHelper {
     ErasureCodingPolicy policy = SystemErasureCodingPolicies
         .getPolicies().get(0);
-    AddingECPolicyResponse response =
-        new AddingECPolicyResponse(policy, "failed");
-    HdfsProtos.AddingECPolicyResponseProto proto = PBHelperClient
-        .convertAddingECPolicyResponse(response);
+    AddECPolicyResponse response =
+        new AddECPolicyResponse(policy, "failed");
+    HdfsProtos.AddECPolicyResponseProto proto = PBHelperClient
+        .convertAddECPolicyResponse(response);
     // Convert proto back to an object and check for equality.
-    AddingECPolicyResponse convertedResponse = PBHelperClient
-        .convertAddingECPolicyResponse(proto);
+    AddECPolicyResponse convertedResponse = PBHelperClient
+        .convertAddECPolicyResponse(proto);
     assertEquals("Converted policy not equal", response.getPolicy(),
         convertedResponse.getPolicy());
     assertEquals("Converted policy not equal", response.getErrorMsg(),
@@ -961,7 +961,7 @@ public class TestPBHelper {
     // Check conversion of a non-built-in policy.
     ECSchema newSchema = new ECSchema("testcodec", 3, 2);
     ErasureCodingPolicy newPolicy =
-        new ErasureCodingPolicy(newSchema, 128 * 1024, (byte) 254);
+        new ErasureCodingPolicy(newSchema, 128 * 1024);
     HdfsProtos.ErasureCodingPolicyProto proto = PBHelperClient
         .convertErasureCodingPolicy(newPolicy);
     // Optional fields should be set.
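
For reference, a minimal client-side sketch of the API this patch adds, assuming a cluster running with this change and a superuser caller; the codec name, schema parameters, and cell size below are illustrative, not prescribed by the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class AddEcPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Schema and cellSize are required; name and id are assigned by the
    // Namenode. The cell size must be a multiple of 1024 and must not exceed
    // dfs.namenode.ec.policies.max.cellsize (4MB by default).
    ECSchema schema = new ECSchema("rs", 6, 3);
    ErasureCodingPolicy policy = new ErasureCodingPolicy(schema, 1024 * 1024);

    AddECPolicyResponse[] responses =
        dfs.addErasureCodingPolicies(new ErasureCodingPolicy[]{policy});
    for (AddECPolicyResponse response : responses) {
      if (response.isSucceed()) {
        // On success, the response carries the policy with its assigned
        // name and id.
        System.out.println("Added policy " + response.getPolicy().getName());
      } else {
        System.err.println("Failed to add policy: " + response.getErrorMsg());
      }
    }
  }
}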