HDFS-16430. Add validation to maximum blocks in EC group when adding an EC policy (#3899). Contributed by daimin.

Reviewed-by: tomscut <litao@bigo.sg>
Signed-off-by: Ayush Saxena <ayushsaxena@apache.org>
Author: daimin
Date: 2022-01-24 14:34:26 +08:00 (committed by GitHub)
Commit: 5ef335da1e (parent: 15b820c83c)
2 changed files with 16 additions and 0 deletions
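The change itself is small: when a user-defined EC policy is added, ErasureCodingPolicyManager now checks that the policy's data units plus parity units do not exceed HdfsServerConstants.MAX_BLOCKS_IN_GROUP (16) and rejects oversized policies with a HadoopIllegalArgumentException. As a rough standalone sketch of the rule being enforced (illustrative only, not the NameNode code; the method name and the use of IllegalArgumentException are placeholders):

// Illustrative sketch of the validation rule added by this commit.
// A policy is rejected when data + parity units exceed the 16-block group limit.
static void validateBlocksInGroup(int numDataUnits, int numParityUnits) {
  final int maxBlocksInGroup = 16; // HdfsServerConstants.MAX_BLOCKS_IN_GROUP
  int blocksInGroup = numDataUnits + numParityUnits;
  if (blocksInGroup > maxBlocksInGroup) {
    throw new IllegalArgumentException("Number of data and parity blocks in an EC group "
        + blocksInGroup + " should not exceed maximum " + maxBlocksInGroup);
  }
}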

ErasureCodingPolicyManager.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -304,6 +305,12 @@ public final class ErasureCodingPolicyManager {
           + policy.getCodecName() + " is not supported");
     }
+    int blocksInGroup = policy.getNumDataUnits() + policy.getNumParityUnits();
+    if (blocksInGroup > HdfsServerConstants.MAX_BLOCKS_IN_GROUP) {
+      throw new HadoopIllegalArgumentException("Number of data and parity blocks in an EC group " +
+          blocksInGroup + " should not exceed maximum " + HdfsServerConstants.MAX_BLOCKS_IN_GROUP);
+    }
     if (policy.getCellSize() > maxCellSize) {
       throw new HadoopIllegalArgumentException("Cell size " +
           policy.getCellSize() + " should not exceed maximum " +

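For comparison with the new test case below, here is a hedged client-side sketch of how the rejection surfaces through DistributedFileSystem#addErasureCodingPolicies. The fs handle, the class name, and the call to getErrorMsg() on the response are illustrative assumptions, not code from this commit:

// Hedged sketch mirroring the new test below: how a client observes the rejection.
// `fs` is assumed to be a DistributedFileSystem handle, e.g. from a MiniDFSCluster.
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.erasurecode.ECSchema;

public class OversizedEcPolicyExample {
  static void tryAddOversizedPolicy(DistributedFileSystem fs) throws Exception {
    // 14 data + 4 parity = 18 blocks per group, above MAX_BLOCKS_IN_GROUP (16).
    ECSchema schema = new ECSchema("rs", 14, 4);
    ErasureCodingPolicy policy = new ErasureCodingPolicy(schema, 128 * 1024 * 1024);
    AddErasureCodingPolicyResponse[] responses =
        fs.addErasureCodingPolicies(new ErasureCodingPolicy[] {policy});
    // With this change the NameNode refuses the policy, so isSucceed() is false
    // and the response carries the HadoopIllegalArgumentException message.
    System.out.println(responses[0].isSucceed() + ": " + responses[0].getErrorMsg());
  }
}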
TestErasureCodingPolicies.java

@@ -747,6 +747,15 @@ public class TestErasureCodingPolicies {
     assertEquals(1, responses.length);
     assertFalse(responses[0].isSucceed());
+    // Test numDataUnits + numParityUnits > 16
+    toAddSchema = new ECSchema("rs", 14, 4);
+    newPolicy =
+        new ErasureCodingPolicy(toAddSchema, 128 * 1024 * 1024);
+    policyArray = new ErasureCodingPolicy[]{newPolicy};
+    responses = fs.addErasureCodingPolicies(policyArray);
+    assertEquals(1, responses.length);
+    assertFalse(responses[0].isSucceed());
     // Test too big cell size
     toAddSchema = new ECSchema("rs", 3, 2);
     newPolicy =