HDFS-12677. Extend TestReconstructStripedFile with a random EC policy. Contributed by Takanobu Asanuma

Author: Chris Douglas
Date: 2018-03-12 14:29:44 -07:00
Parent: ddb67ca707
Commit: 39a5fbae47
2 changed files with 78 additions and 19 deletions

TestReconstructStripedFile.java (modified)

@@ -19,6 +19,7 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;

 import java.io.File;
 import java.io.IOException;
@@ -67,14 +68,13 @@
 public class TestReconstructStripedFile {
   public static final Log LOG = LogFactory.getLog(TestReconstructStripedFile.class);

-  private final ErasureCodingPolicy ecPolicy =
-      StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlkNum = ecPolicy.getNumDataUnits();
-  private final int parityBlkNum = ecPolicy.getNumParityUnits();
-  private final int cellSize = ecPolicy.getCellSize();
-  private final int blockSize = cellSize * 3;
-  private final int groupSize = dataBlkNum + parityBlkNum;
-  private final int dnNum = groupSize + parityBlkNum;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlkNum;
+  private int parityBlkNum;
+  private int cellSize;
+  private int blockSize;
+  private int groupSize;
+  private int dnNum;

   static {
     GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
@@ -95,8 +95,20 @@ enum ReconstructionType {
   private Map<DatanodeID, Integer> dnMap = new HashMap<>();
   private final Random random = new Random();

+  public ErasureCodingPolicy getEcPolicy() {
+    return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Before
   public void setup() throws IOException {
+    ecPolicy = getEcPolicy();
+    dataBlkNum = ecPolicy.getNumDataUnits();
+    parityBlkNum = ecPolicy.getNumParityUnits();
+    cellSize = ecPolicy.getCellSize();
+    blockSize = cellSize * 3;
+    groupSize = dataBlkNum + parityBlkNum;
+    dnNum = groupSize + parityBlkNum;
+
     conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(
@@ -114,10 +126,8 @@ public void setup() throws IOException {
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    fs.enableErasureCodingPolicy(
-        StripedFileTestUtil.getDefaultECPolicy().getName());
-    fs.getClient().setErasureCodingPolicy("/",
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+    fs.enableErasureCodingPolicy(ecPolicy.getName());
+    fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());

     List<DataNode> datanodes = cluster.getDataNodes();
     for (int i = 0; i < dnNum; i++) {
@@ -432,7 +442,7 @@ public void testProcessErasureCodingTasksSubmitionShouldSucceed()
     BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(
         new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices,
-        StripedFileTestUtil.getDefaultECPolicy());
+        ecPolicy);
     List<BlockECReconstructionInfo> ecTasks = new ArrayList<>();
     ecTasks.add(invalidECInfo);
     dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
@@ -461,7 +471,8 @@ private void testNNSendsErasureCodingTasks(int deadDN) throws Exception {
         .numDataNodes(numDataNodes).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    ErasureCodingPolicy policy = StripedFileTestUtil.getDefaultECPolicy();
+    ErasureCodingPolicy policy = ecPolicy;
+    fs.enableErasureCodingPolicy(policy.getName());
     fs.getClient().setErasureCodingPolicy("/", policy.getName());

     final int fileLen = cellSize * ecPolicy.getNumDataUnits();
@@ -470,7 +481,8 @@ private void testNNSendsErasureCodingTasks(int deadDN) throws Exception {
     }

     // Inject data-loss by tear down desired number of DataNodes.
-    assertTrue(policy.getNumParityUnits() >= deadDN);
+    assumeTrue("Ignore case where num dead DNs > num parity units",
+        policy.getNumParityUnits() >= deadDN);
     List<DataNode> dataNodes = new ArrayList<>(cluster.getDataNodes());
     Collections.shuffle(dataNodes);
     for (DataNode dn : dataNodes.subList(0, deadDN)) {
@@ -516,10 +528,8 @@ private void testErasureCodingWorkerXmitsWeight(
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    fs.enableErasureCodingPolicy(
-        StripedFileTestUtil.getDefaultECPolicy().getName());
-    fs.getClient().setErasureCodingPolicy("/",
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+    fs.enableErasureCodingPolicy(ecPolicy.getName());
+    fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());

     final int fileLen = cellSize * ecPolicy.getNumDataUnits() * 2;
     writeFile(fs, "/ec-xmits-weight", fileLen);
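
Taken together, these changes turn the policy choice into a template-method hook: getEcPolicy() supplies the ErasureCodingPolicy, and setup() derives dataBlkNum, parityBlkNum, cellSize, blockSize, groupSize, and dnNum from it, so a subclass only needs to override that one method. The switch from assertTrue to assumeTrue follows from the same design: a randomly chosen policy may have fewer parity units than the number of DataNodes a case tears down, and that combination is now skipped rather than failed. As a minimal sketch of the hook in use (the subclass name TestReconstructStripedFileWithFixedECPolicy is hypothetical, invented here to illustrate the debugging hint in the new test's constructor):

package org.apache.hadoop.hdfs;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

/**
 * Hypothetical subclass that pins the built-in RS(3,2) policy, following
 * the debugging hint in TestReconstructStripedFileWithRandomECPolicy.
 */
public class TestReconstructStripedFileWithFixedECPolicy extends
    TestReconstructStripedFile {
  @Override
  public ErasureCodingPolicy getEcPolicy() {
    // The base class's setup() derives all block/cell sizing from the
    // policy returned here.
    return SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
  }
}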

TestReconstructStripedFileWithRandomECPolicy.java (new file)

@@ -0,0 +1,49 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * This test extends TestReconstructStripedFile to use a random
 * (non-default) EC policy.
 */
public class TestReconstructStripedFileWithRandomECPolicy extends
    TestReconstructStripedFile {
  private static final Logger LOG = LoggerFactory.getLogger(
      TestReconstructStripedFileWithRandomECPolicy.class);

  private ErasureCodingPolicy ecPolicy;

  public TestReconstructStripedFileWithRandomECPolicy() {
    // If you want to debug this test with a specific ec policy, please use
    // SystemErasureCodingPolicies class.
    // e.g. ecPolicy = SystemErasureCodingPolicies.getByID(RS_3_2_POLICY_ID);
    ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
    LOG.info("run {} with {}.",
        TestReconstructStripedFileWithRandomECPolicy.class
            .getSuperclass().getSimpleName(), ecPolicy.getName());
  }

  @Override
  public ErasureCodingPolicy getEcPolicy() {
    return ecPolicy;
  }
}
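
StripedFileTestUtil.getRandomNonDefaultECPolicy() itself is not part of this diff. A minimal sketch of such a helper, assuming the built-in policies come from SystemErasureCodingPolicies.getPolicies() and that the default policy is the first entry of that list (the class name PolicyPickerExample and both assumptions are illustrative, not taken from this change):

package org.apache.hadoop.hdfs;

import java.util.List;
import java.util.Random;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

/** Illustrative stand-in for StripedFileTestUtil's random-policy helper. */
public final class PolicyPickerExample {
  private PolicyPickerExample() {
  }

  public static ErasureCodingPolicy getRandomNonDefaultECPolicy() {
    List<ErasureCodingPolicy> policies =
        SystemErasureCodingPolicies.getPolicies();
    // Skip index 0 (assumed here to be the default policy) and choose
    // uniformly from the remaining system policies.
    Random random = new Random();
    return policies.get(1 + random.nextInt(policies.size() - 1));
  }
}

The subclass picks its policy in the constructor and logs the policy name, so a failing run can be replayed against a pinned policy using the fixed-policy technique sketched after the first file.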