HDFS-9755. Erasure Coding: allow to use multiple EC policies in striping related tests [Part 2]. Contributed by Rui Li.

Change-Id: I2100bc27ad484f83c9cb2d2e5bb232f4f74fd286
Zhe Zhang 2016-02-11 10:14:09 -08:00
parent fa00d3e205
commit 0aa8c82894
6 changed files with 72 additions and 69 deletions
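Editorial note (not part of the commit): the recurring pattern in this diff is to replace loop bounds and failure counts that were hard-coded for a 3-parity policy such as RS-6-3 (e.g. "dataDelNum < 4") with values derived from the test utility's policy constants, so the striping tests hold under any configured erasure coding policy. A minimal standalone sketch of that pattern follows; the constant values are hypothetical stand-ins for StripedFileTestUtil.NUM_DATA_BLOCKS and NUM_PARITY_BLOCKS.

// Sketch only: illustrates the commit's pattern of deriving test bounds
// from the EC policy instead of hard-coding RS-6-3 numbers.
public class EcPolicyBoundsSketch {
  // Hypothetical stand-ins for StripedFileTestUtil.NUM_DATA_BLOCKS /
  // NUM_PARITY_BLOCKS; with a different policy only these values change.
  private static final int DATA_BLOCKS = 6;
  private static final int PARITY_BLOCKS = 3;

  public static void main(String[] args) {
    // Old style: for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) ...
    // New style: the loop bounds follow the policy's fault tolerance.
    for (int dataDelNum = 1; dataDelNum <= PARITY_BLOCKS; dataDelNum++) {
      for (int parityDelNum = 0;
          dataDelNum + parityDelNum <= PARITY_BLOCKS; parityDelNum++) {
        System.out.println("kill " + dataDelNum + " data + " + parityDelNum
            + " parity block(s): still within the " + PARITY_BLOCKS
            + "-failure tolerance of a " + DATA_BLOCKS + "+" + PARITY_BLOCKS
            + " policy");
      }
    }
  }
}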

CHANGES.txt

@@ -927,6 +927,9 @@ Trunk (Unreleased)
HDFS-9775. Erasure Coding : Rename BlockRecoveryWork to
BlockReconstructionWork. (Rakesh R via zhz)
HDFS-9755. Erasure Coding: allow to use multiple EC policies in striping
related tests [Part 2]. (Rui Li via zhz)
Release 2.9.0 - UNRELEASED
INCOMPATIBLE CHANGES

TestReadStripedFileWithDecoding.java

@@ -68,13 +68,21 @@ public class TestReadStripedFileWithDecoding {
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
private final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
private static final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
private static final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
private final int smallFileLength = blockSize * dataBlocks - 123;
private final int largeFileLength = blockSize * dataBlocks + 123;
private final int[] fileLengths = {smallFileLength, largeFileLength};
private final int[] dnFailureNums = {1, 2, 3};
private static final int[] dnFailureNums = getDnFailureNums();
private static int[] getDnFailureNums() {
int[] dnFailureNums = new int[parityBlocks];
for (int i = 0; i < dnFailureNums.length; i++) {
dnFailureNums[i] = i + 1;
}
return dnFailureNums;
}
@Rule
public Timeout globalTimeout = new Timeout(300000);
@@ -132,8 +140,9 @@ public class TestReadStripedFileWithDecoding {
@Test(timeout=300000)
public void testReadCorruptedData() throws IOException {
for (int fileLength : fileLengths) {
for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
parityDelNum++) {
String src = "/corrupted_" + dataDelNum + "_" + parityDelNum;
testReadWithBlockCorrupted(src, fileLength,
dataDelNum, parityDelNum, false);
@@ -149,8 +158,9 @@ public class TestReadStripedFileWithDecoding {
@Test(timeout=300000)
public void testReadCorruptedDataByDeleting() throws IOException {
for (int fileLength : fileLengths) {
for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
parityDelNum++) {
String src = "/deleted_" + dataDelNum + "_" + parityDelNum;
testReadWithBlockCorrupted(src, fileLength,
dataDelNum, parityDelNum, true);

TestReadStripedFileWithMissingBlocks.java

@@ -21,18 +21,17 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.Rule;
import org.junit.rules.Timeout;
import java.io.IOException;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.TEST_EC_POLICY;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
@@ -53,16 +52,15 @@ public class TestReadStripedFileWithMissingBlocks {
@Rule
public Timeout globalTimeout = new Timeout(300000);
@Before
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
cluster.getFileSystem().getClient().setErasureCodingPolicy(
"/", TEST_EC_POLICY);
fs = cluster.getFileSystem();
}
@After
public void tearDown() throws IOException {
if (cluster != null) {
cluster.shutdown();
@@ -71,33 +69,19 @@ public class TestReadStripedFileWithMissingBlocks {
}
@Test
public void testReadFileWithMissingBlocks1() throws Exception {
readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 0);
}
@Test
public void testReadFileWithMissingBlocks2() throws Exception {
readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 1);
}
@Test
public void testReadFileWithMissingBlocks3() throws Exception {
readFileWithMissingBlocks(new Path("/foo"), fileLength, 1, 2);
}
@Test
public void testReadFileWithMissingBlocks4() throws Exception {
readFileWithMissingBlocks(new Path("/foo"), fileLength, 2, 0);
}
@Test
public void testReadFileWithMissingBlocks5() throws Exception {
readFileWithMissingBlocks(new Path("/foo"), fileLength, 2, 1);
}
@Test
public void testReadFileWithMissingBlocks6() throws Exception {
readFileWithMissingBlocks(new Path("/foo"), fileLength, 3, 0);
public void testReadFileWithMissingBlocks() throws Exception {
for (int missingData = 1; missingData <= NUM_PARITY_BLOCKS; missingData++) {
for (int missingParity = 0; missingParity <=
NUM_PARITY_BLOCKS - missingData; missingParity++) {
try {
setup();
readFileWithMissingBlocks(new Path("/foo"), fileLength,
missingData, missingParity);
} finally {
tearDown();
}
}
}
}
private void readFileWithMissingBlocks(Path srcPath, int fileLength,

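Because the diff viewer above interleaves the six removed fixed-parameter tests with the added loop, the resulting method is easier to read in one piece. The following is pieced together from the added lines above (a reconstruction, not a verbatim copy of the file): the six tests collapse into one loop over every recoverable (missingData, missingParity) combination, rebuilding the mini-cluster for each iteration.

@Test
public void testReadFileWithMissingBlocks() throws Exception {
  for (int missingData = 1; missingData <= NUM_PARITY_BLOCKS; missingData++) {
    for (int missingParity = 0;
        missingParity <= NUM_PARITY_BLOCKS - missingData; missingParity++) {
      try {
        // rebuild the mini-cluster for each combination
        setup();
        readFileWithMissingBlocks(new Path("/foo"), fileLength,
            missingData, missingParity);
      } finally {
        tearDown();
      }
    }
  }
}
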
TestReconstructStripedFile.java

@@ -113,14 +113,14 @@ public class TestReconstructStripedFile {
@Test(timeout = 120000)
public void testRecoverOneParityBlock() throws Exception {
int fileLen = 10 * blockSize + blockSize/10;
int fileLen = (dataBlkNum + 1) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverOneParityBlock", fileLen,
ReconstructionType.ParityOnly, 1);
}
@Test(timeout = 120000)
public void testRecoverOneParityBlock1() throws Exception {
int fileLen = cellSize + cellSize/10;
int fileLen = cellSize + cellSize / 10;
assertFileBlocksReconstruction("/testRecoverOneParityBlock1", fileLen,
ReconstructionType.ParityOnly, 1);
}
@@ -134,35 +134,35 @@ public class TestReconstructStripedFile {
@Test(timeout = 120000)
public void testRecoverOneParityBlock3() throws Exception {
int fileLen = 3 * blockSize + blockSize/10;
int fileLen = (dataBlkNum - 1) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverOneParityBlock3", fileLen,
ReconstructionType.ParityOnly, 1);
}
@Test(timeout = 120000)
public void testRecoverThreeParityBlocks() throws Exception {
int fileLen = 10 * blockSize + blockSize/10;
assertFileBlocksReconstruction("/testRecoverThreeParityBlocks", fileLen,
ReconstructionType.ParityOnly, 3);
public void testRecoverAllParityBlocks() throws Exception {
int fileLen = dataBlkNum * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverAllParityBlocks", fileLen,
ReconstructionType.ParityOnly, parityBlkNum);
}
@Test(timeout = 120000)
public void testRecoverThreeDataBlocks() throws Exception {
int fileLen = 10 * blockSize + blockSize/10;
assertFileBlocksReconstruction("/testRecoverThreeDataBlocks", fileLen,
ReconstructionType.DataOnly, 3);
public void testRecoverAllDataBlocks() throws Exception {
int fileLen = (dataBlkNum + parityBlkNum) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverAllDataBlocks", fileLen,
ReconstructionType.DataOnly, parityBlkNum);
}
@Test(timeout = 120000)
public void testRecoverThreeDataBlocks1() throws Exception {
int fileLen = 3 * blockSize + blockSize/10;
assertFileBlocksReconstruction("/testRecoverThreeDataBlocks1", fileLen,
ReconstructionType.DataOnly, 3);
public void testRecoverAllDataBlocks1() throws Exception {
int fileLen = parityBlkNum * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverAllDataBlocks1", fileLen,
ReconstructionType.DataOnly, parityBlkNum);
}
@Test(timeout = 120000)
public void testRecoverOneDataBlock() throws Exception {
int fileLen = 10 * blockSize + blockSize/10;
int fileLen = (dataBlkNum + 1) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverOneDataBlock", fileLen,
ReconstructionType.DataOnly, 1);
}
@@ -183,16 +183,16 @@ public class TestReconstructStripedFile {
@Test(timeout = 120000)
public void testRecoverAnyBlocks() throws Exception {
int fileLen = 3 * blockSize + blockSize/10;
int fileLen = parityBlkNum * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverAnyBlocks", fileLen,
ReconstructionType.Any, 2);
ReconstructionType.Any, random.nextInt(parityBlkNum) + 1);
}
@Test(timeout = 120000)
public void testRecoverAnyBlocks1() throws Exception {
int fileLen = 10 * blockSize + blockSize/10;
int fileLen = (dataBlkNum + parityBlkNum) * blockSize + blockSize / 10;
assertFileBlocksReconstruction("/testRecoverAnyBlocks1", fileLen,
ReconstructionType.Any, 3);
ReconstructionType.Any, random.nextInt(parityBlkNum) + 1);
}
private int[] generateDeadDnIndices(ReconstructionType type, int deadNum,
@@ -259,6 +259,7 @@ public class TestReconstructStripedFile {
if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
}
assertTrue("File length must be positive.", fileLen > 0);
Path file = new Path(fileName);
@@ -289,6 +290,7 @@ public class TestReconstructStripedFile {
int[] deadDnIndices = new int[toRecoverBlockNum];
ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
File[] replicas = new File[toRecoverBlockNum];
long[] replicaLengths = new long[toRecoverBlockNum];
File[] metadatas = new File[toRecoverBlockNum];
byte[][] replicaContents = new byte[toRecoverBlockNum][];
Map<ExtendedBlock, DataNode> errorMap = new HashMap<>(dead.length);
@@ -301,9 +303,10 @@ public class TestReconstructStripedFile {
lastBlock.getBlock(), cellSize, dataBlkNum, indices[dead[i]]);
errorMap.put(blocks[i], cluster.getDataNodes().get(deadDnIndices[i]));
replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
replicaLengths[i] = replicas[i].length();
metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
// the block replica on the datanode should be the same as expected
assertEquals(replicas[i].length(),
assertEquals(replicaLengths[i],
StripedBlockUtil.getInternalBlockLength(
lastBlock.getBlockSize(), cellSize, dataBlkNum, indices[dead[i]]));
assertTrue(metadatas[i].getName().
@@ -312,8 +315,10 @@ public class TestReconstructStripedFile {
replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
}
int cellsNum = (fileLen - 1) / cellSize + 1;
int groupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;
int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum :
Math.min(dataBlkNum, ((lastGroupDataLen - 1) / cellSize + 1));
int groupSize = lastGroupNumBlk + parityBlkNum;
// shutdown datanodes or generate corruption
int stoppedDN = generateErrors(errorMap, type);
@@ -342,7 +347,7 @@ public class TestReconstructStripedFile {
LOG.info("replica after reconstruction " + replicaAfterReconstruction);
File metadataAfterReconstruction =
cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
assertEquals(replicaAfterReconstruction.length(), replicas[i].length());
assertEquals(replicaLengths[i], replicaAfterReconstruction.length());
LOG.info("replica before " + replicas[i]);
assertTrue(metadataAfterReconstruction.getName().
endsWith(blocks[i].getGenerationStamp() + ".meta"));

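For context on the groupSize change above: the old expression counted all cells in the file and capped the result at dataBlkNum, while the new one derives the count from the data carried by the last block group alone, so the two differ once a file spans more than one group and the last group is partial. A worked example with hypothetical layout values (not taken from the test; the actual cellSize, blockSize and policy may differ):

// Hypothetical layout values for illustration only.
public class LastGroupSizeSketch {
  public static void main(String[] args) {
    int dataBlkNum = 6, parityBlkNum = 3;
    int cellSize = 64 * 1024;
    int blockSize = 2 * cellSize;

    // A file that fills one full block group plus one cell and change.
    int fileLen = dataBlkNum * blockSize + cellSize + 123;

    // Old formula: counts every cell in the file, so it saturates at
    // dataBlkNum and reports a full group (6 + 3 = 9 internal blocks).
    int cellsNum = (fileLen - 1) / cellSize + 1;
    int oldGroupSize = Math.min(cellsNum, dataBlkNum) + parityBlkNum;

    // New formula: looks only at the data in the last group, which here
    // spans 2 cells, so the last group has 2 + 3 = 5 internal blocks.
    int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
    int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum
        : Math.min(dataBlkNum, (lastGroupDataLen - 1) / cellSize + 1);
    int newGroupSize = lastGroupNumBlk + parityBlkNum;

    System.out.println("old groupSize = " + oldGroupSize   // prints 9
        + ", new groupSize = " + newGroupSize);             // prints 5
  }
}
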
TestSafeModeWithStripedFile.java

@@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -79,7 +78,8 @@ public class TestSafeModeWithStripedFile {
@Test
public void testStripedFile1() throws IOException {
doTest(cellSize * 5, 5);
int numCell = DATA_BLK_NUM - 1;
doTest(cellSize * numCell, numCell);
}
/**

TestWriteStripedFileWithFailure.java

@@ -72,8 +72,9 @@ public class TestWriteStripedFileWithFailure {
@Test(timeout = 300000)
public void testWriteStripedFileWithDNFailure() throws IOException {
for (int fileLength : fileLengths) {
for (int dataDelNum = 1; dataDelNum < 4; dataDelNum++) {
for (int parityDelNum = 0; (dataDelNum+parityDelNum) < 4; parityDelNum++) {
for (int dataDelNum = 1; dataDelNum <= parityBlocks; dataDelNum++) {
for (int parityDelNum = 0; (dataDelNum + parityDelNum) <= parityBlocks;
parityDelNum++) {
try {
// setup a new cluster with no dead datanode
setup();
@@ -82,7 +83,7 @@ public class TestWriteStripedFileWithFailure {
String fileType = fileLength < (blockSize * dataBlocks) ?
"smallFile" : "largeFile";
LOG.error("Failed to write file with DN failure:"
+ " fileType = "+ fileType
+ " fileType = " + fileType
+ ", dataDelNum = " + dataDelNum
+ ", parityDelNum = " + parityDelNum);
throw ioe;