HDFS-12723. TestReadStripedFileWithMissingBlocks#testReadFileWithMissingBlocks failing consistently. Contributed by Ajay Kumar.

This commit is contained in:
Inigo Goiri 2018-03-15 10:14:35 -07:00
parent 5e013d50d1
commit 6de135169e
2 changed files with 56 additions and 19 deletions

View File

@@ -2277,6 +2277,22 @@ public class MiniDFSCluster implements AutoCloseable {
return stopDataNode(node); return stopDataNode(node);
} }
/**
 * Restart a DataNode by name; the name is matched against each running
 * node's transfer address (the host:port returned by getXferAddr()).
 * @param dnName transfer address (host:port) of the DataNode to restart
 * @return true if DataNode restart is successful else returns false
 * @throws IOException if restarting the matched DataNode fails
 */
public synchronized boolean restartDataNode(String dnName)
throws IOException {
for (int i = 0; i < dataNodes.size(); i++) {
DataNode dn = dataNodes.get(i).datanode;
// Match on the xfer address; delegate to the index-based restart.
if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
return restartDataNode(i);
}
}
// No datanode in this cluster has the given xfer address.
return false;
}
/* /*
* Shutdown a particular datanode * Shutdown a particular datanode
* @param i node index * @param i node index

View File

@@ -22,7 +22,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
@@ -40,6 +42,7 @@ public class TestReadStripedFileWithMissingBlocks {
.getLog(TestReadStripedFileWithMissingBlocks.class); .getLog(TestReadStripedFileWithMissingBlocks.class);
private MiniDFSCluster cluster; private MiniDFSCluster cluster;
private DistributedFileSystem fs; private DistributedFileSystem fs;
private DFSClient dfsClient;
private Configuration conf = new HdfsConfiguration(); private Configuration conf = new HdfsConfiguration();
private final ErasureCodingPolicy ecPolicy = private final ErasureCodingPolicy ecPolicy =
StripedFileTestUtil.getDefaultECPolicy(); StripedFileTestUtil.getDefaultECPolicy();
@@ -49,7 +52,9 @@ public class TestReadStripedFileWithMissingBlocks {
private final int stripPerBlock = 4; private final int stripPerBlock = 4;
private final int blockSize = stripPerBlock * cellSize; private final int blockSize = stripPerBlock * cellSize;
private final int blockGroupSize = blockSize * dataBlocks; private final int blockGroupSize = blockSize * dataBlocks;
private final int numDNs = dataBlocks + parityBlocks; // Starting with two more datanodes, minimum 9 should be up for
// test to pass.
private final int numDNs = dataBlocks + parityBlocks + 2;
private final int fileLength = blockSize * dataBlocks + 123; private final int fileLength = blockSize * dataBlocks + 123;
@Rule @Rule
@@ -63,6 +68,8 @@ public class TestReadStripedFileWithMissingBlocks {
"/", ecPolicy.getName()); "/", ecPolicy.getName());
fs = cluster.getFileSystem(); fs = cluster.getFileSystem();
fs.enableErasureCodingPolicy(ecPolicy.getName()); fs.enableErasureCodingPolicy(ecPolicy.getName());
dfsClient = new DFSClient(cluster.getNameNode(0).getNameNodeAddress(),
conf);
} }
public void tearDown() throws IOException { public void tearDown() throws IOException {
@@ -74,29 +81,34 @@ public class TestReadStripedFileWithMissingBlocks {
@Test @Test
public void testReadFileWithMissingBlocks() throws Exception { public void testReadFileWithMissingBlocks() throws Exception {
for (int missingData = 1; missingData <= dataBlocks; missingData++) { try {
for (int missingParity = 0; missingParity <= setup();
parityBlocks - missingData; missingParity++) { Path srcPath = new Path("/foo");
try { final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
setup(); DFSTestUtil.writeFile(fs, srcPath, new String(expected));
readFileWithMissingBlocks(new Path("/foo"), fileLength, StripedFileTestUtil
missingData, missingParity); .waitBlockGroupsReported(fs, srcPath.toUri().getPath());
} finally { StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
tearDown();
for (int missingData = 1; missingData <= dataBlocks; missingData++) {
for (int missingParity = 0; missingParity <=
parityBlocks - missingData; missingParity++) {
readFileWithMissingBlocks(srcPath, fileLength, missingData,
missingParity, expected);
} }
} }
} finally {
tearDown();
} }
} }
private void readFileWithMissingBlocks(Path srcPath, int fileLength, private void readFileWithMissingBlocks(Path srcPath, int fileLength,
int missingDataNum, int missingParityNum) int missingDataNum, int missingParityNum, byte[] expected)
throws Exception { throws Exception {
LOG.info("readFileWithMissingBlocks: (" + missingDataNum + "," LOG.info("readFileWithMissingBlocks: (" + missingDataNum + ","
+ missingParityNum + ")"); + missingParityNum + ")");
final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
DFSTestUtil.writeFile(fs, srcPath, new String(expected));
StripedFileTestUtil.waitBlockGroupsReported(fs, srcPath.toUri().getPath());
StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
int dataBlocks = (fileLength - 1) / cellSize + 1; int dataBlocks = (fileLength - 1) / cellSize + 1;
BlockLocation[] locs = fs.getFileBlockLocations(srcPath, 0, cellSize); BlockLocation[] locs = fs.getFileBlockLocations(srcPath, 0, cellSize);
@@ -112,7 +124,8 @@ public class TestReadStripedFileWithMissingBlocks {
// make sure there are missing block locations // make sure there are missing block locations
BlockLocation[] newLocs = fs.getFileBlockLocations(srcPath, 0, cellSize); BlockLocation[] newLocs = fs.getFileBlockLocations(srcPath, 0, cellSize);
Assert.assertTrue(newLocs[0].getNames().length < locs[0].getNames().length); Assert.assertTrue(
newLocs[0].getNames().length < locs[0].getNames().length);
byte[] smallBuf = new byte[1024]; byte[] smallBuf = new byte[1024];
byte[] largeBuf = new byte[fileLength + 100]; byte[] largeBuf = new byte[fileLength + 100];
@@ -120,10 +133,18 @@ public class TestReadStripedFileWithMissingBlocks {
blockGroupSize); blockGroupSize);
StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
smallBuf); smallBuf);
StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf); StripedFileTestUtil
.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
restartDeadDataNodes();
}
// delete the file private void restartDeadDataNodes() throws IOException {
fs.delete(srcPath, true); DatanodeInfo[] deadNodes = dfsClient
.datanodeReport(DatanodeReportType.DEAD);
for (DatanodeInfo dnInfo : deadNodes) {
cluster.restartDataNode(dnInfo.getXferAddr());
}
cluster.triggerHeartbeats();
} }
private void stopDataNodes(BlockLocation[] locs, int[] datanodes) private void stopDataNodes(BlockLocation[] locs, int[] datanodes)