HDFS-12444. Reduce runtime of TestWriteReadStripedFile. Contributed by Huafeng Wang and Andrew Wang.
parent 7bbeacb75e
commit 59830ca772
StripedFileTestUtil.java
@@ -79,10 +79,15 @@ public class StripedFileTestUtil {
     assertEquals("File length should be the same", fileLength, status.getLen());
   }
 
-  static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
-      byte[] expected, byte[] buf) throws IOException {
-    final ErasureCodingPolicy ecPolicy =
-        ((DistributedFileSystem)fs).getErasureCodingPolicy(srcPath);
+  static void verifyPread(DistributedFileSystem fs, Path srcPath,
+      int fileLength, byte[] expected, byte[] buf) throws IOException {
+    final ErasureCodingPolicy ecPolicy = fs.getErasureCodingPolicy(srcPath);
+    verifyPread(fs, srcPath, fileLength, expected, buf, ecPolicy);
+  }
+
+  static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
+      byte[] expected, byte[] buf, ErasureCodingPolicy ecPolicy)
+      throws IOException {
     try (FSDataInputStream in = fs.open(srcPath)) {
       int[] startOffsets = {0, 1, ecPolicy.getCellSize() - 102,
           ecPolicy.getCellSize(), ecPolicy.getCellSize() + 102,

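The refactor above splits verifyPread into a DistributedFileSystem overload that looks up the erasure coding policy itself and a FileSystem overload that takes the policy as an argument, so callers holding only a generic FileSystem no longer rely on a cast inside the utility. A minimal sketch of the two call patterns, assuming a test that already has a running MiniDFSCluster, a striped file at srcPath, and the expected/buf arrays (the names cluster, srcPath, fileLength, expected, buf, and webFs are assumptions, not part of this change):

    // DistributedFileSystem overload: the policy is resolved from srcPath.
    DistributedFileSystem dfs = cluster.getFileSystem();
    StripedFileTestUtil.verifyPread(dfs, srcPath, fileLength, expected, buf);

    // FileSystem overload: the caller supplies the policy explicitly, e.g. for
    // a WebHDFS handle that cannot be cast to DistributedFileSystem.
    ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
    StripedFileTestUtil.verifyPread(webFs, srcPath, fileLength, expected, buf,
        ecPolicy);
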
TestWriteReadStripedFile.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;

@@ -47,12 +48,13 @@ import java.util.Random;
 public class TestWriteReadStripedFile {
   public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
   private final ErasureCodingPolicy ecPolicy =
-      StripedFileTestUtil.getDefaultECPolicy();
+      SystemErasureCodingPolicies.getByID(
+          SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
   private final int cellSize = ecPolicy.getCellSize();
   private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
   private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
   private final int numDNs = dataBlocks + parityBlocks;
-  private final int stripesPerBlock = 4;
+  private final int stripesPerBlock = 2;
   private final int blockSize = stripesPerBlock * cellSize;
   private final int blockGroupSize = blockSize * dataBlocks;
 

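These two field changes are where most of the runtime saving comes from: the test moves from the default policy (RS-6-3 in current releases) to the smaller RS-3-2 system policy and halves stripesPerBlock, shrinking both the mini-cluster (5 DataNodes instead of 9) and the data written per test. A rough sketch of the arithmetic, assuming the built-in system policies keep their usual 1 MiB cell size:

    // Back-of-the-envelope only; exact sizes depend on the policies' cell size.
    int cellSize = 1024 * 1024;                // 1 MiB cells (assumption)
    int oldBlockSize = 4 * cellSize;           // stripesPerBlock = 4 -> 4 MiB blocks
    int newBlockSize = 2 * cellSize;           // stripesPerBlock = 2 -> 2 MiB blocks
    int oldBlockGroupSize = oldBlockSize * 6;  // RS-6-3: 6 data units -> 24 MiB per group
    int newBlockGroupSize = newBlockSize * 3;  // RS-3-2: 3 data units -> 6 MiB per group
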
@@ -78,11 +80,10 @@ public class TestWriteReadStripedFile {
         false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     fs = cluster.getFileSystem();
-    fs.enableErasureCodingPolicy(
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+    fs.enableErasureCodingPolicy(ecPolicy.getName());
     fs.mkdirs(new Path("/ec"));
     cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
-        StripedFileTestUtil.getDefaultECPolicy().getName());
+        ecPolicy.getName());
   }
 
   @After

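For context, the setup now enables and applies the same ecPolicy field in both places; the policy has to be enabled on the NameNode before it can be set on the test directory. A condensed sketch of the resulting flow (the names conf, cluster, fs, numDNs, and ecPolicy come from the hunk above; this is not the verbatim @Before method):

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
    fs = cluster.getFileSystem();
    // Enable the policy before applying it to a directory.
    fs.enableErasureCodingPolicy(ecPolicy.getName());
    fs.mkdirs(new Path("/ec"));
    cluster.getFileSystem().getClient()
        .setErasureCodingPolicy("/ec", ecPolicy.getName());
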
@@ -225,7 +226,8 @@ public class TestWriteReadStripedFile {
 
     byte[] smallBuf = new byte[1024];
     byte[] largeBuf = new byte[fileLength + 100];
-    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected,
+        largeBuf);
 
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         largeBuf);

@@ -268,13 +270,15 @@
 
     byte[] smallBuf = new byte[1024];
     byte[] largeBuf = new byte[fileLength + 100];
-    // TODO: HDFS-8797
-    //StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil
+        .verifyPread(fs, srcPath, fileLength, expected, largeBuf, ecPolicy);
 
-    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil
+        .verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
     StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
         blockGroupSize);
-    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
+    StripedFileTestUtil
+        .verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
     // webhdfs doesn't support bytebuffer read
   }
 

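This hunk also retires the HDFS-8797 TODO: with the policy passed explicitly, pread verification now runs against the FileSystem handle this test exercises over WebHDFS, not just against DistributedFileSystem. A hedged sketch of that pattern, assuming the usual WebHdfsTestUtil helper and the surrounding test's conf, srcPath, fileLength, expected, largeBuf, and ecPolicy:

    // Sketch only: the helper call and variable names are assumptions based on
    // how the WebHDFS read/write tests are typically written.
    FileSystem webFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    StripedFileTestUtil.verifyPread(webFs, srcPath, fileLength, expected,
        largeBuf, ecPolicy);
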
TestWriteStripedFileWithFailure.java
@@ -160,7 +160,8 @@ public class TestWriteStripedFileWithFailure {
         blockSize * dataBlocks);
     StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
         smallBuf);
-    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
+    StripedFileTestUtil.verifyPread((DistributedFileSystem)fs, srcPath,
+        fileLength, expected, largeBuf);
 
     // delete the file
     fs.delete(srcPath, true);