HDFS-11117. Refactor striped file tests to allow flexibly test erasure coding policy. Contributed by Sammi Chen
parent c0b1a44f6c
commit f6ffa11635
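For orientation before the diff: this commit replaces the hard-coded StripedFileTestUtil constants (NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, BLOCK_STRIPED_CELL_SIZE, BLOCK_GROUP_SIZE) with values read from an ErasureCodingPolicy instance, so the striped-file tests are no longer tied to one policy. A minimal sketch of the parameterization pattern the tests adopt is below; the wrapper class name StripedTestParams is illustrative only, while the field names and the getSystemDefaultPolicy()/getNumDataUnits()/getNumParityUnits()/getCellSize() calls are the ones that appear in the diff itself.

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class StripedTestParams {
  // Every test dimension is derived from the policy rather than a fixed
  // constant, so switching the policy re-sizes the whole test setup.
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final int dataBlocks = ecPolicy.getNumDataUnits();
  private final int parityBlocks = ecPolicy.getNumParityUnits();
  private final int cellSize = ecPolicy.getCellSize();
  private final int stripesPerBlock = 4;                 // test-local choice
  private final int blockSize = cellSize * stripesPerBlock;
  private final int blockGroupSize = blockSize * dataBlocks;
}

Cluster sizing then follows from the same fields (for example, numDNs = dataBlocks + parityBlocks in the tests below), so a wider or narrower policy automatically gets the right number of datanodes.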
@@ -111,6 +111,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -171,8 +172,6 @@ import org.mockito.internal.util.reflection.Whitebox;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.annotations.VisibleForTesting;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;

/** Utilities for HDFS tests */
public class DFSTestUtil {
@@ -1971,9 +1970,11 @@ public class DFSTestUtil {
}
}

final ErasureCodingPolicy ecPolicy =
fs.getErasureCodingPolicy(new Path(file));
// 2. RECEIVED_BLOCK IBR
long blockSize = isStripedBlock ?
numStripes * BLOCK_STRIPED_CELL_SIZE : len;
numStripes * ecPolicy.getCellSize() : len;
for (int i = 0; i < groupSize; i++) {
DataNode dn = dataNodes.get(i);
final Block block = new Block(lastBlock.getBlockId() + i,
@@ -1987,7 +1988,7 @@ public class DFSTestUtil {
}
}
long bytes = isStripedBlock ?
numStripes * BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS : len;
numStripes * ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() : len;
lastBlock.setNumBytes(bytes);
return lastBlock;
}
@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem.WebHdfsInputStream;
import org.apache.hadoop.io.IOUtils;
@@ -57,23 +56,6 @@ import static org.junit.Assert.assertEquals;

public class StripedFileTestUtil {
public static final Log LOG = LogFactory.getLog(StripedFileTestUtil.class);
/*
* These values correspond to the values used by the system default erasure
* coding policy.
*/
public static final ErasureCodingPolicy TEST_EC_POLICY =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
public static final short NUM_DATA_BLOCKS =
(short) TEST_EC_POLICY.getNumDataUnits();
public static final short NUM_PARITY_BLOCKS =
(short) TEST_EC_POLICY.getNumParityUnits();
public static final int BLOCK_STRIPED_CELL_SIZE =
TEST_EC_POLICY.getCellSize();

static int stripesPerBlock = 4;
public static int blockSize = BLOCK_STRIPED_CELL_SIZE * stripesPerBlock;
static int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
static int BLOCK_GROUP_SIZE = blockSize * NUM_DATA_BLOCKS;

public static byte[] generateBytes(int cnt) {
byte[] bytes = new byte[cnt];
@@ -96,10 +78,15 @@ public class StripedFileTestUtil {

static void verifyPread(FileSystem fs, Path srcPath, int fileLength,
byte[] expected, byte[] buf) throws IOException {
final ErasureCodingPolicy ecPolicy =
((DistributedFileSystem)fs).getErasureCodingPolicy(srcPath);
try (FSDataInputStream in = fs.open(srcPath)) {
int[] startOffsets = {0, 1, BLOCK_STRIPED_CELL_SIZE - 102, BLOCK_STRIPED_CELL_SIZE, BLOCK_STRIPED_CELL_SIZE + 102,
BLOCK_STRIPED_CELL_SIZE * (NUM_DATA_BLOCKS - 1), BLOCK_STRIPED_CELL_SIZE * (NUM_DATA_BLOCKS - 1) + 102,
BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS, fileLength - 102, fileLength - 1};
int[] startOffsets = {0, 1, ecPolicy.getCellSize() - 102,
ecPolicy.getCellSize(), ecPolicy.getCellSize() + 102,
ecPolicy.getCellSize() * (ecPolicy.getNumDataUnits() - 1),
ecPolicy.getCellSize() * (ecPolicy.getNumDataUnits() - 1) + 102,
ecPolicy.getCellSize() * ecPolicy.getNumDataUnits(),
fileLength - 102, fileLength - 1};
for (int startOffset : startOffsets) {
startOffset = Math.max(0, Math.min(startOffset, fileLength - 1));
int remaining = fileLength - startOffset;
@ -153,8 +140,8 @@ public class StripedFileTestUtil {
|
|||
}
|
||||
}
|
||||
|
||||
static void verifySeek(FileSystem fs, Path srcPath, int fileLength)
|
||||
throws IOException {
|
||||
static void verifySeek(FileSystem fs, Path srcPath, int fileLength,
|
||||
ErasureCodingPolicy ecPolicy, int blkGroupSize) throws IOException {
|
||||
try (FSDataInputStream in = fs.open(srcPath)) {
|
||||
// seek to 1/2 of content
|
||||
int pos = fileLength / 2;
|
||||
|
@ -168,21 +155,21 @@ public class StripedFileTestUtil {
|
|||
pos = 0;
|
||||
assertSeekAndRead(in, pos, fileLength);
|
||||
|
||||
if (fileLength > BLOCK_STRIPED_CELL_SIZE) {
|
||||
if (fileLength > ecPolicy.getCellSize()) {
|
||||
// seek to cellSize boundary
|
||||
pos = BLOCK_STRIPED_CELL_SIZE - 1;
|
||||
pos = ecPolicy.getCellSize() - 1;
|
||||
assertSeekAndRead(in, pos, fileLength);
|
||||
}
|
||||
|
||||
if (fileLength > BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS) {
|
||||
if (fileLength > ecPolicy.getCellSize() * ecPolicy.getNumDataUnits()) {
|
||||
// seek to striped cell group boundary
|
||||
pos = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS - 1;
|
||||
pos = ecPolicy.getCellSize() * ecPolicy.getNumDataUnits() - 1;
|
||||
assertSeekAndRead(in, pos, fileLength);
|
||||
}
|
||||
|
||||
if (fileLength > blockSize * NUM_DATA_BLOCKS) {
|
||||
if (fileLength > blkGroupSize) {
|
||||
// seek to striped block group boundary
|
||||
pos = blockSize * NUM_DATA_BLOCKS - 1;
|
||||
pos = blkGroupSize - 1;
|
||||
assertSeekAndRead(in, pos, fileLength);
|
||||
}
|
||||
|
||||
|
@@ -244,13 +231,16 @@ public class StripedFileTestUtil {
* If the length of blockGroup is less than a full stripe, it returns the
* number of actual data internal blocks. Otherwise returns NUM_DATA_BLOCKS.
*/
public static short getRealDataBlockNum(int numBytes) {
return (short) Math.min(NUM_DATA_BLOCKS,
(numBytes - 1) / BLOCK_STRIPED_CELL_SIZE + 1);
public static short getRealDataBlockNum(int numBytesInStrip,
ErasureCodingPolicy ecPolicy) {
return (short) Math.min(ecPolicy.getNumDataUnits(),
(numBytesInStrip - 1) / ecPolicy.getCellSize() + 1);
}

public static short getRealTotalBlockNum(int numBytes) {
return (short) (getRealDataBlockNum(numBytes) + NUM_PARITY_BLOCKS);
public static short getRealTotalBlockNum(int numBytesInStrip,
ErasureCodingPolicy ecPolicy) {
return (short) (getRealDataBlockNum(numBytesInStrip, ecPolicy) +
ecPolicy.getNumParityUnits());
}

public static void waitBlockGroupsReported(DistributedFileSystem fs,
@ -267,14 +257,15 @@ public class StripedFileTestUtil {
|
|||
boolean success;
|
||||
final int ATTEMPTS = 40;
|
||||
int count = 0;
|
||||
|
||||
final ErasureCodingPolicy ecPolicy =
|
||||
fs.getErasureCodingPolicy(new Path(src));
|
||||
do {
|
||||
success = true;
|
||||
count++;
|
||||
LocatedBlocks lbs = fs.getClient().getLocatedBlocks(src, 0);
|
||||
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
|
||||
short expected = (short) (getRealTotalBlockNum((int) lb.getBlockSize())
|
||||
- numDeadDNs);
|
||||
short expected = (short) (getRealTotalBlockNum((int) lb.getBlockSize(),
|
||||
ecPolicy) - numDeadDNs);
|
||||
int reported = lb.getLocations().length;
|
||||
if (reported < expected){
|
||||
success = false;
|
||||
|
@ -357,7 +348,8 @@ public class StripedFileTestUtil {
|
|||
}
|
||||
|
||||
static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
|
||||
List<DatanodeInfo> killedList, List<Long> oldGSList) throws IOException {
|
||||
List<DatanodeInfo> killedList, List<Long> oldGSList, int blkGroupSize)
|
||||
throws IOException {
|
||||
|
||||
StripedFileTestUtil.verifyLength(dfs, srcPath, length);
|
||||
List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
|
||||
|
@ -365,10 +357,14 @@ public class StripedFileTestUtil {
|
|||
Long.MAX_VALUE);
|
||||
int expectedNumGroup = 0;
|
||||
if (length > 0) {
|
||||
expectedNumGroup = (length - 1) / BLOCK_GROUP_SIZE + 1;
|
||||
expectedNumGroup = (length - 1) / blkGroupSize + 1;
|
||||
}
|
||||
assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());
|
||||
|
||||
final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
|
||||
final int cellSize = ecPolicy.getCellSize();
|
||||
final int dataBlkNum = ecPolicy.getNumDataUnits();
|
||||
final int parityBlkNum = ecPolicy.getNumParityUnits();
|
||||
int index = 0;
|
||||
for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
|
||||
Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
|
||||
|
@ -380,39 +376,39 @@ public class StripedFileTestUtil {
|
|||
Assert.assertTrue(s, gs >= oldGS);
|
||||
|
||||
LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
|
||||
(LocatedStripedBlock) firstBlock, BLOCK_STRIPED_CELL_SIZE,
|
||||
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
|
||||
(LocatedStripedBlock) firstBlock, cellSize,
|
||||
dataBlkNum, parityBlkNum);
|
||||
blockGroupList.add(Arrays.asList(blocks));
|
||||
}
|
||||
|
||||
// test each block group
|
||||
for (int group = 0; group < blockGroupList.size(); group++) {
|
||||
final boolean isLastGroup = group == blockGroupList.size() - 1;
|
||||
final int groupSize = !isLastGroup? BLOCK_GROUP_SIZE
|
||||
: length - (blockGroupList.size() - 1)*BLOCK_GROUP_SIZE;
|
||||
final int numCellInGroup = (groupSize - 1)/BLOCK_STRIPED_CELL_SIZE + 1;
|
||||
final int lastCellIndex = (numCellInGroup - 1) % NUM_DATA_BLOCKS;
|
||||
final int lastCellSize = groupSize - (numCellInGroup - 1)*BLOCK_STRIPED_CELL_SIZE;
|
||||
final int groupSize = !isLastGroup? blkGroupSize
|
||||
: length - (blockGroupList.size() - 1)*blkGroupSize;
|
||||
final int numCellInGroup = (groupSize - 1) / cellSize + 1;
|
||||
final int lastCellIndex = (numCellInGroup - 1) % dataBlkNum;
|
||||
final int lastCellSize = groupSize - (numCellInGroup - 1) * cellSize;
|
||||
|
||||
//get the data of this block
|
||||
List<LocatedBlock> blockList = blockGroupList.get(group);
|
||||
byte[][] dataBlockBytes = new byte[NUM_DATA_BLOCKS][];
|
||||
byte[][] parityBlockBytes = new byte[NUM_PARITY_BLOCKS][];
|
||||
byte[][] dataBlockBytes = new byte[dataBlkNum][];
|
||||
byte[][] parityBlockBytes = new byte[parityBlkNum][];
|
||||
|
||||
Set<Integer> checkSet = new HashSet<>();
|
||||
// for each block, use BlockReader to read data
|
||||
for (int i = 0; i < blockList.size(); i++) {
|
||||
final int j = i >= NUM_DATA_BLOCKS? 0: i;
|
||||
final int numCellInBlock = (numCellInGroup - 1)/NUM_DATA_BLOCKS
|
||||
final int j = i >= dataBlkNum? 0: i;
|
||||
final int numCellInBlock = (numCellInGroup - 1) / dataBlkNum
|
||||
+ (j <= lastCellIndex? 1: 0);
|
||||
final int blockSize = numCellInBlock*BLOCK_STRIPED_CELL_SIZE
|
||||
+ (isLastGroup && j == lastCellIndex? lastCellSize - BLOCK_STRIPED_CELL_SIZE: 0);
|
||||
final int blockSize = numCellInBlock * cellSize
|
||||
+ (isLastGroup && j == lastCellIndex? lastCellSize - cellSize: 0);
|
||||
|
||||
final byte[] blockBytes = new byte[blockSize];
|
||||
if (i < NUM_DATA_BLOCKS) {
|
||||
if (i < dataBlkNum) {
|
||||
dataBlockBytes[i] = blockBytes;
|
||||
} else {
|
||||
parityBlockBytes[i - NUM_DATA_BLOCKS] = blockBytes;
|
||||
parityBlockBytes[i - dataBlkNum] = blockBytes;
|
||||
}
|
||||
|
||||
final LocatedBlock lb = blockList.get(i);
|
||||
|
@ -440,7 +436,7 @@ public class StripedFileTestUtil {
|
|||
LOG.info("Internal blocks to check: " + checkSet);
|
||||
|
||||
// check data
|
||||
final int groupPosInFile = group*BLOCK_GROUP_SIZE;
|
||||
final int groupPosInFile = group * blkGroupSize;
|
||||
for (int i = 0; i < dataBlockBytes.length; i++) {
|
||||
boolean killed = false;
|
||||
if (!checkSet.contains(i)) {
|
||||
|
@ -449,7 +445,7 @@ public class StripedFileTestUtil {
|
|||
final byte[] actual = dataBlockBytes[i];
|
||||
for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
|
||||
final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
|
||||
BLOCK_STRIPED_CELL_SIZE, NUM_DATA_BLOCKS, posInBlk, i) + groupPosInFile;
|
||||
cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
|
||||
Assert.assertTrue(posInFile < length);
|
||||
final byte expected = getByte(posInFile);
|
||||
|
||||
|
@ -469,13 +465,14 @@ public class StripedFileTestUtil {
|
|||
// check parity
|
||||
verifyParityBlocks(dfs.getConf(),
|
||||
lbs.getLocatedBlocks().get(group).getBlockSize(),
|
||||
BLOCK_STRIPED_CELL_SIZE, dataBlockBytes, parityBlockBytes, checkSet);
|
||||
cellSize, dataBlockBytes, parityBlockBytes, checkSet,
|
||||
ecPolicy.getCodecName());
|
||||
}
|
||||
}
|
||||
|
||||
static void verifyParityBlocks(Configuration conf, final long size,
|
||||
final int cellSize, byte[][] dataBytes, byte[][] parityBytes,
|
||||
Set<Integer> checkSet) {
|
||||
Set<Integer> checkSet, String codecName) {
|
||||
// verify the parity blocks
|
||||
int parityBlkSize = (int) StripedBlockUtil.getInternalBlockLength(
|
||||
size, cellSize, dataBytes.length, dataBytes.length);
|
||||
|
@ -496,8 +493,7 @@ public class StripedFileTestUtil {
|
|||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
dataBytes.length, parityBytes.length);
|
||||
final RawErasureEncoder encoder =
|
||||
CodecUtil.createRawEncoder(conf, TEST_EC_POLICY.getCodecName(),
|
||||
coderOptions);
|
||||
CodecUtil.createRawEncoder(conf, codecName, coderOptions);
|
||||
encoder.encode(dataBytes, expectedParityBytes);
|
||||
for (int i = 0; i < parityBytes.length; i++) {
|
||||
if (checkSet.contains(i + dataBytes.length)){
|
||||
@@ -66,19 +66,19 @@ public class TestDFSStripedInputStream {
private Path filePath = new Path(dirPath, "file");
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
private final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
private final int NUM_STRIPE_PER_BLOCK = 2;
private final int INTERNAL_BLOCK_SIZE = NUM_STRIPE_PER_BLOCK * CELLSIZE;
private final int BLOCK_GROUP_SIZE = DATA_BLK_NUM * INTERNAL_BLOCK_SIZE;
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
private final int stripesPerBlock = 2;
private final int blockSize = stripesPerBlock * cellSize;
private final int blockGroupSize = dataBlocks * blockSize;

@Rule
public Timeout globalTimeout = new Timeout(300000);

@Before
public void setup() throws IOException {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, INTERNAL_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
if (ErasureCodeNative.isNativeCodeLoaded()) {
conf.set(
@ -87,7 +87,7 @@ public class TestDFSStripedInputStream {
|
|||
}
|
||||
SimulatedFSDataset.setFactory(conf);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
|
||||
DATA_BLK_NUM + PARITY_BLK_NUM).build();
|
||||
dataBlocks + parityBlocks).build();
|
||||
cluster.waitActive();
|
||||
for (DataNode dn : cluster.getDataNodes()) {
|
||||
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
|
||||
|
@ -112,9 +112,9 @@ public class TestDFSStripedInputStream {
|
|||
public void testRefreshBlock() throws Exception {
|
||||
final int numBlocks = 4;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
|
||||
filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
|
||||
filePath.toString(), 0, blockGroupSize * numBlocks);
|
||||
final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
|
||||
filePath.toString(), false, ecPolicy, null);
|
||||
|
||||
|
@ -122,8 +122,8 @@ public class TestDFSStripedInputStream {
|
|||
for (LocatedBlock aLbList : lbList) {
|
||||
LocatedStripedBlock lsb = (LocatedStripedBlock) aLbList;
|
||||
LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb,
|
||||
CELLSIZE, DATA_BLK_NUM, PARITY_BLK_NUM);
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
cellSize, dataBlocks, parityBlocks);
|
||||
for (int j = 0; j < dataBlocks; j++) {
|
||||
LocatedBlock refreshed = in.refreshLocatedBlock(blks[j]);
|
||||
assertEquals(blks[j].getBlock(), refreshed.getBlock());
|
||||
assertEquals(blks[j].getStartOffset(), refreshed.getStartOffset());
|
||||
|
@ -136,18 +136,18 @@ public class TestDFSStripedInputStream {
|
|||
public void testPread() throws Exception {
|
||||
final int numBlocks = 2;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
|
||||
filePath.toString(), 0, BLOCK_GROUP_SIZE * numBlocks);
|
||||
int fileLen = BLOCK_GROUP_SIZE * numBlocks;
|
||||
filePath.toString(), 0, blockGroupSize * numBlocks);
|
||||
int fileLen = blockGroupSize * numBlocks;
|
||||
|
||||
byte[] expected = new byte[fileLen];
|
||||
assertEquals(numBlocks, lbs.getLocatedBlocks().size());
|
||||
for (int bgIdx = 0; bgIdx < numBlocks; bgIdx++) {
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(bgIdx));
|
||||
for (int i = 0; i < DATA_BLK_NUM; i++) {
|
||||
for (int i = 0; i < dataBlocks; i++) {
|
||||
Block blk = new Block(bg.getBlock().getBlockId() + i,
|
||||
NUM_STRIPE_PER_BLOCK * CELLSIZE,
|
||||
stripesPerBlock * cellSize,
|
||||
bg.getBlock().getGenerationStamp());
|
||||
blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
|
||||
cluster.injectBlocks(i, Arrays.asList(blk),
|
||||
|
@ -155,12 +155,12 @@ public class TestDFSStripedInputStream {
|
|||
}
|
||||
|
||||
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
|
||||
expected[bgIdx*BLOCK_GROUP_SIZE + posInFile] =
|
||||
for (int i = 0; i < stripesPerBlock; i++) {
|
||||
for (int j = 0; j < dataBlocks; j++) {
|
||||
for (int k = 0; k < cellSize; k++) {
|
||||
int posInBlk = i * cellSize + k;
|
||||
int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
|
||||
expected[bgIdx * blockGroupSize + posInFile] =
|
||||
SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
|
@ -170,9 +170,9 @@ public class TestDFSStripedInputStream {
|
|||
DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(),
|
||||
filePath.toString(), false, ecPolicy, null);
|
||||
|
||||
int[] startOffsets = {0, 1, CELLSIZE - 102, CELLSIZE, CELLSIZE + 102,
|
||||
CELLSIZE*DATA_BLK_NUM, CELLSIZE*DATA_BLK_NUM + 102,
|
||||
BLOCK_GROUP_SIZE - 102, BLOCK_GROUP_SIZE, BLOCK_GROUP_SIZE + 102,
|
||||
int[] startOffsets = {0, 1, cellSize - 102, cellSize, cellSize + 102,
|
||||
cellSize * dataBlocks, cellSize * dataBlocks + 102,
|
||||
blockGroupSize - 102, blockGroupSize, blockGroupSize + 102,
|
||||
fileLen - 1};
|
||||
for (int startOffset : startOffsets) {
|
||||
startOffset = Math.max(0, Math.min(startOffset, fileLen - 1));
|
||||
|
@ -192,17 +192,17 @@ public class TestDFSStripedInputStream {
|
|||
@Test
|
||||
public void testPreadWithDNFailure() throws Exception {
|
||||
final int numBlocks = 4;
|
||||
final int failedDNIdx = DATA_BLK_NUM - 1;
|
||||
final int failedDNIdx = dataBlocks - 1;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
|
||||
filePath.toString(), 0, BLOCK_GROUP_SIZE);
|
||||
filePath.toString(), 0, blockGroupSize);
|
||||
|
||||
assert lbs.get(0) instanceof LocatedStripedBlock;
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock)(lbs.get(0));
|
||||
for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
|
||||
for (int i = 0; i < dataBlocks + parityBlocks; i++) {
|
||||
Block blk = new Block(bg.getBlock().getBlockId() + i,
|
||||
NUM_STRIPE_PER_BLOCK * CELLSIZE,
|
||||
stripesPerBlock * cellSize,
|
||||
bg.getBlock().getGenerationStamp());
|
||||
blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
|
||||
cluster.injectBlocks(i, Arrays.asList(blk),
|
||||
|
@ -211,15 +211,15 @@ public class TestDFSStripedInputStream {
|
|||
DFSStripedInputStream in =
|
||||
new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
|
||||
ecPolicy, null);
|
||||
int readSize = BLOCK_GROUP_SIZE;
|
||||
int readSize = blockGroupSize;
|
||||
byte[] readBuffer = new byte[readSize];
|
||||
byte[] expected = new byte[readSize];
|
||||
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
|
||||
for (int i = 0; i < stripesPerBlock; i++) {
|
||||
for (int j = 0; j < dataBlocks; j++) {
|
||||
for (int k = 0; k < cellSize; k++) {
|
||||
int posInBlk = i * cellSize + k;
|
||||
int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
|
||||
expected[posInFile] = SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
|
@ -227,32 +227,32 @@ public class TestDFSStripedInputStream {
|
|||
}
|
||||
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
DATA_BLK_NUM, PARITY_BLK_NUM);
|
||||
dataBlocks, parityBlocks);
|
||||
RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf,
|
||||
ecPolicy.getCodecName(), coderOptions);
|
||||
|
||||
// Update the expected content for decoded data
|
||||
int[] missingBlkIdx = new int[PARITY_BLK_NUM];
|
||||
int[] missingBlkIdx = new int[parityBlocks];
|
||||
for (int i = 0; i < missingBlkIdx.length; i++) {
|
||||
if (i == 0) {
|
||||
missingBlkIdx[i] = failedDNIdx;
|
||||
} else {
|
||||
missingBlkIdx[i] = DATA_BLK_NUM + i;
|
||||
missingBlkIdx[i] = dataBlocks + i;
|
||||
}
|
||||
}
|
||||
cluster.stopDataNode(failedDNIdx);
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
byte[][] decodeInputs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][CELLSIZE];
|
||||
byte[][] decodeOutputs = new byte[missingBlkIdx.length][CELLSIZE];
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE;
|
||||
for (int i = 0; i < stripesPerBlock; i++) {
|
||||
byte[][] decodeInputs = new byte[dataBlocks + parityBlocks][cellSize];
|
||||
byte[][] decodeOutputs = new byte[missingBlkIdx.length][cellSize];
|
||||
for (int j = 0; j < dataBlocks; j++) {
|
||||
int posInBuf = i * cellSize * dataBlocks + j * cellSize;
|
||||
if (j != failedDNIdx) {
|
||||
System.arraycopy(expected, posInBuf, decodeInputs[j], 0, CELLSIZE);
|
||||
System.arraycopy(expected, posInBuf, decodeInputs[j], 0, cellSize);
|
||||
}
|
||||
}
|
||||
for (int j = DATA_BLK_NUM; j < DATA_BLK_NUM + PARITY_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
for (int j = dataBlocks; j < dataBlocks + parityBlocks; j++) {
|
||||
for (int k = 0; k < cellSize; k++) {
|
||||
int posInBlk = i * cellSize + k;
|
||||
decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
|
@ -261,8 +261,8 @@ public class TestDFSStripedInputStream {
|
|||
decodeInputs[m] = null;
|
||||
}
|
||||
rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
|
||||
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + failedDNIdx * CELLSIZE;
|
||||
System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, CELLSIZE);
|
||||
int posInBuf = i * cellSize * dataBlocks + failedDNIdx * cellSize;
|
||||
System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, cellSize);
|
||||
}
|
||||
|
||||
int delta = 10;
|
||||
|
@ -278,8 +278,8 @@ public class TestDFSStripedInputStream {
|
|||
// |c_0 |c_1 |c_2 |c_3 |c_4 |c_5 |
|
||||
// |256K - 10|missing|256K|256K|256K - 10|not in range|
|
||||
done += in.read(delta, readBuffer, delta,
|
||||
CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
|
||||
assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
|
||||
cellSize * (dataBlocks - 1) - 2 * delta);
|
||||
assertEquals(cellSize * (dataBlocks - 1) - delta, done);
|
||||
assertArrayEquals(Arrays.copyOf(expected, done),
|
||||
Arrays.copyOf(readBuffer, done));
|
||||
// read the rest
|
||||
|
@ -298,14 +298,14 @@ public class TestDFSStripedInputStream {
|
|||
private void testStatefulRead(boolean useByteBuffer,
|
||||
boolean cellMisalignPacket) throws Exception {
|
||||
final int numBlocks = 2;
|
||||
final int fileSize = numBlocks * BLOCK_GROUP_SIZE;
|
||||
final int fileSize = numBlocks * blockGroupSize;
|
||||
if (cellMisalignPacket) {
|
||||
conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT + 1);
|
||||
tearDown();
|
||||
setup();
|
||||
}
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.
|
||||
getBlockLocations(filePath.toString(), 0, fileSize);
|
||||
|
||||
|
@ -313,9 +313,9 @@ public class TestDFSStripedInputStream {
|
|||
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
|
||||
assert lb instanceof LocatedStripedBlock;
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock)(lb);
|
||||
for (int i = 0; i < DATA_BLK_NUM; i++) {
|
||||
for (int i = 0; i < dataBlocks; i++) {
|
||||
Block blk = new Block(bg.getBlock().getBlockId() + i,
|
||||
NUM_STRIPE_PER_BLOCK * CELLSIZE,
|
||||
stripesPerBlock * cellSize,
|
||||
bg.getBlock().getGenerationStamp());
|
||||
blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
|
||||
cluster.injectBlocks(i, Arrays.asList(blk),
|
||||
|
@ -331,12 +331,12 @@ public class TestDFSStripedInputStream {
|
|||
|
||||
for (LocatedBlock bg : lbs.getLocatedBlocks()) {
|
||||
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
for (int i = 0; i < stripesPerBlock; i++) {
|
||||
for (int j = 0; j < dataBlocks; j++) {
|
||||
for (int k = 0; k < cellSize; k++) {
|
||||
int posInBlk = i * cellSize + k;
|
||||
int posInFile = (int) bg.getStartOffset() +
|
||||
i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
|
||||
i * cellSize * dataBlocks + j * cellSize + k;
|
||||
expected[posInFile] = SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
|
@ -369,17 +369,17 @@ public class TestDFSStripedInputStream {
|
|||
@Test
|
||||
public void testStatefulReadWithDNFailure() throws Exception {
|
||||
final int numBlocks = 4;
|
||||
final int failedDNIdx = DATA_BLK_NUM - 1;
|
||||
final int failedDNIdx = dataBlocks - 1;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
|
||||
filePath.toString(), 0, BLOCK_GROUP_SIZE);
|
||||
filePath.toString(), 0, blockGroupSize);
|
||||
|
||||
assert lbs.get(0) instanceof LocatedStripedBlock;
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
|
||||
for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
|
||||
for (int i = 0; i < dataBlocks + parityBlocks; i++) {
|
||||
Block blk = new Block(bg.getBlock().getBlockId() + i,
|
||||
NUM_STRIPE_PER_BLOCK * CELLSIZE,
|
||||
stripesPerBlock * cellSize,
|
||||
bg.getBlock().getGenerationStamp());
|
||||
blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
|
||||
cluster.injectBlocks(i, Arrays.asList(blk),
|
||||
|
@ -388,15 +388,15 @@ public class TestDFSStripedInputStream {
|
|||
DFSStripedInputStream in =
|
||||
new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
|
||||
ecPolicy, null);
|
||||
int readSize = BLOCK_GROUP_SIZE;
|
||||
int readSize = blockGroupSize;
|
||||
byte[] readBuffer = new byte[readSize];
|
||||
byte[] expected = new byte[readSize];
|
||||
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
|
||||
for (int i = 0; i < stripesPerBlock; i++) {
|
||||
for (int j = 0; j < dataBlocks; j++) {
|
||||
for (int k = 0; k < cellSize; k++) {
|
||||
int posInBlk = i * cellSize + k;
|
||||
int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
|
||||
expected[posInFile] = SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
|
@ -404,32 +404,32 @@ public class TestDFSStripedInputStream {
|
|||
}
|
||||
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
DATA_BLK_NUM, PARITY_BLK_NUM);
|
||||
dataBlocks, parityBlocks);
|
||||
RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf,
|
||||
ecPolicy.getCodecName(), coderOptions);
|
||||
|
||||
// Update the expected content for decoded data
|
||||
int[] missingBlkIdx = new int[PARITY_BLK_NUM];
|
||||
int[] missingBlkIdx = new int[parityBlocks];
|
||||
for (int i = 0; i < missingBlkIdx.length; i++) {
|
||||
if (i == 0) {
|
||||
missingBlkIdx[i] = failedDNIdx;
|
||||
} else {
|
||||
missingBlkIdx[i] = DATA_BLK_NUM + i;
|
||||
missingBlkIdx[i] = dataBlocks + i;
|
||||
}
|
||||
}
|
||||
cluster.stopDataNode(failedDNIdx);
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
byte[][] decodeInputs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][CELLSIZE];
|
||||
byte[][] decodeOutputs = new byte[missingBlkIdx.length][CELLSIZE];
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE;
|
||||
for (int i = 0; i < stripesPerBlock; i++) {
|
||||
byte[][] decodeInputs = new byte[dataBlocks + parityBlocks][cellSize];
|
||||
byte[][] decodeOutputs = new byte[missingBlkIdx.length][cellSize];
|
||||
for (int j = 0; j < dataBlocks; j++) {
|
||||
int posInBuf = i * cellSize * dataBlocks + j * cellSize;
|
||||
if (j != failedDNIdx) {
|
||||
System.arraycopy(expected, posInBuf, decodeInputs[j], 0, CELLSIZE);
|
||||
System.arraycopy(expected, posInBuf, decodeInputs[j], 0, cellSize);
|
||||
}
|
||||
}
|
||||
for (int j = DATA_BLK_NUM; j < DATA_BLK_NUM + PARITY_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
for (int j = dataBlocks; j < dataBlocks + parityBlocks; j++) {
|
||||
for (int k = 0; k < cellSize; k++) {
|
||||
int posInBlk = i * cellSize + k;
|
||||
decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
|
@ -438,8 +438,8 @@ public class TestDFSStripedInputStream {
|
|||
decodeInputs[m] = null;
|
||||
}
|
||||
rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
|
||||
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + failedDNIdx * CELLSIZE;
|
||||
System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, CELLSIZE);
|
||||
int posInBuf = i * cellSize * dataBlocks + failedDNIdx * cellSize;
|
||||
System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, cellSize);
|
||||
}
|
||||
|
||||
int delta = 10;
|
||||
|
@ -452,13 +452,13 @@ public class TestDFSStripedInputStream {
|
|||
// both head and trail cells are partial
|
||||
// |c_0 |c_1 |c_2 |c_3 |c_4 |c_5 |
|
||||
// |256K - 10|missing|256K|256K|256K - 10|not in range|
|
||||
while (done < (CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta)) {
|
||||
while (done < (cellSize * (dataBlocks - 1) - 2 * delta)) {
|
||||
int ret = in.read(readBuffer, delta,
|
||||
CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
|
||||
cellSize * (dataBlocks - 1) - 2 * delta);
|
||||
assertTrue(ret > 0);
|
||||
done += ret;
|
||||
}
|
||||
assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
|
||||
assertEquals(cellSize * (dataBlocks - 1) - delta, done);
|
||||
// read the rest
|
||||
|
||||
int restSize;
|
||||
@@ -25,6 +25,8 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
@@ -45,13 +47,15 @@ public class TestDFSStripedOutputStream {
GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
}

private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
private int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();

private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private Configuration conf;
private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
private final int cellSize = ecPolicy.getCellSize();
private final int stripesPerBlock = 4;
private final int blockSize = cellSize * stripesPerBlock;

@@ -169,6 +173,6 @@ public class TestDFSStripedOutputStream {
StripedFileTestUtil.waitBlockGroupsReported(fs, src);

StripedFileTestUtil.checkData(fs, testPath, writeBytes,
new ArrayList<DatanodeInfo>(), null);
new ArrayList<DatanodeInfo>(), null, blockSize * dataBlocks);
}
}
@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
|
|||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
|
@ -35,6 +36,7 @@ import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
|
|||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
||||
import org.apache.hadoop.io.erasurecode.CodecUtil;
|
||||
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
|
||||
|
@ -44,6 +46,7 @@ import org.apache.hadoop.test.GenericTestUtils;
|
|||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Assume;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -59,7 +62,9 @@ import java.util.concurrent.atomic.AtomicInteger;
|
|||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
|
||||
/**
|
||||
* Test striped file write operation with data node failures.
|
||||
*/
|
||||
public class TestDFSStripedOutputStreamWithFailure {
|
||||
public static final Log LOG = LogFactory.getLog(
|
||||
TestDFSStripedOutputStreamWithFailure.class);
|
||||
|
@@ -71,53 +76,44 @@ public class TestDFSStripedOutputStreamWithFailure {
.getLogger().setLevel(Level.ALL);
}

private static final int NUM_DATA_BLOCKS = StripedFileTestUtil.NUM_DATA_BLOCKS;
private static final int NUM_PARITY_BLOCKS = StripedFileTestUtil.NUM_PARITY_BLOCKS;
private static final int CELL_SIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
private static final int STRIPES_PER_BLOCK = 4;
private static final int BLOCK_SIZE = CELL_SIZE * STRIPES_PER_BLOCK;
private static final int BLOCK_GROUP_SIZE = BLOCK_SIZE * NUM_DATA_BLOCKS;
private final ErasureCodingPolicy ecPolicy =
ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final int dataBlocks = ecPolicy.getNumDataUnits();
private final int parityBlocks = ecPolicy.getNumParityUnits();
private final int cellSize = ecPolicy.getCellSize();
private final int stripesPerBlock = 4;
private final int blockSize = cellSize * stripesPerBlock;
private final int blockGroupSize = blockSize * dataBlocks;

private static final int FLUSH_POS =
9 * DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT + 1;
static {
|
||||
System.out.println("NUM_DATA_BLOCKS = " + NUM_DATA_BLOCKS);
|
||||
System.out.println("NUM_PARITY_BLOCKS= " + NUM_PARITY_BLOCKS);
|
||||
System.out.println("CELL_SIZE = " + CELL_SIZE
|
||||
+ " (=" + StringUtils.TraditionalBinaryPrefix.long2String(CELL_SIZE, "B", 2) + ")");
|
||||
System.out.println("BLOCK_SIZE = " + BLOCK_SIZE
|
||||
+ " (=" + StringUtils.TraditionalBinaryPrefix.long2String(BLOCK_SIZE, "B", 2) + ")");
|
||||
System.out.println("BLOCK_GROUP_SIZE = " + BLOCK_GROUP_SIZE
|
||||
+ " (=" + StringUtils.TraditionalBinaryPrefix.long2String(BLOCK_GROUP_SIZE, "B", 2) + ")");
|
||||
}
|
||||
|
||||
static List<Integer> newLengths() {
|
||||
final List<Integer> lengths = new ArrayList<>();
|
||||
lengths.add(FLUSH_POS + 2);
|
||||
List<Integer> newLengths() {
|
||||
final List<Integer> lens = new ArrayList<>();
|
||||
lens.add(FLUSH_POS + 2);
|
||||
for(int b = 0; b <= 2; b++) {
|
||||
for(int c = 0; c < STRIPES_PER_BLOCK*NUM_DATA_BLOCKS; c++) {
|
||||
for(int c = 0; c < stripesPerBlock * dataBlocks; c++) {
|
||||
for(int delta = -1; delta <= 1; delta++) {
|
||||
final int length = b*BLOCK_GROUP_SIZE + c*CELL_SIZE + delta;
|
||||
System.out.println(lengths.size() + ": length=" + length
|
||||
final int length = b * blockGroupSize + c * cellSize + delta;
|
||||
System.out.println(lens.size() + ": length=" + length
|
||||
+ ", (b, c, d) = (" + b + ", " + c + ", " + delta + ")");
|
||||
lengths.add(length);
|
||||
lens.add(length);
|
||||
}
|
||||
}
|
||||
}
|
||||
return lengths;
|
||||
return lens;
|
||||
}
|
||||
|
||||
private static final int[][] dnIndexSuite = getDnIndexSuite();
|
||||
private final int[][] dnIndexSuite = getDnIndexSuite();
|
||||
|
||||
private static int[][] getDnIndexSuite() {
|
||||
private int[][] getDnIndexSuite() {
|
||||
final int maxNumLevel = 2;
|
||||
final int maxPerLevel = 8;
|
||||
List<List<Integer>> allLists = new ArrayList<>();
|
||||
int numIndex = NUM_PARITY_BLOCKS;
|
||||
int numIndex = parityBlocks;
|
||||
for (int i = 0; i < maxNumLevel && numIndex > 1; i++) {
|
||||
List<List<Integer>> lists =
|
||||
combinations(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, numIndex);
|
||||
combinations(dataBlocks + parityBlocks, numIndex);
|
||||
if (lists.size() > maxPerLevel) {
|
||||
Collections.shuffle(lists);
|
||||
lists = lists.subList(0, maxPerLevel);
|
||||
|
@ -125,15 +121,15 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
allLists.addAll(lists);
|
||||
numIndex--;
|
||||
}
|
||||
int[][] dnIndexSuite = new int[allLists.size()][];
|
||||
for (int i = 0; i < dnIndexSuite.length; i++) {
|
||||
int[][] dnIndexArray = new int[allLists.size()][];
|
||||
for (int i = 0; i < dnIndexArray.length; i++) {
|
||||
int[] list = new int[allLists.get(i).size()];
|
||||
for (int j = 0; j < list.length; j++) {
|
||||
list[j] = allLists.get(i).get(j);
|
||||
}
|
||||
dnIndexSuite[i] = list;
|
||||
dnIndexArray[i] = list;
|
||||
}
|
||||
return dnIndexSuite;
|
||||
return dnIndexArray;
|
||||
}
|
||||
|
||||
// get all combinations of k integers from {0,...,n-1}
|
||||
|
@ -171,10 +167,10 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
return positions;
|
||||
}
|
||||
|
||||
private static final List<Integer> LENGTHS = newLengths();
|
||||
private final List<Integer> lengths = newLengths();
|
||||
|
||||
static Integer getLength(int i) {
|
||||
return i >= 0 && i < LENGTHS.size()? LENGTHS.get(i): null;
|
||||
Integer getLength(int i) {
|
||||
return i >= 0 && i < lengths.size()? lengths.get(i): null;
|
||||
}
|
||||
|
||||
private static final Random RANDOM = new Random();
|
||||
|
@ -185,7 +181,18 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
+ TestDFSStripedOutputStreamWithFailure.class.getSimpleName());
|
||||
|
||||
private void setup(Configuration conf) throws IOException {
|
||||
final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
|
||||
System.out.println("NUM_DATA_BLOCKS = " + dataBlocks);
|
||||
System.out.println("NUM_PARITY_BLOCKS= " + parityBlocks);
|
||||
System.out.println("CELL_SIZE = " + cellSize + " (=" +
|
||||
StringUtils.TraditionalBinaryPrefix.long2String(cellSize, "B", 2)
|
||||
+ ")");
|
||||
System.out.println("BLOCK_SIZE = " + blockSize + " (=" +
|
||||
StringUtils.TraditionalBinaryPrefix.long2String(blockSize, "B", 2)
|
||||
+ ")");
|
||||
System.out.println("BLOCK_GROUP_SIZE = " + blockGroupSize + " (=" +
|
||||
StringUtils.TraditionalBinaryPrefix.long2String(blockGroupSize, "B", 2)
|
||||
+ ")");
|
||||
final int numDNs = dataBlocks + parityBlocks;
|
||||
if (ErasureCodeNative.isNativeCodeLoaded()) {
|
||||
conf.set(
|
||||
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
|
||||
|
@ -195,7 +202,7 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
cluster.waitActive();
|
||||
dfs = cluster.getFileSystem();
|
||||
dfs.mkdirs(dir);
|
||||
dfs.setErasureCodingPolicy(dir, null);
|
||||
dfs.setErasureCodingPolicy(dir, ecPolicy);
|
||||
}
|
||||
|
||||
private void tearDown() {
|
||||
|
@ -206,7 +213,7 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
|
||||
private HdfsConfiguration newHdfsConfiguration() {
|
||||
final HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||
false);
|
||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||
|
@ -220,12 +227,12 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
}
|
||||
|
||||
/**
|
||||
* Randomly pick a length and run tests with multiple data failures
|
||||
* Randomly pick a length and run tests with multiple data failures.
|
||||
* TODO: enable this later
|
||||
*/
|
||||
//@Test(timeout=240000)
|
||||
public void testMultipleDatanodeFailureRandomLength() throws Exception {
|
||||
int lenIndex = RANDOM.nextInt(LENGTHS.size());
|
||||
int lenIndex = RANDOM.nextInt(lengths.size());
|
||||
LOG.info("run testMultipleDatanodeFailureRandomLength with length index: "
|
||||
+ lenIndex);
|
||||
runTestWithMultipleFailure(getLength(lenIndex));
|
||||
|
@ -233,7 +240,7 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
|
||||
@Test(timeout=240000)
|
||||
public void testBlockTokenExpired() throws Exception {
|
||||
final int length = NUM_DATA_BLOCKS * (BLOCK_SIZE - CELL_SIZE);
|
||||
final int length = dataBlocks * (blockSize - cellSize);
|
||||
final HdfsConfiguration conf = newHdfsConfiguration();
|
||||
|
||||
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
|
||||
|
@ -241,7 +248,7 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
|
||||
// Set short retry timeouts so this test runs faster
|
||||
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
|
||||
for (int dn = 0; dn < NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS; dn += 2) {
|
||||
for (int dn = 0; dn < dataBlocks + parityBlocks; dn += 2) {
|
||||
try {
|
||||
setup(conf);
|
||||
runTest(length, new int[]{length / 2}, new int[]{dn}, true);
|
||||
|
@ -258,20 +265,21 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
public void testAddBlockWhenNoSufficientDataBlockNumOfNodes()
|
||||
throws IOException {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
try {
|
||||
setup(conf);
|
||||
ArrayList<DataNode> dataNodes = cluster.getDataNodes();
|
||||
// shutdown few datanodes to avoid getting sufficient data blocks number
|
||||
// of datanodes
|
||||
int numDatanodes = dataNodes.size();
|
||||
while (numDatanodes >= NUM_DATA_BLOCKS) {
|
||||
while (numDatanodes >= dataBlocks) {
|
||||
cluster.stopDataNode(0);
|
||||
numDatanodes--;
|
||||
}
|
||||
cluster.restartNameNodes();
|
||||
cluster.triggerHeartbeats();
|
||||
DatanodeInfo[] info = dfs.getClient().datanodeReport(DatanodeReportType.LIVE);
|
||||
DatanodeInfo[] info = dfs.getClient().datanodeReport(
|
||||
DatanodeReportType.LIVE);
|
||||
assertEquals("Mismatches number of live Dns ", numDatanodes, info.length);
|
||||
final Path dirFile = new Path(dir, "ecfile");
|
||||
FSDataOutputStream out;
|
||||
|
@ -284,8 +292,8 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
} catch (IOException ioe) {
|
||||
// expected
|
||||
GenericTestUtils.assertExceptionContains("Failed to get " +
|
||||
NUM_DATA_BLOCKS + " nodes from namenode: blockGroupSize= " +
|
||||
(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS) + ", blocks.length= " +
|
||||
dataBlocks + " nodes from namenode: blockGroupSize= " +
|
||||
(dataBlocks + parityBlocks) + ", blocks.length= " +
|
||||
numDatanodes, ioe);
|
||||
}
|
||||
} finally {
|
||||
|
@ -294,14 +302,15 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
}
|
||||
|
||||
@Test(timeout = 90000)
|
||||
public void testAddBlockWhenNoSufficientParityNumOfNodes() throws IOException {
|
||||
public void testAddBlockWhenNoSufficientParityNumOfNodes()
|
||||
throws IOException {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
try {
|
||||
setup(conf);
|
||||
ArrayList<DataNode> dataNodes = cluster.getDataNodes();
|
||||
// shutdown few data nodes to avoid writing parity blocks
|
||||
int killDns = (NUM_PARITY_BLOCKS - 1);
|
||||
int killDns = (parityBlocks - 1);
|
||||
int numDatanodes = dataNodes.size() - killDns;
|
||||
for (int i = 0; i < killDns; i++) {
|
||||
cluster.stopDataNode(i);
|
||||
|
@ -312,11 +321,12 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
DatanodeReportType.LIVE);
|
||||
assertEquals("Mismatches number of live Dns ", numDatanodes, info.length);
|
||||
Path srcPath = new Path(dir, "testAddBlockWhenNoSufficientParityNodes");
|
||||
int fileLength = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE - 1000;
|
||||
int fileLength = cellSize - 1000;
|
||||
final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
|
||||
DFSTestUtil.writeFile(dfs, srcPath, new String(expected));
|
||||
LOG.info("writing finished. Seek and read the file to verify.");
|
||||
StripedFileTestUtil.verifySeek(dfs, srcPath, fileLength);
|
||||
StripedFileTestUtil.verifySeek(dfs, srcPath, fileLength, ecPolicy,
|
||||
blockGroupSize);
|
||||
} finally {
|
||||
tearDown();
|
||||
}
|
||||
|
@ -324,7 +334,7 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
|
||||
void runTest(final int length) {
|
||||
final HdfsConfiguration conf = newHdfsConfiguration();
|
||||
for (int dn = 0; dn < NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS; dn++) {
|
||||
for (int dn = 0; dn < dataBlocks + parityBlocks; dn++) {
|
||||
try {
|
||||
LOG.info("runTest: dn=" + dn + ", length=" + length);
|
||||
setup(conf);
|
||||
|
@ -346,7 +356,8 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
int[] killPos = getKillPositions(length, dnIndex.length);
|
||||
try {
|
||||
LOG.info("runTestWithMultipleFailure: length==" + length + ", killPos="
|
||||
+ Arrays.toString(killPos) + ", dnIndex=" + Arrays.toString(dnIndex));
|
||||
+ Arrays.toString(killPos) + ", dnIndex="
|
||||
+ Arrays.toString(dnIndex));
|
||||
setup(conf);
|
||||
runTest(length, killPos, dnIndex, false);
|
||||
} catch (Throwable e) {
|
||||
|
@ -361,7 +372,7 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
}
|
||||
|
||||
/**
|
||||
* runTest implementation
|
||||
* runTest implementation.
|
||||
* @param length file length
|
||||
* @param killPos killing positions in ascending order
|
||||
* @param dnIndex DN index to kill when meets killing positions
|
||||
|
@ -371,8 +382,9 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
private void runTest(final int length, final int[] killPos,
|
||||
final int[] dnIndex, final boolean tokenExpire) throws Exception {
|
||||
if (killPos[0] <= FLUSH_POS) {
|
||||
LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS=" + FLUSH_POS
|
||||
+ ", length=" + length + ", dnIndex=" + Arrays.toString(dnIndex));
|
||||
LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS="
|
||||
+ FLUSH_POS + ", length=" + length + ", dnIndex="
|
||||
+ Arrays.toString(dnIndex));
|
||||
return; //skip test
|
||||
}
|
||||
Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos=%s",
|
||||
|
@ -398,12 +410,13 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
final DFSStripedOutputStream stripedOut
|
||||
= (DFSStripedOutputStream)out.getWrappedStream();
|
||||
|
||||
long firstGS = -1; // first GS of this block group which never proceeds blockRecovery
|
||||
// first GS of this block group which never proceeds blockRecovery
|
||||
long firstGS = -1;
|
||||
long oldGS = -1; // the old GS before bumping
|
||||
List<Long> gsList = new ArrayList<>();
|
||||
final List<DatanodeInfo> killedDN = new ArrayList<>();
|
||||
int numKilled=0;
|
||||
for(; pos.get() < length; ) {
|
||||
for(; pos.get() < length;) {
|
||||
final int i = pos.getAndIncrement();
|
||||
if (numKilled < killPos.length && i == killPos[numKilled]) {
|
||||
assertTrue(firstGS != -1);
|
||||
|
@ -421,17 +434,18 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
waitTokenExpires(out);
|
||||
}
|
||||
|
||||
killedDN.add(killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
|
||||
killedDN.add(
|
||||
killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
|
||||
numKilled++;
|
||||
}
|
||||
|
||||
write(out, i);
|
||||
|
||||
if (i % BLOCK_GROUP_SIZE == FLUSH_POS) {
|
||||
if (i % blockGroupSize == FLUSH_POS) {
|
||||
firstGS = getGenerationStamp(stripedOut);
|
||||
oldGS = firstGS;
|
||||
}
|
||||
if (i > 0 && (i + 1) % BLOCK_GROUP_SIZE == 0) {
|
||||
if (i > 0 && (i + 1) % blockGroupSize == 0) {
|
||||
gsList.add(oldGS);
|
||||
}
|
||||
}
|
||||
|
@ -442,7 +456,8 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);
|
||||
|
||||
cluster.triggerBlockReports();
|
||||
StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList);
|
||||
StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList,
|
||||
blockGroupSize);
|
||||
}
|
||||
|
||||
static void write(FSDataOutputStream out, int i) throws IOException {
|
||||
|
@ -508,43 +523,85 @@ public class TestDFSStripedOutputStreamWithFailure {
|
|||
}
|
||||
}
|
||||
|
||||
public static abstract class TestBase {
|
||||
static final long TIMEOUT = 240000;
|
||||
|
||||
int getBase() {
|
||||
final String name = getClass().getSimpleName();
|
||||
int i = name.length() - 1;
|
||||
for(; i >= 0 && Character.isDigit(name.charAt(i)); i--);
|
||||
return Integer.parseInt(name.substring(i + 1));
|
||||
int getBase() {
|
||||
final String name = getClass().getSimpleName();
|
||||
int i = name.length() - 1;
|
||||
for(; i >= 0 && Character.isDigit(name.charAt(i));){
|
||||
i--;
|
||||
}
|
||||
|
||||
private final TestDFSStripedOutputStreamWithFailure test
|
||||
= new TestDFSStripedOutputStreamWithFailure();
|
||||
private void run(int offset) {
|
||||
final int i = offset + getBase();
|
||||
final Integer length = getLength(i);
|
||||
if (length == null) {
|
||||
System.out.println("Skip test " + i + " since length=null.");
|
||||
return;
|
||||
}
|
||||
if (RANDOM.nextInt(16) != 0) {
|
||||
System.out.println("Test " + i + ", length=" + length
|
||||
+ ", is not chosen to run.");
|
||||
return;
|
||||
}
|
||||
System.out.println("Run test " + i + ", length=" + length);
|
||||
test.runTest(length);
|
||||
String number = name.substring(i + 1);
|
||||
try {
|
||||
return Integer.parseInt(number);
|
||||
} catch (Exception e) {
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout=TIMEOUT) public void test0() {run(0);}
|
||||
@Test(timeout=TIMEOUT) public void test1() {run(1);}
|
||||
@Test(timeout=TIMEOUT) public void test2() {run(2);}
|
||||
@Test(timeout=TIMEOUT) public void test3() {run(3);}
|
||||
@Test(timeout=TIMEOUT) public void test4() {run(4);}
|
||||
@Test(timeout=TIMEOUT) public void test5() {run(5);}
|
||||
@Test(timeout=TIMEOUT) public void test6() {run(6);}
|
||||
@Test(timeout=TIMEOUT) public void test7() {run(7);}
|
||||
@Test(timeout=TIMEOUT) public void test8() {run(8);}
|
||||
@Test(timeout=TIMEOUT) public void test9() {run(9);}
|
||||
private void run(int offset) {
|
||||
int base = getBase();
|
||||
Assume.assumeTrue(base >= 0);
|
||||
final int i = offset + base;
|
||||
final Integer length = getLength(i);
|
||||
if (length == null) {
|
||||
System.out.println("Skip test " + i + " since length=null.");
|
||||
return;
|
||||
}
|
||||
if (RANDOM.nextInt(16) != 0) {
|
||||
System.out.println("Test " + i + ", length=" + length
|
||||
+ ", is not chosen to run.");
|
||||
return;
|
||||
}
|
||||
System.out.println("Run test " + i + ", length=" + length);
|
||||
runTest(length);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test0() {
|
||||
run(0);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test1() {
|
||||
run(1);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test2() {
|
||||
run(2);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test3() {
|
||||
run(3);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test4() {
|
||||
run(4);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test5() {
|
||||
run(5);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test6() {
|
||||
run(6);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test7() {
|
||||
run(7);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test8() {
|
||||
run(8);
|
||||
}
|
||||
|
||||
@Test(timeout = 240000)
|
||||
public void test9() {
|
||||
run(9);
|
||||
}
|
||||
}
|
||||
@ -17,6 +17,8 @@
 */
package org.apache.hadoop.hdfs;

import org.apache.hadoop.hdfs.TestDFSStripedOutputStreamWithFailure.TestBase;

public class TestDFSStripedOutputStreamWithFailure000 extends TestBase {}
/**
 * Test striped file write operation with data node failures.
 */
public class TestDFSStripedOutputStreamWithFailure000
    extends TestDFSStripedOutputStreamWithFailure {}

(The identical change, replacing "extends TestBase {}" with the same javadoc and
"extends TestDFSStripedOutputStreamWithFailure {}", is applied to
TestDFSStripedOutputStreamWithFailure010, 020, 030, 040, 050, 060, 070, 080,
090, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200 and 210; the 210
variant previously declared its empty class body across two lines.)
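Under the new layout, adding another ten-case shard is just another empty subclass whose numeric suffix supplies the base offset. A hypothetical example (the class number is illustrative and not part of this patch):

package org.apache.hadoop.hdfs;

/**
 * Hypothetical additional shard; tests 220 through 229 would run against the
 * lengths returned by getLength(220)..getLength(229) in the parent class.
 */
public class TestDFSStripedOutputStreamWithFailure220
    extends TestDFSStripedOutputStreamWithFailure {}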
@ -17,9 +17,6 @@
 */
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;

@ -39,23 +36,18 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

@ -90,10 +82,14 @@ public class TestDecommissionWithStriped {
  private Configuration conf;
  private MiniDFSCluster cluster;
  private DistributedFileSystem dfs;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private int numDNs;
  private final int blockSize = StripedFileTestUtil.blockSize;
  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private final int cellSize = ecPolicy.getCellSize();
  private final int dataBlocks = ecPolicy.getNumDataUnits();
  private final int parityBlocks = ecPolicy.getNumParityUnits();
  private final int blockSize = cellSize * 4;
  private final int blockGroupSize = blockSize * dataBlocks;
  private final Path ecDir = new Path("/" + this.getClass().getSimpleName());

  private FSNamesystem fsn;

@ -132,12 +128,12 @@ public class TestDecommissionWithStriped {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(
        DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
        StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE - 1);
        cellSize - 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
        false);

    numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2;
    numDNs = dataBlocks + parityBlocks + 2;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
    cluster.waitActive();
    dfs = cluster.getFileSystem(0);

@ -194,7 +190,7 @@ public class TestDecommissionWithStriped {
    LOG.info("Starting test testDecommissionWithURBlocksForSameBlockGroup");

    final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
    int writeBytes = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2;
    int writeBytes = cellSize * dataBlocks * 2;
    writeStripedFile(dfs, ecFile, writeBytes);
    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());

@ -202,8 +198,8 @@ public class TestDecommissionWithStriped {
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
        .get(0);
    DatanodeInfo[] dnLocs = lb.getLocations();
    assertEquals(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, dnLocs.length);
    int decommNodeIndex = NUM_DATA_BLOCKS - 1;
    assertEquals(dataBlocks + parityBlocks, dnLocs.length);
    int decommNodeIndex = dataBlocks - 1;
    int stopNodeIndex = 1;

    // add the nodes which will be decommissioning

@ -273,7 +269,7 @@ public class TestDecommissionWithStriped {
    assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
        null);
        null, blockGroupSize);
    cleanupFile(dfs, ecFile);
  }

@ -294,7 +290,7 @@ public class TestDecommissionWithStriped {
    LOG.info("Starting test testFileChecksumAfterDecommission");

    final Path ecFile = new Path(ecDir, "testFileChecksumAfterDecommission");
    int writeBytes = BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS;
    int writeBytes = cellSize * dataBlocks;
    writeStripedFile(dfs, ecFile, writeBytes);
    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
    FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);

@ -303,7 +299,7 @@ public class TestDecommissionWithStriped {
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
        .get(0);
    DatanodeInfo[] dnLocs = lb.getLocations();
    assertEquals(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS, dnLocs.length);
    assertEquals(dataBlocks + parityBlocks, dnLocs.length);
    int decommNodeIndex = 1;

    // add the node which will be decommissioning

@ -312,7 +308,7 @@ public class TestDecommissionWithStriped {
    assertEquals(decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
    assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
        null);
        null, blockGroupSize);

    // verify checksum
    FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);

@ -355,7 +351,7 @@ public class TestDecommissionWithStriped {
    assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
        null);
        null, blockGroupSize);

    assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);

@ -437,7 +433,7 @@ public class TestDecommissionWithStriped {
    StripedFileTestUtil.waitBlockGroupsReported(dfs, ecFile.toString());

    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes,
        new ArrayList<DatanodeInfo>(), null);
        new ArrayList<DatanodeInfo>(), null, blockGroupSize);
  }

  private void writeConfigFile(Path name, List<String> nodes)

@ -528,7 +524,7 @@ public class TestDecommissionWithStriped {
   * decommissioned nodes, verify their replication is equal to what is
   * specified.
   *
   * @param downnode
   * @param decommissionedNodes
   *          - if null, there is no decommissioned node for this file.
   * @return - null if no failure found, else an error message string.
   */
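The pattern above, repeated throughout this change, is to derive every size from the policy object instead of the old StripedFileTestUtil constants. A small sketch of that derivation and of the arithmetic the decommission test relies on; the printed numbers depend on whichever policy getSystemDefaultPolicy() returns (for example a 6+3 Reed-Solomon policy), so treat the comments as assumptions:

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

public class EcGeometryExample {
  public static void main(String[] args) {
    ErasureCodingPolicy ecPolicy =
        ErasureCodingPolicyManager.getSystemDefaultPolicy();
    int cellSize = ecPolicy.getCellSize();            // e.g. 64 KiB
    int dataBlocks = ecPolicy.getNumDataUnits();      // e.g. 6
    int parityBlocks = ecPolicy.getNumParityUnits();  // e.g. 3
    int blockSize = cellSize * 4;                     // 4 stripes per block
    int blockGroupSize = blockSize * dataBlocks;      // data bytes in one group
    int numDNs = dataBlocks + parityBlocks + 2;       // one full group plus spares
    System.out.println("cellSize=" + cellSize + " numDNs=" + numDNs
        + " blockGroupSize=" + blockGroupSize);
  }
}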
@ -39,16 +39,17 @@ public class TestErasureCodingPolicyWithSnapshot {
  private DistributedFileSystem fs;
  private Configuration conf;

  private final static short GROUP_SIZE = (short) (StripedFileTestUtil.
      NUM_DATA_BLOCKS + StripedFileTestUtil.NUM_PARITY_BLOCKS);
  private final static int SUCCESS = 0;
  private final ErasureCodingPolicy sysDefaultPolicy =
      StripedFileTestUtil.TEST_EC_POLICY;
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final short groupSize = (short) (
      sysDefaultPolicy.getNumDataUnits() +
      sysDefaultPolicy.getNumParityUnits());

  @Before
  public void setupCluster() throws IOException {
    conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }
@ -22,9 +22,11 @@ import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;

@ -44,16 +46,17 @@ import java.io.IOException;
public class TestFileChecksum {
  private static final Logger LOG = LoggerFactory
      .getLogger(TestFileChecksum.class);

  private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private int dataBlocks = ecPolicy.getNumDataUnits();
  private int parityBlocks = ecPolicy.getNumParityUnits();

  private MiniDFSCluster cluster;
  private DistributedFileSystem fs;
  private Configuration conf;
  private DFSClient client;

  private int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private int cellSize = ecPolicy.getCellSize();
  private int stripesPerBlock = 6;
  private int blockSize = cellSize * stripesPerBlock;
  private int numBlockGroups = 10;
@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;

@ -55,22 +56,19 @@ public class TestLeaseRecoveryStriped {
  public static final Log LOG = LogFactory
      .getLog(TestLeaseRecoveryStriped.class);

  private static final ErasureCodingPolicy ecPolicy =
      StripedFileTestUtil.TEST_EC_POLICY;
  private static final int NUM_DATA_BLOCKS = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private static final int NUM_PARITY_BLOCKS = StripedFileTestUtil.NUM_PARITY_BLOCKS;
  private static final int CELL_SIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private static final int STRIPE_SIZE = NUM_DATA_BLOCKS * CELL_SIZE;
  private static final int STRIPES_PER_BLOCK = 15;
  private static final int BLOCK_SIZE = CELL_SIZE * STRIPES_PER_BLOCK;
  private static final int BLOCK_GROUP_SIZE = BLOCK_SIZE * NUM_DATA_BLOCKS;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final int dataBlocks = ecPolicy.getNumDataUnits();
  private final int parityBlocks = ecPolicy.getNumParityUnits();
  private final int cellSize = ecPolicy.getCellSize();
  private final int stripSize = dataBlocks * cellSize;
  private final int stripesPerBlock = 15;
  private final int blockSize = cellSize * stripesPerBlock;
  private final int blockGroupSize = blockSize * dataBlocks;
  private static final int bytesPerChecksum = 512;

  static {
    GenericTestUtils.setLogLevel(DataNode.LOG, Level.ALL);
    StripedFileTestUtil.stripesPerBlock = STRIPES_PER_BLOCK;
    StripedFileTestUtil.blockSize = BLOCK_SIZE;
    StripedFileTestUtil.BLOCK_GROUP_SIZE = BLOCK_GROUP_SIZE;
  }

  static private final String fakeUsername = "fakeUser1";

@ -85,13 +83,13 @@ public class TestLeaseRecoveryStriped {
  @Before
  public void setup() throws IOException {
    conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 6000L);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
        false);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
    final int numDNs = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
    final int numDNs = dataBlocks + parityBlocks;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
    cluster.waitActive();
    dfs = cluster.getFileSystem();

@ -106,7 +104,7 @@ public class TestLeaseRecoveryStriped {
    }
  }

  private static int[][][] getBlockLengthsSuite() {
  private int[][][] getBlockLengthsSuite() {
    final int groups = 4;
    final int minNumCell = 3;
    final int maxNumCell = 11;

@ -120,13 +118,13 @@ public class TestLeaseRecoveryStriped {
      delta = bytesPerChecksum;
    }
    int[][] suite = new int[2][];
    int[] lens = new int[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS];
    int[] lens = new int[dataBlocks + parityBlocks];
    long[] lenInLong = new long[lens.length];
    for (int j = 0; j < lens.length; j++) {
      int numCell = random.nextInt(maxNumCell - minNumCell + 1) + minNumCell;
      int numDelta = j < NUM_DATA_BLOCKS ?
      int numDelta = j < dataBlocks ?
          random.nextInt(maxNumDelta - minNumDelta + 1) + minNumDelta : 0;
      lens[j] = CELL_SIZE * numCell + delta * numDelta;
      lens[j] = cellSize * numCell + delta * numDelta;
      lenInLong[j] = lens[j];
    }
    suite[0] = lens;

@ -137,13 +135,13 @@ public class TestLeaseRecoveryStriped {
    return blkLenSuite;
  }

  private static final int[][][] BLOCK_LENGTHS_SUITE = getBlockLengthsSuite();
  private final int[][][] blockLengthsSuite = getBlockLengthsSuite();

  @Test
  public void testLeaseRecovery() throws Exception {
    for (int i = 0; i < BLOCK_LENGTHS_SUITE.length; i++) {
      int[] blockLengths = BLOCK_LENGTHS_SUITE[i][0];
      int safeLength = BLOCK_LENGTHS_SUITE[i][1][0];
    for (int i = 0; i < blockLengthsSuite.length; i++) {
      int[] blockLengths = blockLengthsSuite[i][0];
      int safeLength = blockLengthsSuite[i][1][0];
      try {
        runTest(blockLengths, safeLength);
      } catch (Throwable e) {

@ -162,20 +160,20 @@ public class TestLeaseRecoveryStriped {
    List<Long> oldGS = new ArrayList<>();
    oldGS.add(1001L);
    StripedFileTestUtil.checkData(dfs, p, safeLength,
        new ArrayList<DatanodeInfo>(), oldGS);
        new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
    // After recovery, storages are reported by primary DN. we should verify
    // storages reported by blockReport.
    cluster.restartNameNode(true);
    cluster.waitFirstBRCompleted(0, 10000);
    StripedFileTestUtil.checkData(dfs, p, safeLength,
        new ArrayList<DatanodeInfo>(), oldGS);
        new ArrayList<DatanodeInfo>(), oldGS, blockGroupSize);
  }

  private void writePartialBlocks(int[] blockLengths) throws Exception {
    final FSDataOutputStream out = dfs.create(p);
    final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out
        .getWrappedStream();
    int length = (STRIPES_PER_BLOCK - 1) * STRIPE_SIZE;
    int length = (stripesPerBlock - 1) * stripSize;
    int[] posToKill = getPosToKill(blockLengths);
    int checkingPos = nextCheckingPos(posToKill, 0);
    try {

@ -209,20 +207,20 @@ public class TestLeaseRecoveryStriped {
  }

  private int[] getPosToKill(int[] blockLengths) {
    int[] posToKill = new int[NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS];
    for (int i = 0; i < NUM_DATA_BLOCKS; i++) {
      int numStripe = (blockLengths[i] - 1) / CELL_SIZE;
      posToKill[i] = numStripe * STRIPE_SIZE + i * CELL_SIZE
          + blockLengths[i] % CELL_SIZE;
      if (blockLengths[i] % CELL_SIZE == 0) {
        posToKill[i] += CELL_SIZE;
    int[] posToKill = new int[dataBlocks + parityBlocks];
    for (int i = 0; i < dataBlocks; i++) {
      int numStripe = (blockLengths[i] - 1) / cellSize;
      posToKill[i] = numStripe * stripSize + i * cellSize
          + blockLengths[i] % cellSize;
      if (blockLengths[i] % cellSize == 0) {
        posToKill[i] += cellSize;
      }
    }
    for (int i = NUM_DATA_BLOCKS; i < NUM_DATA_BLOCKS
        + NUM_PARITY_BLOCKS; i++) {
      Preconditions.checkArgument(blockLengths[i] % CELL_SIZE == 0);
      int numStripe = (blockLengths[i]) / CELL_SIZE;
      posToKill[i] = numStripe * STRIPE_SIZE;
    for (int i = dataBlocks; i < dataBlocks
        + parityBlocks; i++) {
      Preconditions.checkArgument(blockLengths[i] % cellSize == 0);
      int numStripe = (blockLengths[i]) / cellSize;
      posToKill[i] = numStripe * stripSize;
    }
    return posToKill;
  }
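The getPosToKill() arithmetic above decides how far into the striped file the writer gets before a streamer is interrupted, so that internal block i ends up exactly blockLengths[i] bytes long. A minimal standalone sketch of that position calculation, with assumed (illustrative) cell and group sizes rather than values read from a policy:

public class PosToKillExample {
  public static void main(String[] args) {
    int cellSize = 64 * 1024;               // assumed cell size
    int dataBlocks = 6;                     // assumed data units
    int stripSize = dataBlocks * cellSize;  // bytes per full stripe

    int i = 2;                              // data block to interrupt
    int blockLength = 3 * cellSize + 500;   // desired final length of block i

    int numStripe = (blockLength - 1) / cellSize;
    int posToKill = numStripe * stripSize + i * cellSize
        + blockLength % cellSize;
    if (blockLength % cellSize == 0) {
      posToKill += cellSize;
    }
    // Stop the streamer once this many bytes of the file have been written.
    System.out.println("kill block " + i + " after file offset " + posToKill);
  }
}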
@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

@ -52,9 +54,6 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;

public class TestReadStripedFileWithDecoding {
  static final Log LOG = LogFactory.getLog(TestReadStripedFileWithDecoding.class);

@ -68,15 +67,22 @@ public class TestReadStripedFileWithDecoding {
  private MiniDFSCluster cluster;
  private DistributedFileSystem fs;
  private static final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private static final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private final int smallFileLength = blockSize * dataBlocks - 123;
  private final int largeFileLength = blockSize * dataBlocks + 123;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks =
      (short) ecPolicy.getNumParityUnits();
  private final int numDNs = dataBlocks + parityBlocks;
  private final int cellSize = ecPolicy.getCellSize();
  private final int stripPerBlock = 4;
  private final int blockSize = cellSize * stripPerBlock;
  private final int blockGroupSize = blockSize * dataBlocks;
  private final int smallFileLength = blockGroupSize - 123;
  private final int largeFileLength = blockGroupSize + 123;
  private final int[] fileLengths = {smallFileLength, largeFileLength};
  private static final int[] dnFailureNums = getDnFailureNums();
  private final int[] dnFailureNums = getDnFailureNums();

  private static int[] getDnFailureNums() {
  private int[] getDnFailureNums() {
    int[] dnFailureNums = new int[parityBlocks];
    for (int i = 0; i < dnFailureNums.length; i++) {
      dnFailureNums[i] = i + 1;

@ -191,7 +197,8 @@ public class TestReadStripedFileWithDecoding {
    StripedFileTestUtil.verifyStatefulRead(fs, testPath, length, expected, buffer);
    StripedFileTestUtil.verifyStatefulRead(fs, testPath, length, expected,
        ByteBuffer.allocate(length + 100));
    StripedFileTestUtil.verifySeek(fs, testPath, length);
    StripedFileTestUtil.verifySeek(fs, testPath, length, ecPolicy,
        blockGroupSize);
  }

  private void testReadWithDNFailure(int fileLength, int dnFailureNum)
@ -22,7 +22,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.Assert;
import org.junit.Test;
import org.junit.Rule;

@ -30,11 +32,6 @@ import org.junit.rules.Timeout;

import java.io.IOException;

import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.TEST_EC_POLICY;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;

/**
 * Test reading a striped file when some of its blocks are missing (not included
 * in the block locations returned by the NameNode).

@ -45,8 +42,15 @@ public class TestReadStripedFileWithMissingBlocks {
  private MiniDFSCluster cluster;
  private DistributedFileSystem fs;
  private Configuration conf = new HdfsConfiguration();
  private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
  private final int cellSize = ecPolicy.getCellSize();
  private final int stripPerBlock = 4;
  private final int blockSize = stripPerBlock * cellSize;
  private final int blockGroupSize = blockSize * dataBlocks;
  private final int numDNs = dataBlocks + parityBlocks;
  private final int fileLength = blockSize * dataBlocks + 123;

  @Rule

@ -57,7 +61,7 @@ public class TestReadStripedFileWithMissingBlocks {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
    cluster.getFileSystem().getClient().setErasureCodingPolicy(
        "/", TEST_EC_POLICY);
        "/", ecPolicy);
    fs = cluster.getFileSystem();
  }

@ -70,9 +74,9 @@ public class TestReadStripedFileWithMissingBlocks {
  @Test
  public void testReadFileWithMissingBlocks() throws Exception {
    for (int missingData = 1; missingData <= NUM_PARITY_BLOCKS; missingData++) {
    for (int missingData = 1; missingData <= dataBlocks; missingData++) {
      for (int missingParity = 0; missingParity <=
          NUM_PARITY_BLOCKS - missingData; missingParity++) {
          parityBlocks - missingData; missingParity++) {
        try {
          setup();
          readFileWithMissingBlocks(new Path("/foo"), fileLength,

@ -102,7 +106,7 @@ public class TestReadStripedFileWithMissingBlocks {
    }
    for (int i = 0; i < missingParityNum; i++) {
      missingDataNodes[i + missingDataNum] = i +
          Math.min(StripedFileTestUtil.NUM_DATA_BLOCKS, dataBlocks);
          Math.min(ecPolicy.getNumDataUnits(), dataBlocks);
    }
    stopDataNodes(locs, missingDataNodes);

@ -112,7 +116,8 @@ public class TestReadStripedFileWithMissingBlocks {
    byte[] smallBuf = new byte[1024];
    byte[] largeBuf = new byte[fileLength + 100];
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
        blockGroupSize);
    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
        smallBuf);
    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

@ -60,12 +61,14 @@ import org.junit.Test;
public class TestReconstructStripedFile {
  public static final Log LOG = LogFactory.getLog(TestReconstructStripedFile.class);

  private static final int dataBlkNum = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private static final int parityBlkNum = StripedFileTestUtil.NUM_PARITY_BLOCKS;
  private static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private static final int blockSize = cellSize * 3;
  private static final int groupSize = dataBlkNum + parityBlkNum;
  private static final int dnNum = groupSize + parityBlkNum;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final int dataBlkNum = ecPolicy.getNumDataUnits();
  private final int parityBlkNum = ecPolicy.getNumParityUnits();
  private final int cellSize = ecPolicy.getCellSize();
  private final int blockSize = cellSize * 3;
  private final int groupSize = dataBlkNum + parityBlkNum;
  private final int dnNum = groupSize + parityBlkNum;

  static {
    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
@ -23,7 +23,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.junit.After;

@ -41,11 +43,13 @@ import static org.junit.Assert.assertTrue;

public class TestSafeModeWithStripedFile {

  static final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
  static final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
  static final int numDNs = DATA_BLK_NUM + PARITY_BLK_NUM;
  static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  static final int blockSize = cellSize * 2;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
  private final int numDNs = dataBlocks + parityBlocks;
  private final int cellSize = ecPolicy.getCellSize();
  private final int blockSize = cellSize * 2;

  private MiniDFSCluster cluster;
  private Configuration conf;

@ -78,7 +82,7 @@ public class TestSafeModeWithStripedFile {
  @Test
  public void testStripedFile1() throws IOException {
    int numCell = DATA_BLK_NUM - 1;
    int numCell = dataBlocks - 1;
    doTest(cellSize * numCell, numCell);
  }

@ -101,7 +105,7 @@ public class TestSafeModeWithStripedFile {
    // If we only have 1 block, NN won't enter safemode in the first place
    // because the threshold is 0 blocks.
    // So we need to add another 2 blocks.
    int bigSize = blockSize * DATA_BLK_NUM * 2;
    int bigSize = blockSize * dataBlocks * 2;
    Path bigFilePath = new Path("/testStripedFile_" + bigSize);
    data = StripedFileTestUtil.generateBytes(bigSize);
    DFSTestUtil.writeFile(fs, bigFilePath, data);

@ -143,7 +147,7 @@ public class TestSafeModeWithStripedFile {
    assertEquals(1, NameNodeAdapter.getSafeModeSafeBlocks(nn));

    // the 2 blocks of bigFile need DATA_BLK_NUM storages to be safe
    for (int i = minStorages; i < DATA_BLK_NUM - 1; i++) {
    for (int i = minStorages; i < dataBlocks - 1; i++) {
      cluster.restartDataNode(dnprops.remove(0));
      cluster.waitActive();
      cluster.triggerBlockReports();
@ -24,8 +24,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.ipc.RemoteException;

@ -43,14 +45,18 @@ import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Random;

import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.stripesPerBlock;

public class TestWriteReadStripedFile {
  public static final Log LOG = LogFactory.getLog(TestWriteReadStripedFile.class);
  private static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private static short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final int cellSize = ecPolicy.getCellSize();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
  private final int numDNs = dataBlocks + parityBlocks;
  private final int stripesPerBlock = 4;
  private final int blockSize = stripesPerBlock * cellSize;
  private final int blockGroupSize = blockSize * dataBlocks;

  private MiniDFSCluster cluster;
  private DistributedFileSystem fs;
  private Configuration conf = new HdfsConfiguration();

@ -221,7 +227,8 @@ public class TestWriteReadStripedFile {
    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
        largeBuf);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
        blockGroupSize);
    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
        ByteBuffer.allocate(fileLength + 100));
    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,

@ -263,7 +270,8 @@ public class TestWriteReadStripedFile {
    //StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);

    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, largeBuf);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
        blockGroupSize);
    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected, smallBuf);
    // webhdfs doesn't support bytebuffer read
  }
@ -23,6 +23,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Assert;

@ -32,9 +34,6 @@ import org.junit.Test;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

import static org.apache.hadoop.hdfs.StripedFileTestUtil.blockSize;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.numDNs;

public class TestWriteStripedFileWithFailure {
  public static final Log LOG = LogFactory
      .getLog(TestWriteStripedFileWithFailure.class);

@ -47,8 +46,12 @@ public class TestWriteStripedFileWithFailure {
    GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.ALL);
  }

  private final short dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
  private final short parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
  private final int numDNs = dataBlocks + parityBlocks;
  private final int blockSize = 4 * ecPolicy.getCellSize();
  private final int smallFileLength = blockSize * dataBlocks - 123;
  private final int largeFileLength = blockSize * dataBlocks + 123;
  private final int[] fileLengths = {smallFileLength, largeFileLength};

@ -153,7 +156,8 @@ public class TestWriteStripedFileWithFailure {
    byte[] largeBuf = new byte[fileLength + 100];
    final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);
    StripedFileTestUtil.verifyLength(fs, srcPath, fileLength);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength);
    StripedFileTestUtil.verifySeek(fs, srcPath, fileLength, ecPolicy,
        blockSize * dataBlocks);
    StripedFileTestUtil.verifyStatefulRead(fs, srcPath, fileLength, expected,
        smallBuf);
    StripedFileTestUtil.verifyPread(fs, srcPath, fileLength, expected, largeBuf);
@ -35,7 +35,6 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

@ -212,7 +211,7 @@ public class TestPBHelper {
        datanodeUuids, storageIDs, storageTypes);
    if (isStriped) {
      blkLocs = new StripedBlockWithLocations(blkLocs, indices, dataBlkNum,
          StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE);
          ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize());
    }
    return blkLocs;
  }
@ -44,6 +44,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.AfterClass;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

@ -196,15 +199,17 @@ public class TestBalancer {
    conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1L);
  }

  int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
  int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
  int groupSize = dataBlocks + parityBlocks;
  private final static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
  private final static int stripesPerBlock = 4;
  static int DEFAULT_STRIPE_BLOCK_SIZE = cellSize * stripesPerBlock;
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final int dataBlocks = ecPolicy.getNumDataUnits();
  private final int parityBlocks = ecPolicy.getNumParityUnits();
  private final int groupSize = dataBlocks + parityBlocks;
  private final int cellSize = ecPolicy.getCellSize();
  private final int stripesPerBlock = 4;
  private final int defaultBlockSize = cellSize * stripesPerBlock;

  static void initConfWithStripe(Configuration conf) {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_STRIPE_BLOCK_SIZE);
  void initConfWithStripe(Configuration conf) {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    SimulatedFSDataset.setFactory(conf);

@ -1908,7 +1913,7 @@ public class TestBalancer {
  private void doTestBalancerWithStripedFile(Configuration conf) throws Exception {
    int numOfDatanodes = dataBlocks + parityBlocks + 2;
    int numOfRacks = dataBlocks;
    long capacity = 20 * DEFAULT_STRIPE_BLOCK_SIZE;
    long capacity = 20 * defaultBlockSize;
    long[] capacities = new long[numOfDatanodes];
    for (int i = 0; i < capacities.length; i++) {
      capacities[i] = capacity;
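For reference, the cluster sizing used by the striped-file balancer test works out as sketched below; the concrete numbers assume a 6+3 policy with a 64 KiB cell and are illustrative only, since the test now reads these values from the configured default policy.

public class StripedBalancerSizing {
  public static void main(String[] args) {
    // Assumed policy parameters (the test obtains them from
    // ErasureCodingPolicyManager.getSystemDefaultPolicy()).
    int dataBlocks = 6;
    int parityBlocks = 3;
    int cellSize = 64 * 1024;
    int stripesPerBlock = 4;

    int defaultBlockSize = cellSize * stripesPerBlock;  // dfs.blocksize for the test
    int numOfDatanodes = dataBlocks + parityBlocks + 2; // one group plus two spares
    int numOfRacks = dataBlocks;
    long capacity = 20L * defaultBlockSize;             // per-datanode capacity

    System.out.println("blockSize=" + defaultBlockSize + " datanodes="
        + numOfDatanodes + " racks=" + numOfRacks + " capacity=" + capacity);
  }
}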
@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
|
|||
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.junit.Assert;
|
||||
|
@ -33,8 +32,6 @@ import java.io.DataOutputStream;
|
|||
import java.io.ByteArrayOutputStream;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.fail;
|
||||
|
@ -43,11 +40,12 @@ import static org.junit.Assert.fail;
|
|||
* Test {@link BlockInfoStriped}
|
||||
*/
|
||||
public class TestBlockInfoStriped {
|
||||
private static final int TOTAL_NUM_BLOCKS = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
|
||||
private static final long BASE_ID = -1600;
|
||||
private static final Block baseBlock = new Block(BASE_ID);
|
||||
private static final ErasureCodingPolicy testECPolicy
|
||||
private final Block baseBlock = new Block(BASE_ID);
|
||||
private final ErasureCodingPolicy testECPolicy
|
||||
= ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
private final int totalBlocks = testECPolicy.getNumDataUnits() +
|
||||
testECPolicy.getNumParityUnits();
|
||||
private final BlockInfoStriped info = new BlockInfoStriped(baseBlock,
|
||||
testECPolicy);
|
||||
|
||||
|
@ -70,8 +68,8 @@ public class TestBlockInfoStriped {
|
|||
// first add NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS storages, i.e., a complete
|
||||
// group of blocks/storages
|
||||
DatanodeStorageInfo[] storageInfos = DFSTestUtil.createDatanodeStorageInfos(
|
||||
TOTAL_NUM_BLOCKS);
|
||||
Block[] blocks = createReportedBlocks(TOTAL_NUM_BLOCKS);
|
||||
totalBlocks);
|
||||
Block[] blocks = createReportedBlocks(totalBlocks);
|
||||
int i = 0;
|
||||
for (; i < storageInfos.length; i += 2) {
|
||||
info.addStorage(storageInfos[i], blocks[i]);
|
||||
|
@ -85,8 +83,8 @@ public class TestBlockInfoStriped {
|
|||
|
||||
// check
|
||||
byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS, indices.length);
|
||||
Assert.assertEquals(totalBlocks, info.getCapacity());
|
||||
Assert.assertEquals(totalBlocks, indices.length);
|
||||
i = 0;
|
||||
for (DatanodeStorageInfo storage : storageInfos) {
|
||||
int index = info.findStorageInfo(storage);
|
||||
|
@ -99,9 +97,9 @@ public class TestBlockInfoStriped {
|
|||
for (DatanodeStorageInfo storage : storageInfos) {
|
||||
Assert.assertTrue(info.addStorage(storage, blocks[i++]));
|
||||
}
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS, info.numNodes());
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS, indices.length);
|
||||
Assert.assertEquals(totalBlocks, info.getCapacity());
|
||||
Assert.assertEquals(totalBlocks, info.numNodes());
|
||||
Assert.assertEquals(totalBlocks, indices.length);
|
||||
i = 0;
|
||||
for (DatanodeStorageInfo storage : storageInfos) {
|
||||
int index = info.findStorageInfo(storage);
|
||||
|
@ -111,19 +109,19 @@ public class TestBlockInfoStriped {
|
|||
|
||||
// the same block is reported from another storage
|
||||
DatanodeStorageInfo[] storageInfos2 = DFSTestUtil.createDatanodeStorageInfos(
|
||||
TOTAL_NUM_BLOCKS * 2);
|
||||
totalBlocks * 2);
|
||||
// only add the second half of info2
|
||||
for (i = TOTAL_NUM_BLOCKS; i < storageInfos2.length; i++) {
|
||||
info.addStorage(storageInfos2[i], blocks[i % TOTAL_NUM_BLOCKS]);
|
||||
for (i = totalBlocks; i < storageInfos2.length; i++) {
|
||||
info.addStorage(storageInfos2[i], blocks[i % totalBlocks]);
|
||||
Assert.assertEquals(i + 1, info.getCapacity());
|
||||
Assert.assertEquals(i + 1, info.numNodes());
|
||||
indices = (byte[]) Whitebox.getInternalState(info, "indices");
|
||||
Assert.assertEquals(i + 1, indices.length);
|
||||
}
|
||||
for (i = TOTAL_NUM_BLOCKS; i < storageInfos2.length; i++) {
|
||||
for (i = totalBlocks; i < storageInfos2.length; i++) {
|
||||
int index = info.findStorageInfo(storageInfos2[i]);
|
||||
Assert.assertEquals(i++, index);
|
||||
Assert.assertEquals(index - TOTAL_NUM_BLOCKS, indices[index]);
|
||||
Assert.assertEquals(index - totalBlocks, indices[index]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -131,8 +129,8 @@ public class TestBlockInfoStriped {
|
|||
public void testRemoveStorage() {
|
||||
// first add TOTAL_NUM_BLOCKS into the BlockInfoStriped
|
||||
DatanodeStorageInfo[] storages = DFSTestUtil.createDatanodeStorageInfos(
|
||||
TOTAL_NUM_BLOCKS);
|
||||
Block[] blocks = createReportedBlocks(TOTAL_NUM_BLOCKS);
|
||||
totalBlocks);
|
||||
Block[] blocks = createReportedBlocks(totalBlocks);
|
||||
for (int i = 0; i < storages.length; i++) {
|
||||
info.addStorage(storages[i], blocks[i]);
|
||||
}
|
||||
|
@ -142,8 +140,8 @@ public class TestBlockInfoStriped {
|
|||
info.removeStorage(storages[2]);
|
||||
|
||||
// check
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS, info.getCapacity());
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS - 2, info.numNodes());
|
||||
Assert.assertEquals(totalBlocks, info.getCapacity());
|
||||
Assert.assertEquals(totalBlocks - 2, info.numNodes());
|
||||
byte[] indices = (byte[]) Whitebox.getInternalState(info, "indices");
|
||||
for (int i = 0; i < storages.length; i++) {
|
||||
int index = info.findStorageInfo(storages[i]);
|
||||
|
@ -158,44 +156,44 @@ public class TestBlockInfoStriped {
|
|||
|
||||
// the same block is reported from another storage
|
||||
DatanodeStorageInfo[] storages2 = DFSTestUtil.createDatanodeStorageInfos(
|
||||
TOTAL_NUM_BLOCKS * 2);
|
||||
for (int i = TOTAL_NUM_BLOCKS; i < storages2.length; i++) {
|
||||
info.addStorage(storages2[i], blocks[i % TOTAL_NUM_BLOCKS]);
|
||||
totalBlocks * 2);
|
||||
for (int i = totalBlocks; i < storages2.length; i++) {
|
||||
info.addStorage(storages2[i], blocks[i % totalBlocks]);
|
||||
}
|
||||
// now we should have 8 storages
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, info.numNodes());
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, info.getCapacity());
|
||||
Assert.assertEquals(totalBlocks * 2 - 2, info.numNodes());
|
||||
Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
|
||||
indices = (byte[]) Whitebox.getInternalState(info, "indices");
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, indices.length);
|
||||
int j = TOTAL_NUM_BLOCKS;
|
||||
for (int i = TOTAL_NUM_BLOCKS; i < storages2.length; i++) {
|
||||
Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
|
||||
int j = totalBlocks;
|
||||
for (int i = totalBlocks; i < storages2.length; i++) {
|
||||
int index = info.findStorageInfo(storages2[i]);
|
||||
if (i == TOTAL_NUM_BLOCKS || i == TOTAL_NUM_BLOCKS + 2) {
|
||||
Assert.assertEquals(i - TOTAL_NUM_BLOCKS, index);
|
||||
if (i == totalBlocks || i == totalBlocks + 2) {
|
||||
Assert.assertEquals(i - totalBlocks, index);
|
||||
} else {
|
||||
Assert.assertEquals(j++, index);
|
||||
}
|
||||
}
|
||||
|
||||
// remove the storages from storages2
|
||||
for (int i = 0; i < TOTAL_NUM_BLOCKS; i++) {
|
||||
info.removeStorage(storages2[i + TOTAL_NUM_BLOCKS]);
|
||||
for (int i = 0; i < totalBlocks; i++) {
|
||||
info.removeStorage(storages2[i + totalBlocks]);
|
||||
}
|
||||
// now we should have 3 storages
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS - 2, info.numNodes());
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, info.getCapacity());
|
||||
Assert.assertEquals(totalBlocks - 2, info.numNodes());
|
||||
Assert.assertEquals(totalBlocks * 2 - 2, info.getCapacity());
|
||||
indices = (byte[]) Whitebox.getInternalState(info, "indices");
|
||||
Assert.assertEquals(TOTAL_NUM_BLOCKS * 2 - 2, indices.length);
|
||||
for (int i = 0; i < TOTAL_NUM_BLOCKS; i++) {
|
||||
Assert.assertEquals(totalBlocks * 2 - 2, indices.length);
|
||||
for (int i = 0; i < totalBlocks; i++) {
|
||||
if (i == 0 || i == 2) {
|
||||
int index = info.findStorageInfo(storages2[i + TOTAL_NUM_BLOCKS]);
|
||||
int index = info.findStorageInfo(storages2[i + totalBlocks]);
|
||||
Assert.assertEquals(-1, index);
|
||||
} else {
|
||||
int index = info.findStorageInfo(storages[i]);
|
||||
Assert.assertEquals(i, index);
|
||||
}
|
||||
}
|
||||
for (int i = TOTAL_NUM_BLOCKS; i < TOTAL_NUM_BLOCKS * 2 - 2; i++) {
|
||||
for (int i = totalBlocks; i < totalBlocks * 2 - 2; i++) {
|
||||
Assert.assertEquals(-1, indices[i]);
|
||||
Assert.assertNull(info.getDatanode(i));
|
||||
}
|
||||
|
|
|
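Note: the TestBlockInfoStriped hunks above replace the fixed TOTAL_NUM_BLOCKS constant with a totalBlocks value computed from an erasure coding policy. A minimal sketch of that pattern, assuming the system default policy is used (the class and field names below are illustrative, not part of the patch):

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;

class StripedTestParams {
  // Width of one block group, derived from the policy instead of hard-coded.
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
  private final short totalBlocks = (short) (dataBlocks + parityBlocks);
}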
@ -20,10 +20,11 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
|
||||
import org.apache.hadoop.net.ServerSocketUtil;
|
||||
import org.junit.Rule;
|
||||
|
@@ -33,12 +34,13 @@ import org.junit.rules.Timeout;
import java.io.IOException;

public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {

-private final static int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-private final static int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-private final static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-private final static int stripesPerBlock = 4;
-private final static int numDNs = dataBlocks + parityBlocks + 2;
+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
+private final int dataBlocks = ecPolicy.getNumDataUnits();
+private final int parityBlocks = ecPolicy.getNumParityUnits();
+private final int cellSize = ecPolicy.getCellSize();
+private final int stripesPerBlock = 4;
+private final int numDNs = dataBlocks + parityBlocks + 2;
private MiniDFSCluster cluster;
private Configuration conf;
|
|
@ -24,10 +24,12 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
|
|||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
|
||||
import org.apache.hadoop.net.NetworkTopology;
|
||||
|
@ -46,10 +48,6 @@ import java.util.Arrays;
|
|||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
|
||||
|
||||
public class TestReconstructStripedBlocksWithRackAwareness {
|
||||
public static final Logger LOG = LoggerFactory.getLogger(
|
||||
TestReconstructStripedBlocksWithRackAwareness.class);
|
||||
|
@@ -60,10 +58,14 @@ public class TestReconstructStripedBlocksWithRackAwareness {
GenericTestUtils.setLogLevel(BlockManager.LOG, Level.ALL);
}

-private static final String[] hosts =
-getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1);
-private static final String[] racks =
-getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 1, NUM_DATA_BLOCKS);
+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
+private final int cellSize = ecPolicy.getCellSize();
+private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+private final String[] hosts = getHosts(dataBlocks + parityBlocks + 1);
+private final String[] racks =
+getRacks(dataBlocks + parityBlocks + 1, dataBlocks);

private static String[] getHosts(int numHosts) {
String[] hosts = new String[numHosts];
@ -157,7 +159,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
final Path file = new Path("/foo");
|
||||
// the file's block is in 9 dn but 5 racks
|
||||
DFSTestUtil.createFile(fs, file,
|
||||
BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
|
||||
cellSize * dataBlocks * 2, (short) 1, 0L);
|
||||
Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
|
||||
|
||||
final INodeFile fileNode = fsn.getFSDirectory()
|
||||
|
@ -169,7 +171,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
for (DatanodeStorageInfo storage : blockInfo.storages) {
|
||||
rackSet.add(storage.getDatanodeDescriptor().getNetworkLocation());
|
||||
}
|
||||
Assert.assertEquals(NUM_DATA_BLOCKS - 1, rackSet.size());
|
||||
Assert.assertEquals(dataBlocks - 1, rackSet.size());
|
||||
|
||||
// restart the stopped datanode
|
||||
cluster.restartDataNode(lastHost);
|
||||
|
@ -178,7 +180,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
// make sure we have 6 racks again
|
||||
NetworkTopology topology = bm.getDatanodeManager().getNetworkTopology();
|
||||
Assert.assertEquals(hosts.length, topology.getNumOfLeaves());
|
||||
Assert.assertEquals(NUM_DATA_BLOCKS, topology.getNumOfRacks());
|
||||
Assert.assertEquals(dataBlocks, topology.getNumOfRacks());
|
||||
|
||||
// pause all the heartbeats
|
||||
for (DataNode dn : cluster.getDataNodes()) {
|
||||
|
@ -225,7 +227,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
|
||||
final Path file = new Path("/foo");
|
||||
DFSTestUtil.createFile(fs, file,
|
||||
BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
|
||||
cellSize * dataBlocks * 2, (short) 1, 0L);
|
||||
|
||||
// stop host1
|
||||
MiniDFSCluster.DataNodeProperties host1 = stopDataNode("host1");
|
||||
|
@ -234,7 +236,7 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
cluster.waitActive();
|
||||
|
||||
// wait for reconstruction to finish
|
||||
final short blockNum = (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
|
||||
final short blockNum = (short) (dataBlocks + parityBlocks);
|
||||
DFSTestUtil.waitForReplication(fs, file, blockNum, 15 * 1000);
|
||||
|
||||
// restart host1
|
||||
|
@ -263,12 +265,12 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
*/
|
||||
@Test
|
||||
public void testReconstructionWithDecommission() throws Exception {
|
||||
final String[] racks = getRacks(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2,
|
||||
NUM_DATA_BLOCKS);
|
||||
final String[] hosts = getHosts(NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS + 2);
|
||||
final String[] rackNames = getRacks(dataBlocks + parityBlocks + 2,
|
||||
dataBlocks);
|
||||
final String[] hostNames = getHosts(dataBlocks + parityBlocks + 2);
|
||||
// we now have 11 hosts on 6 racks with distribution: 2-2-2-2-2-1
|
||||
cluster = new MiniDFSCluster.Builder(conf).racks(racks).hosts(hosts)
|
||||
.numDataNodes(hosts.length).build();
|
||||
cluster = new MiniDFSCluster.Builder(conf).racks(rackNames).hosts(hostNames)
|
||||
.numDataNodes(hostNames.length).build();
|
||||
cluster.waitActive();
|
||||
fs = cluster.getFileSystem();
|
||||
fs.setErasureCodingPolicy(new Path("/"), null);
|
||||
|
@ -277,11 +279,13 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
final DatanodeManager dm = bm.getDatanodeManager();
|
||||
|
||||
// stop h9 and h10 and create a file with 6+3 internal blocks
|
||||
MiniDFSCluster.DataNodeProperties h9 = stopDataNode(hosts[hosts.length - 3]);
|
||||
MiniDFSCluster.DataNodeProperties h10 = stopDataNode(hosts[hosts.length - 2]);
|
||||
MiniDFSCluster.DataNodeProperties h9 =
|
||||
stopDataNode(hostNames[hostNames.length - 3]);
|
||||
MiniDFSCluster.DataNodeProperties h10 =
|
||||
stopDataNode(hostNames[hostNames.length - 2]);
|
||||
final Path file = new Path("/foo");
|
||||
DFSTestUtil.createFile(fs, file,
|
||||
BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
|
||||
cellSize * dataBlocks * 2, (short) 1, 0L);
|
||||
final BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
|
||||
.getINode(file.toString()).asFile().getLastBlock();
|
||||
|
||||
|
@ -290,18 +294,19 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
cluster.waitActive();
|
||||
|
||||
// stop h11 so that the reconstruction happens
|
||||
MiniDFSCluster.DataNodeProperties h11 = stopDataNode(hosts[hosts.length - 1]);
|
||||
MiniDFSCluster.DataNodeProperties h11 =
|
||||
stopDataNode(hostNames[hostNames.length - 1]);
|
||||
boolean recovered = bm.countNodes(blockInfo).liveReplicas() >=
|
||||
NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
|
||||
dataBlocks + parityBlocks;
|
||||
for (int i = 0; i < 10 & !recovered; i++) {
|
||||
Thread.sleep(1000);
|
||||
recovered = bm.countNodes(blockInfo).liveReplicas() >=
|
||||
NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
|
||||
dataBlocks + parityBlocks;
|
||||
}
|
||||
Assert.assertTrue(recovered);
|
||||
|
||||
// mark h9 as decommissioning
|
||||
DataNode datanode9 = getDataNode(hosts[hosts.length - 3]);
|
||||
DataNode datanode9 = getDataNode(hostNames[hostNames.length - 3]);
|
||||
Assert.assertNotNull(datanode9);
|
||||
final DatanodeDescriptor dn9 = dm.getDatanode(datanode9.getDatanodeId());
|
||||
dn9.startDecommission();
|
||||
|
@ -310,7 +315,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
|
|||
cluster.restartDataNode(h10);
|
||||
cluster.restartDataNode(h11);
|
||||
cluster.waitActive();
|
||||
DataNodeTestUtils.triggerBlockReport(getDataNode(hosts[hosts.length - 1]));
|
||||
DataNodeTestUtils.triggerBlockReport(
|
||||
getDataNode(hostNames[hostNames.length - 1]));
|
||||
|
||||
// start decommissioning h9
|
||||
boolean satisfied = bm.isPlacementPolicySatisfied(blockInfo);
|
||||
|
|
|
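Note: a hedged sketch of how the rack-awareness test above now sizes its striped file from the policy rather than from static constants (fs is assumed to be the DistributedFileSystem of a running MiniDFSCluster; the path is illustrative):

ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
int cellSize = ecPolicy.getCellSize();
int dataBlocks = ecPolicy.getNumDataUnits();
// Two full stripes: every data block receives two cells.
long fileLen = (long) cellSize * dataBlocks * 2;
DFSTestUtil.createFile(fs, new Path("/foo"), fileLen, (short) 1, 0L);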
@ -37,8 +37,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
|
|||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.After;
|
||||
|
@@ -56,11 +57,13 @@ public class TestSequentialBlockGroupId {
private static final Log LOG = LogFactory
.getLog("TestSequentialBlockGroupId");

+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
private final short REPLICATION = 1;
private final long SEED = 0;
-private final int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-private final int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-private final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
+private final int dataBlocks = ecPolicy.getNumDataUnits();
+private final int parityBlocks = ecPolicy.getNumParityUnits();
+private final int cellSize = ecPolicy.getCellSize();

private final int stripesPerBlock = 2;
private final int blockSize = cellSize * stripesPerBlock;
|
|
@ -27,13 +27,14 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.fs.StorageType;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.Time;
|
||||
|
@@ -51,10 +52,14 @@ import org.slf4j.LoggerFactory;
public class TestSortLocatedStripedBlock {
static final Logger LOG = LoggerFactory
.getLogger(TestSortLocatedStripedBlock.class);
-static final int BLK_GROUP_WIDTH = StripedFileTestUtil.NUM_DATA_BLOCKS
-+ StripedFileTestUtil.NUM_PARITY_BLOCKS;
-static final int NUM_DATA_BLOCKS = StripedFileTestUtil.NUM_DATA_BLOCKS;
-static final int NUM_PARITY_BLOCKS = StripedFileTestUtil.NUM_PARITY_BLOCKS;

+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
+private final int cellSize = ecPolicy.getCellSize();
+private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+private final int groupSize = dataBlocks + parityBlocks;

static DatanodeManager dm;
static final long STALE_INTERVAL = 30 * 1000 * 60;
@ -100,7 +105,7 @@ public class TestSortLocatedStripedBlock {
|
|||
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
|
||||
lbsCount * decommnNodeIndices.size());
|
||||
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
|
||||
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
|
||||
dataBlocks, parityBlocks, decommnNodeIndices,
|
||||
targetNodeIndices, decommissionedNodes);
|
||||
|
||||
// prepare expected block index and token list.
|
||||
|
@ -111,7 +116,7 @@ public class TestSortLocatedStripedBlock {
|
|||
|
||||
dm.sortLocatedBlocks(null, lbs);
|
||||
|
||||
assertDecommnNodePosition(BLK_GROUP_WIDTH, decommissionedNodes, lbs);
|
||||
assertDecommnNodePosition(groupSize, decommissionedNodes, lbs);
|
||||
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
|
||||
}
|
||||
|
||||
|
@ -156,7 +161,7 @@ public class TestSortLocatedStripedBlock {
|
|||
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
|
||||
lbsCount * decommnNodeIndices.size());
|
||||
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
|
||||
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
|
||||
dataBlocks, parityBlocks, decommnNodeIndices,
|
||||
targetNodeIndices, decommissionedNodes);
|
||||
|
||||
// prepare expected block index and token list.
|
||||
|
@ -166,7 +171,7 @@ public class TestSortLocatedStripedBlock {
|
|||
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
|
||||
|
||||
dm.sortLocatedBlocks(null, lbs);
|
||||
assertDecommnNodePosition(BLK_GROUP_WIDTH, decommissionedNodes, lbs);
|
||||
assertDecommnNodePosition(groupSize, decommissionedNodes, lbs);
|
||||
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
|
||||
}
|
||||
|
||||
|
@ -209,9 +214,9 @@ public class TestSortLocatedStripedBlock {
|
|||
// which will be used for assertions
|
||||
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
|
||||
lbsCount * decommnNodeIndices.size());
|
||||
int dataBlksNum = NUM_DATA_BLOCKS - 2;
|
||||
int dataBlksNum = dataBlocks - 2;
|
||||
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount, dataBlksNum,
|
||||
NUM_PARITY_BLOCKS, decommnNodeIndices, targetNodeIndices,
|
||||
parityBlocks, decommnNodeIndices, targetNodeIndices,
|
||||
decommissionedNodes);
|
||||
|
||||
// prepare expected block index and token list.
|
||||
|
@ -223,7 +228,7 @@ public class TestSortLocatedStripedBlock {
|
|||
dm.sortLocatedBlocks(null, lbs);
|
||||
|
||||
// After this index all are decommissioned nodes.
|
||||
int blkGrpWidth = dataBlksNum + NUM_PARITY_BLOCKS;
|
||||
int blkGrpWidth = dataBlksNum + parityBlocks;
|
||||
assertDecommnNodePosition(blkGrpWidth, decommissionedNodes, lbs);
|
||||
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
|
||||
}
|
||||
|
@ -275,7 +280,7 @@ public class TestSortLocatedStripedBlock {
|
|||
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
|
||||
lbsCount * decommnNodeIndices.size());
|
||||
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
|
||||
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
|
||||
dataBlocks, parityBlocks, decommnNodeIndices,
|
||||
targetNodeIndices, decommissionedNodes);
|
||||
|
||||
// prepare expected block index and token list.
|
||||
|
@ -288,7 +293,7 @@ public class TestSortLocatedStripedBlock {
|
|||
|
||||
// After this index all are decommissioned nodes. Needs to reconstruct two
|
||||
// more block indices.
|
||||
int blkGrpWidth = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS - 2;
|
||||
int blkGrpWidth = dataBlocks + parityBlocks - 2;
|
||||
assertDecommnNodePosition(blkGrpWidth, decommissionedNodes, lbs);
|
||||
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
|
||||
}
|
||||
|
@ -336,7 +341,7 @@ public class TestSortLocatedStripedBlock {
|
|||
HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(
|
||||
lbsCount * decommnNodeIndices.size());
|
||||
List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount,
|
||||
NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS, decommnNodeIndices,
|
||||
dataBlocks, parityBlocks, decommnNodeIndices,
|
||||
targetNodeIndices, decommissionedNodes);
|
||||
List <DatanodeInfo> staleDns = new ArrayList<>();
|
||||
for (LocatedBlock lb : lbs) {
|
||||
|
@ -355,7 +360,7 @@ public class TestSortLocatedStripedBlock {
|
|||
|
||||
dm.sortLocatedBlocks(null, lbs);
|
||||
|
||||
assertDecommnNodePosition(BLK_GROUP_WIDTH + 1, decommissionedNodes, lbs);
|
||||
assertDecommnNodePosition(groupSize + 1, decommissionedNodes, lbs);
|
||||
assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
|
||||
|
||||
for (LocatedBlock lb : lbs) {
|
||||
|
@ -452,7 +457,7 @@ public class TestSortLocatedStripedBlock {
|
|||
}
|
||||
}
|
||||
// Adding parity blocks after data blocks
|
||||
index = NUM_DATA_BLOCKS;
|
||||
index = dataBlocks;
|
||||
for (int j = numDataBlk; j < numDataBlk + numParityBlk; j++, index++) {
|
||||
blkIndices[j] = (byte) index;
|
||||
// Location port always equal to logical index of a block,
|
||||
|
@ -471,7 +476,7 @@ public class TestSortLocatedStripedBlock {
|
|||
}
|
||||
}
|
||||
// Add extra target nodes to storage list after the parity blocks
|
||||
int basePortValue = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
|
||||
int basePortValue = dataBlocks + parityBlocks;
|
||||
index = numDataBlk + numParityBlk;
|
||||
for (int i = 0; i < targetNodeIndices.size(); i++, index++) {
|
||||
int blkIndexPos = targetNodeIndices.get(i);
|
||||
|
@ -494,7 +499,7 @@ public class TestSortLocatedStripedBlock {
|
|||
}
|
||||
return new LocatedStripedBlock(
|
||||
new ExtendedBlock("pool", blockGroupID,
|
||||
StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE, 1001),
|
||||
cellSize, 1001),
|
||||
locs, storageIDs, storageTypes, blkIndices, 0, false, null);
|
||||
}
|
||||
|
||||
|
|
|
@ -65,7 +65,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
|
|||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.util.AutoCloseableLock;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
|
@ -109,7 +108,7 @@ import org.mockito.stubbing.Answer;
|
|||
import com.google.common.base.Supplier;
|
||||
|
||||
/**
|
||||
* This tests if sync all replicas in block recovery works correctly
|
||||
* This tests if sync all replicas in block recovery works correctly.
|
||||
*/
|
||||
public class TestBlockRecovery {
|
||||
private static final Log LOG = LogFactory.getLog(TestBlockRecovery.class);
|
||||
|
@ -136,30 +135,30 @@ public class TestBlockRecovery {
|
|||
@Rule
|
||||
public TestName currentTestName = new TestName();
|
||||
|
||||
private static final int CELL_SIZE =
|
||||
StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
|
||||
private static final int bytesPerChecksum = 512;
|
||||
private static final int[][][] BLOCK_LENGTHS_SUITE = {
|
||||
{ { 11 * CELL_SIZE, 10 * CELL_SIZE, 9 * CELL_SIZE, 8 * CELL_SIZE,
|
||||
7 * CELL_SIZE, 6 * CELL_SIZE, 5 * CELL_SIZE, 4 * CELL_SIZE,
|
||||
3 * CELL_SIZE }, { 36 * CELL_SIZE } },
|
||||
private final int cellSize =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize();
|
||||
private final int bytesPerChecksum = 512;
|
||||
private final int[][][] blockLengthsSuite = {
|
||||
{{11 * cellSize, 10 * cellSize, 9 * cellSize, 8 * cellSize,
|
||||
7 * cellSize, 6 * cellSize, 5 * cellSize, 4 * cellSize,
|
||||
3 * cellSize}, {36 * cellSize}},
|
||||
|
||||
{ { 3 * CELL_SIZE, 4 * CELL_SIZE, 5 * CELL_SIZE, 6 * CELL_SIZE,
|
||||
7 * CELL_SIZE, 8 * CELL_SIZE, 9 * CELL_SIZE, 10 * CELL_SIZE,
|
||||
11 * CELL_SIZE }, { 36 * CELL_SIZE } },
|
||||
{{3 * cellSize, 4 * cellSize, 5 * cellSize, 6 * cellSize,
|
||||
7 * cellSize, 8 * cellSize, 9 * cellSize, 10 * cellSize,
|
||||
11 * cellSize}, {36 * cellSize}},
|
||||
|
||||
{ { 11 * CELL_SIZE, 7 * CELL_SIZE, 6 * CELL_SIZE, 5 * CELL_SIZE,
|
||||
4 * CELL_SIZE, 2 * CELL_SIZE, 9 * CELL_SIZE, 10 * CELL_SIZE,
|
||||
11 * CELL_SIZE }, { 36 * CELL_SIZE } },
|
||||
{{11 * cellSize, 7 * cellSize, 6 * cellSize, 5 * cellSize,
|
||||
4 * cellSize, 2 * cellSize, 9 * cellSize, 10 * cellSize,
|
||||
11 * cellSize}, {36 * cellSize}},
|
||||
|
||||
{{8 * cellSize + bytesPerChecksum,
|
||||
7 * cellSize + bytesPerChecksum * 2,
|
||||
6 * cellSize + bytesPerChecksum * 2,
|
||||
5 * cellSize - bytesPerChecksum * 3,
|
||||
4 * cellSize - bytesPerChecksum * 4,
|
||||
3 * cellSize - bytesPerChecksum * 4, 9 * cellSize, 10 * cellSize,
|
||||
11 * cellSize}, {36 * cellSize}}, };
|
||||
|
||||
{ { 8 * CELL_SIZE + bytesPerChecksum,
|
||||
7 * CELL_SIZE + bytesPerChecksum * 2,
|
||||
6 * CELL_SIZE + bytesPerChecksum * 2,
|
||||
5 * CELL_SIZE - bytesPerChecksum * 3,
|
||||
4 * CELL_SIZE - bytesPerChecksum * 4,
|
||||
3 * CELL_SIZE - bytesPerChecksum * 4, 9 * CELL_SIZE, 10 * CELL_SIZE,
|
||||
11 * CELL_SIZE }, { 36 * CELL_SIZE } }, };
|
||||
|
||||
static {
|
||||
GenericTestUtils.setLogLevel(FSNamesystem.LOG, Level.ALL);
|
||||
GenericTestUtils.setLogLevel(LOG, Level.ALL);
|
||||
|
@ -807,9 +806,9 @@ public class TestBlockRecovery {
|
|||
BlockRecoveryWorker.RecoveryTaskStriped recoveryTask =
|
||||
recoveryWorker.new RecoveryTaskStriped(rBlockStriped);
|
||||
|
||||
for (int i = 0; i < BLOCK_LENGTHS_SUITE.length; i++) {
|
||||
int[] blockLengths = BLOCK_LENGTHS_SUITE[i][0];
|
||||
int safeLength = BLOCK_LENGTHS_SUITE[i][1][0];
|
||||
for (int i = 0; i < blockLengthsSuite.length; i++) {
|
||||
int[] blockLengths = blockLengthsSuite[i][0];
|
||||
int safeLength = blockLengthsSuite[i][1][0];
|
||||
Map<Long, BlockRecord> syncList = new HashMap<>();
|
||||
for (int id = 0; id < blockLengths.length; id++) {
|
||||
ReplicaRecoveryInfo rInfo = new ReplicaRecoveryInfo(id,
|
||||
|
|
|
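Note: the TestBlockRecovery change above rewrites the recovery test vectors in terms of a cellSize read from the policy instead of a static CELL_SIZE. A minimal sketch showing one row of the suite in that form (internal block lengths on the left, expected safe length on the right):

int cellSize =
    ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize();
// One row from the suite: nine internal block lengths and the expected safe length.
int[][] firstCase = {
    {11 * cellSize, 10 * cellSize, 9 * cellSize, 8 * cellSize, 7 * cellSize,
     6 * cellSize, 5 * cellSize, 4 * cellSize, 3 * cellSize},
    {36 * cellSize}};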
@ -29,11 +29,13 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
|
|||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
|
||||
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
|
||||
|
@@ -58,15 +60,14 @@ import java.util.Arrays;
public class TestDataNodeErasureCodingMetrics {
public static final Log LOG = LogFactory.
getLog(TestDataNodeErasureCodingMetrics.class);

-private static final int DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-private static final int PARITY_BLK_NUM =
-StripedFileTestUtil.NUM_PARITY_BLOCKS;
-private static final int CELLSIZE =
-StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-private static final int BLOCKSIZE = CELLSIZE;
-private static final int GROUPSIZE = DATA_BLK_NUM + PARITY_BLK_NUM;
-private static final int DN_NUM = GROUPSIZE + 1;
+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
+private final int dataBlocks = ecPolicy.getNumDataUnits();
+private final int parityBlocks = ecPolicy.getNumParityUnits();
+private final int cellSize = ecPolicy.getCellSize();
+private final int blockSize = cellSize;
+private final int groupSize = dataBlocks + parityBlocks;
+private final int numDNs = groupSize + 1;

private MiniDFSCluster cluster;
private Configuration conf;
@ -76,9 +77,9 @@ public class TestDataNodeErasureCodingMetrics {
|
|||
public void setup() throws IOException {
|
||||
conf = new Configuration();
|
||||
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DN_NUM).build();
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||
cluster.waitActive();
|
||||
cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
|
||||
fs = cluster.getFileSystem();
|
||||
|
@ -130,7 +131,7 @@ public class TestDataNodeErasureCodingMetrics {
|
|||
private DataNode doTest(String fileName) throws Exception {
|
||||
|
||||
Path file = new Path(fileName);
|
||||
long fileLen = DATA_BLK_NUM * BLOCKSIZE;
|
||||
long fileLen = dataBlocks * blockSize;
|
||||
final byte[] data = StripedFileTestUtil.generateBytes((int) fileLen);
|
||||
DFSTestUtil.writeFile(fs, file, data);
|
||||
StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);
|
||||
|
@ -142,7 +143,7 @@ public class TestDataNodeErasureCodingMetrics {
|
|||
(LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();
|
||||
DataNode workerDn = null;
|
||||
DatanodeInfo[] locations = lastBlock.getLocations();
|
||||
assertEquals(locations.length, GROUPSIZE);
|
||||
assertEquals(locations.length, groupSize);
|
||||
|
||||
// we have ONE extra datanode in addition to the GROUPSIZE datanodes, here
|
||||
// is to find the extra datanode that the reconstruction task will run on,
|
||||
|
@ -178,7 +179,7 @@ public class TestDataNodeErasureCodingMetrics {
|
|||
int workCount = getComputedDatanodeWork();
|
||||
assertTrue("Wrongly computed block reconstruction work", workCount > 0);
|
||||
cluster.triggerHeartbeats();
|
||||
StripedFileTestUtil.waitForReconstructionFinished(file, fs, GROUPSIZE);
|
||||
StripedFileTestUtil.waitForReconstructionFinished(file, fs, groupSize);
|
||||
|
||||
return workerDn;
|
||||
}
|
||||
|
|
|
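Note: a minimal sketch of how the datanode EC-metrics test above derives its sizes from the policy, assuming the system default policy (one extra datanode is kept free to host the reconstruction work):

ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
int blockSize = ecPolicy.getCellSize();                // one cell per internal block
int groupSize = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
int numDNs = groupSize + 1;                            // group width plus a spare DN
long fileLen = (long) ecPolicy.getNumDataUnits() * blockSize;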
@ -64,6 +64,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
|||
import org.apache.hadoop.hdfs.NameNodeProxies;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
|
@ -72,6 +73,7 @@ import org.apache.hadoop.hdfs.server.balancer.ExitStatus;
|
|||
import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
|
||||
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
|
||||
import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
|
||||
import org.apache.hadoop.http.HttpConfig;
|
||||
import org.apache.hadoop.minikdc.MiniKdc;
|
||||
|
@@ -469,14 +471,16 @@ public class TestMover {
}
}

-int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-private final static int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-private final static int stripesPerBlock = 4;
-static int DEFAULT_STRIPE_BLOCK_SIZE = cellSize * stripesPerBlock;
+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
+private final int dataBlocks = ecPolicy.getNumDataUnits();
+private final int parityBlocks = ecPolicy.getNumParityUnits();
+private final int cellSize = ecPolicy.getCellSize();
+private final int stripesPerBlock = 4;
+private final int defaultBlockSize = cellSize * stripesPerBlock;

-static void initConfWithStripe(Configuration conf) {
-conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_STRIPE_BLOCK_SIZE);
+void initConfWithStripe(Configuration conf) {
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
@ -490,7 +494,7 @@ public class TestMover {
|
|||
// start 10 datanodes
|
||||
int numOfDatanodes =10;
|
||||
int storagesPerDatanode=2;
|
||||
long capacity = 10 * DEFAULT_STRIPE_BLOCK_SIZE;
|
||||
long capacity = 10 * defaultBlockSize;
|
||||
long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
|
||||
for (int i = 0; i < numOfDatanodes; i++) {
|
||||
for(int j=0;j<storagesPerDatanode;j++){
|
||||
|
@ -529,7 +533,7 @@ public class TestMover {
|
|||
|
||||
// write file to barDir
|
||||
final String fooFile = "/bar/foo";
|
||||
long fileLen = 20 * DEFAULT_STRIPE_BLOCK_SIZE ;
|
||||
long fileLen = 20 * defaultBlockSize;
|
||||
DFSTestUtil.createFile(cluster.getFileSystem(), new Path(fooFile),
|
||||
fileLen,(short) 3, 0);
|
||||
|
||||
|
|
|
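Note: a hedged sketch of the Mover test configuration after the change above, deriving the striped block size from the policy (stripesPerBlock stays a test-local choice; the Configuration shown is illustrative):

int cellSize = ErasureCodingPolicyManager.getSystemDefaultPolicy().getCellSize();
int stripesPerBlock = 4;
long defaultBlockSize = (long) cellSize * stripesPerBlock;
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);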
@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
|
|||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
|
||||
|
@@ -54,13 +55,15 @@ public class TestAddOverReplicatedStripedBlocks {
private DistributedFileSystem fs;
private final Path dirPath = new Path("/striped");
private Path filePath = new Path(dirPath, "file");
-private final short DATA_BLK_NUM = StripedFileTestUtil.NUM_DATA_BLOCKS;
-private final short PARITY_BLK_NUM = StripedFileTestUtil.NUM_PARITY_BLOCKS;
-private final short GROUP_SIZE = (short) (DATA_BLK_NUM + PARITY_BLK_NUM);
-private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-private final int NUM_STRIPE_PER_BLOCK = 4;
-private final int BLOCK_SIZE = NUM_STRIPE_PER_BLOCK * CELLSIZE;
-private final int numDNs = GROUP_SIZE + 3;
+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
+private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+private final short groupSize = (short) (dataBlocks + parityBlocks);
+private final int cellSize = ecPolicy.getCellSize();
+private final int stripesPerBlock = 4;
+private final int blockSize = stripesPerBlock * cellSize;
+private final int numDNs = groupSize + 3;

@Rule
public Timeout globalTimeout = new Timeout(300000);
@ -68,7 +71,7 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
@Before
|
||||
public void setup() throws IOException {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
|
||||
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||
// disable block recovery
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
|
||||
|
@ -92,17 +95,17 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
@Test
|
||||
public void testProcessOverReplicatedStripedBlock() throws Exception {
|
||||
// create a file which has exact one block group to the first GROUP_SIZE DNs
|
||||
long fileLen = DATA_BLK_NUM * BLOCK_SIZE;
|
||||
long fileLen = dataBlocks * blockSize;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
|
||||
filePath.toString(), 0, fileLen);
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
|
||||
long gs = bg.getBlock().getGenerationStamp();
|
||||
String bpid = bg.getBlock().getBlockPoolId();
|
||||
long groupId = bg.getBlock().getBlockId();
|
||||
Block blk = new Block(groupId, BLOCK_SIZE, gs);
|
||||
for (int i = 0; i < GROUP_SIZE; i++) {
|
||||
Block blk = new Block(groupId, blockSize, gs);
|
||||
for (int i = 0; i < groupSize; i++) {
|
||||
blk.setBlockId(groupId + i);
|
||||
cluster.injectBlocks(i, Arrays.asList(blk), bpid);
|
||||
}
|
||||
|
@ -113,7 +116,7 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
|
||||
cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
|
||||
// let a internal block be over replicated with 1 redundant block.
|
||||
blk.setBlockId(groupId + DATA_BLK_NUM);
|
||||
blk.setBlockId(groupId + dataBlocks);
|
||||
cluster.injectBlocks(numDNs - 1, Arrays.asList(blk), bpid);
|
||||
|
||||
// update blocksMap
|
||||
|
@ -128,14 +131,14 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
// verify that all internal blocks exists
|
||||
lbs = cluster.getNameNodeRpc().getBlockLocations(
|
||||
filePath.toString(), 0, fileLen);
|
||||
StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, GROUP_SIZE);
|
||||
StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testProcessOverReplicatedSBSmallerThanFullBlocks()
|
||||
throws Exception {
|
||||
// Create a EC file which doesn't fill full internal blocks.
|
||||
int fileLen = CELLSIZE * (DATA_BLK_NUM - 1);
|
||||
int fileLen = cellSize * (dataBlocks - 1);
|
||||
byte[] content = new byte[fileLen];
|
||||
DFSTestUtil.writeFile(fs, filePath, new String(content));
|
||||
LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
|
||||
|
@ -144,7 +147,7 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
long gs = bg.getBlock().getGenerationStamp();
|
||||
String bpid = bg.getBlock().getBlockPoolId();
|
||||
long groupId = bg.getBlock().getBlockId();
|
||||
Block blk = new Block(groupId, BLOCK_SIZE, gs);
|
||||
Block blk = new Block(groupId, blockSize, gs);
|
||||
cluster.triggerBlockReports();
|
||||
List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
|
||||
|
||||
|
@ -171,25 +174,25 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
// verify that all internal blocks exists
|
||||
lbs = cluster.getNameNodeRpc().getBlockLocations(
|
||||
filePath.toString(), 0, fileLen);
|
||||
StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, GROUP_SIZE - 1);
|
||||
StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testProcessOverReplicatedAndCorruptStripedBlock()
|
||||
throws Exception {
|
||||
long fileLen = DATA_BLK_NUM * BLOCK_SIZE;
|
||||
long fileLen = dataBlocks * blockSize;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
|
||||
filePath.toString(), 0, fileLen);
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
|
||||
long gs = bg.getBlock().getGenerationStamp();
|
||||
String bpid = bg.getBlock().getBlockPoolId();
|
||||
long groupId = bg.getBlock().getBlockId();
|
||||
Block blk = new Block(groupId, BLOCK_SIZE, gs);
|
||||
Block blk = new Block(groupId, blockSize, gs);
|
||||
BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy());
|
||||
for (int i = 0; i < GROUP_SIZE; i++) {
|
||||
for (int i = 0; i < groupSize; i++) {
|
||||
blk.setBlockId(groupId + i);
|
||||
cluster.injectBlocks(i, Arrays.asList(blk), bpid);
|
||||
}
|
||||
|
@ -225,14 +228,14 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0,
|
||||
fileLen);
|
||||
bg = (LocatedStripedBlock) (lbs.get(0));
|
||||
assertEquals(GROUP_SIZE + 1, bg.getBlockIndices().length);
|
||||
assertEquals(GROUP_SIZE + 1, bg.getLocations().length);
|
||||
BitSet set = new BitSet(GROUP_SIZE);
|
||||
assertEquals(groupSize + 1, bg.getBlockIndices().length);
|
||||
assertEquals(groupSize + 1, bg.getLocations().length);
|
||||
BitSet set = new BitSet(groupSize);
|
||||
for (byte index : bg.getBlockIndices()) {
|
||||
set.set(index);
|
||||
}
|
||||
Assert.assertFalse(set.get(0));
|
||||
for (int i = 1; i < GROUP_SIZE; i++) {
|
||||
for (int i = 1; i < groupSize; i++) {
|
||||
assertTrue(set.get(i));
|
||||
}
|
||||
}
|
||||
|
@ -243,18 +246,18 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
@Test
|
||||
public void testProcessOverReplicatedAndMissingStripedBlock()
|
||||
throws Exception {
|
||||
long fileLen = CELLSIZE * DATA_BLK_NUM;
|
||||
long fileLen = cellSize * dataBlocks;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, 1,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
stripesPerBlock, false);
|
||||
LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
|
||||
filePath.toString(), 0, fileLen);
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
|
||||
long gs = bg.getBlock().getGenerationStamp();
|
||||
String bpid = bg.getBlock().getBlockPoolId();
|
||||
long groupId = bg.getBlock().getBlockId();
|
||||
Block blk = new Block(groupId, BLOCK_SIZE, gs);
|
||||
Block blk = new Block(groupId, blockSize, gs);
|
||||
// only inject GROUP_SIZE - 1 blocks, so there is one block missing
|
||||
for (int i = 0; i < GROUP_SIZE - 1; i++) {
|
||||
for (int i = 0; i < groupSize - 1; i++) {
|
||||
blk.setBlockId(groupId + i);
|
||||
cluster.injectBlocks(i, Arrays.asList(blk), bpid);
|
||||
}
|
||||
|
@ -282,14 +285,14 @@ public class TestAddOverReplicatedStripedBlocks {
|
|||
lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0,
|
||||
fileLen);
|
||||
bg = (LocatedStripedBlock) (lbs.get(0));
|
||||
assertEquals(GROUP_SIZE + 1, bg.getBlockIndices().length);
|
||||
assertEquals(GROUP_SIZE + 1, bg.getLocations().length);
|
||||
BitSet set = new BitSet(GROUP_SIZE);
|
||||
assertEquals(groupSize + 1, bg.getBlockIndices().length);
|
||||
assertEquals(groupSize + 1, bg.getLocations().length);
|
||||
BitSet set = new BitSet(groupSize);
|
||||
for (byte index : bg.getBlockIndices()) {
|
||||
set.set(index);
|
||||
}
|
||||
Assert.assertFalse(set.get(GROUP_SIZE - 1));
|
||||
for (int i = 0; i < GROUP_SIZE - 1; i++) {
|
||||
Assert.assertFalse(set.get(groupSize - 1));
|
||||
for (int i = 0; i < groupSize - 1; i++) {
|
||||
assertTrue(set.get(i));
|
||||
}
|
||||
}
|
||||
|
|
|
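Note: a minimal sketch of the block-injection pattern used in the over-replication tests above, where internal block IDs of a striped group are the group ID plus the block index (groupId, gs, bpid, blockSize, groupSize and cluster are assumed to come from the surrounding test):

Block blk = new Block(groupId, blockSize, gs);
for (int i = 0; i < groupSize; i++) {
  blk.setBlockId(groupId + i);                       // internal block i of the group
  cluster.injectBlocks(i, Arrays.asList(blk), bpid); // place it on datanode i
}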
@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
|
|||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
||||
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
|
||||
|
@ -41,12 +42,13 @@ import org.mockito.internal.util.reflection.Whitebox;
|
|||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
|
||||
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
|
||||
|
||||
public class TestAddStripedBlockInFBR {
|
||||
private final short GROUP_SIZE = (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
|
||||
private final ErasureCodingPolicy ecPolicy =
|
||||
ErasureCodingPolicyManager.getSystemDefaultPolicy();
|
||||
private final int cellSize = ecPolicy.getCellSize();
|
||||
private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
|
||||
private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
|
||||
private final short groupSize = (short) (dataBlocks + parityBlocks);
|
||||
|
||||
private MiniDFSCluster cluster;
|
||||
private DistributedFileSystem dfs;
|
||||
|
@ -57,7 +59,7 @@ public class TestAddStripedBlockInFBR {
|
|||
@Before
|
||||
public void setup() throws IOException {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
|
||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
|
||||
cluster.waitActive();
|
||||
dfs = cluster.getFileSystem();
|
||||
}
|
||||
|
@ -87,14 +89,14 @@ public class TestAddStripedBlockInFBR {
|
|||
dfs.getClient().setErasureCodingPolicy(ecDir.toString(), null);
|
||||
|
||||
// create several non-EC files and one EC file
|
||||
final Path[] repFiles = new Path[GROUP_SIZE];
|
||||
for (int i = 0; i < GROUP_SIZE; i++) {
|
||||
final Path[] repFiles = new Path[groupSize];
|
||||
for (int i = 0; i < groupSize; i++) {
|
||||
repFiles[i] = new Path(repDir, "f" + i);
|
||||
DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
|
||||
}
|
||||
final Path ecFile = new Path(ecDir, "f");
|
||||
DFSTestUtil.createFile(dfs, ecFile,
|
||||
BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS, (short) 1, 0L);
|
||||
cellSize * dataBlocks, (short) 1, 0L);
|
||||
|
||||
// trigger dn's FBR. The FBR will add block-dn mapping.
|
||||
DataNodeTestUtils.triggerBlockReport(dn);
|
||||
|
@ -103,7 +105,7 @@ public class TestAddStripedBlockInFBR {
|
|||
BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem()
|
||||
.getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
|
||||
NumberReplicas nr = spy.countNodes(blockInfo);
|
||||
Assert.assertEquals(GROUP_SIZE, nr.liveReplicas());
|
||||
Assert.assertEquals(groupSize, nr.liveReplicas());
|
||||
Assert.assertEquals(0, nr.excessReplicas());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,11 +24,11 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
|
|||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.StripedFileTestUtil;
|
||||
import org.apache.hadoop.hdfs.protocol.Block;
|
||||
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||
|
@@ -63,13 +63,16 @@ import java.util.List;
import java.util.UUID;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
import static org.junit.Assert.assertEquals;

public class TestAddStripedBlocks {
-private final short GROUP_SIZE = (short) (StripedFileTestUtil.NUM_DATA_BLOCKS +
-StripedFileTestUtil.NUM_PARITY_BLOCKS);
+private final ErasureCodingPolicy ecPolicy =
+ErasureCodingPolicyManager.getSystemDefaultPolicy();
+private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+private final int cellSize = ecPolicy.getCellSize();
+private final short groupSize = (short) (ecPolicy.getNumDataUnits() +
+ecPolicy.getNumParityUnits());

private MiniDFSCluster cluster;
private DistributedFileSystem dfs;
@ -80,7 +83,7 @@ public class TestAddStripedBlocks {
|
|||
@Before
|
||||
public void setup() throws IOException {
|
||||
cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
|
||||
.numDataNodes(GROUP_SIZE).build();
|
||||
.numDataNodes(groupSize).build();
|
||||
cluster.waitActive();
|
||||
dfs = cluster.getFileSystem();
|
||||
dfs.getClient().setErasureCodingPolicy("/", null);
|
||||
|
@ -206,16 +209,15 @@ public class TestAddStripedBlocks {
|
|||
boolean checkReplica) {
|
||||
assertEquals(0, block.numNodes());
|
||||
Assert.assertFalse(block.isComplete());
|
||||
Assert.assertEquals(StripedFileTestUtil.NUM_DATA_BLOCKS, block.getDataBlockNum());
|
||||
Assert.assertEquals(StripedFileTestUtil.NUM_PARITY_BLOCKS,
|
||||
block.getParityBlockNum());
|
||||
Assert.assertEquals(dataBlocks, block.getDataBlockNum());
|
||||
Assert.assertEquals(parityBlocks, block.getParityBlockNum());
|
||||
Assert.assertEquals(0,
|
||||
block.getBlockId() & HdfsServerConstants.BLOCK_GROUP_INDEX_MASK);
|
||||
|
||||
Assert.assertEquals(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION,
|
||||
block.getBlockUCState());
|
||||
if (checkReplica) {
|
||||
Assert.assertEquals(GROUP_SIZE,
|
||||
Assert.assertEquals(groupSize,
|
||||
block.getUnderConstructionFeature().getNumExpectedLocations());
|
||||
DatanodeStorageInfo[] storages = block.getUnderConstructionFeature()
|
||||
.getExpectedStorageLocations();
|
||||
|
@ -259,8 +261,8 @@ public class TestAddStripedBlocks {
|
|||
Assert.assertTrue(lblk instanceof LocatedStripedBlock);
|
||||
DatanodeInfo[] datanodes = lblk.getLocations();
|
||||
byte[] blockIndices = ((LocatedStripedBlock) lblk).getBlockIndices();
|
||||
Assert.assertEquals(GROUP_SIZE, datanodes.length);
|
||||
Assert.assertEquals(GROUP_SIZE, blockIndices.length);
|
||||
Assert.assertEquals(groupSize, datanodes.length);
|
||||
Assert.assertEquals(groupSize, blockIndices.length);
|
||||
Assert.assertArrayEquals(indices, blockIndices);
|
||||
Assert.assertArrayEquals(expectedDNs, datanodes);
|
||||
} finally {
|
||||
|
@ -291,8 +293,8 @@ public class TestAddStripedBlocks {
|
|||
DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature()
|
||||
.getExpectedStorageLocations();
|
||||
byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
|
||||
Assert.assertEquals(GROUP_SIZE, locs.length);
|
||||
Assert.assertEquals(GROUP_SIZE, indices.length);
|
||||
Assert.assertEquals(groupSize, locs.length);
|
||||
Assert.assertEquals(groupSize, indices.length);
|
||||
|
||||
// 2. mimic incremental block reports and make sure the uc-replica list in
|
||||
// the BlockInfoUCStriped is correct
|
||||
|
@ -314,8 +316,8 @@ public class TestAddStripedBlocks {
|
|||
// make sure lastBlock is correct and the storages have been updated
|
||||
locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
|
||||
indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
|
||||
Assert.assertEquals(GROUP_SIZE, locs.length);
|
||||
Assert.assertEquals(GROUP_SIZE, indices.length);
|
||||
Assert.assertEquals(groupSize, locs.length);
|
||||
Assert.assertEquals(groupSize, indices.length);
|
||||
for (DatanodeStorageInfo newstorage : locs) {
|
||||
Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
|
||||
}
|
||||
|
@ -330,7 +332,7 @@ public class TestAddStripedBlocks {
|
|||
INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
|
||||
.getINode4Write(file.toString()).asFile();
|
||||
BlockInfo lastBlock = fileNode.getLastBlock();
|
||||
int i = GROUP_SIZE - 1;
|
||||
int i = groupSize - 1;
|
||||
for (DataNode dn : cluster.getDataNodes()) {
|
||||
String storageID = storageIDs.get(i);
|
||||
final Block block = new Block(lastBlock.getBlockId() + i--,
|
||||
|
@ -351,12 +353,12 @@ public class TestAddStripedBlocks {
|
|||
DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature()
|
||||
.getExpectedStorageLocations();
|
||||
byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
|
||||
Assert.assertEquals(GROUP_SIZE, locs.length);
|
||||
Assert.assertEquals(GROUP_SIZE, indices.length);
|
||||
for (i = 0; i < GROUP_SIZE; i++) {
|
||||
Assert.assertEquals(groupSize, locs.length);
|
||||
Assert.assertEquals(groupSize, indices.length);
|
||||
for (i = 0; i < groupSize; i++) {
|
||||
Assert.assertEquals(storageIDs.get(i),
|
||||
locs[GROUP_SIZE - 1 - i].getStorageID());
|
||||
Assert.assertEquals(GROUP_SIZE - i - 1, indices[i]);
|
||||
locs[groupSize - 1 - i].getStorageID());
|
||||
Assert.assertEquals(groupSize - i - 1, indices[i]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -380,7 +382,7 @@ public class TestAddStripedBlocks {
|
|||
// Now send a block report with correct size
|
||||
DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
|
||||
final Block reported = new Block(stored);
|
||||
reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE);
|
||||
reported.setNumBytes(numStripes * cellSize);
|
||||
StorageReceivedDeletedBlocks[] reports = DFSTestUtil
|
||||
.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
|
@ -391,7 +393,7 @@ public class TestAddStripedBlocks {
|
|||
|
||||
// Now send a block report with wrong size
|
||||
reported.setBlockId(stored.getBlockId() + 1);
|
||||
reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE - 1);
|
||||
reported.setNumBytes(numStripes * cellSize - 1);
|
||||
reports = DFSTestUtil.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
ns.processIncrementalBlockReport(
|
||||
|
@ -400,8 +402,8 @@ public class TestAddStripedBlocks {
|
|||
Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
|
||||
|
||||
// Now send a parity block report with correct size
|
||||
reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS);
|
||||
reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE);
|
||||
reported.setBlockId(stored.getBlockId() + dataBlocks);
|
||||
reported.setNumBytes(numStripes * cellSize);
|
||||
reports = DFSTestUtil.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
ns.processIncrementalBlockReport(
|
||||
|
@ -410,8 +412,8 @@ public class TestAddStripedBlocks {
|
|||
Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
|
||||
|
||||
// Now send a parity block report with wrong size
|
||||
reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS);
|
||||
reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE + 1);
|
||||
reported.setBlockId(stored.getBlockId() + dataBlocks);
|
||||
reported.setNumBytes(numStripes * cellSize + 1);
|
||||
reports = DFSTestUtil.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
ns.processIncrementalBlockReport(
|
||||
|
@ -425,8 +427,8 @@ public class TestAddStripedBlocks {
|
|||
// Now change the size of stored block, and test verifying the last
|
||||
// block size
|
||||
stored.setNumBytes(stored.getNumBytes() + 10);
|
||||
reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS + 2);
|
||||
reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE);
|
||||
reported.setBlockId(stored.getBlockId() + dataBlocks + 2);
|
||||
reported.setNumBytes(numStripes * cellSize);
|
||||
reports = DFSTestUtil.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
ns.processIncrementalBlockReport(
|
||||
|
@ -438,9 +440,9 @@ public class TestAddStripedBlocks {
|
|||
// Now send a parity block report with correct size based on adjusted
|
||||
// size of stored block
|
||||
/** Now stored block has {@link numStripes} full stripes + a cell + 10 */
|
||||
stored.setNumBytes(stored.getNumBytes() + BLOCK_STRIPED_CELL_SIZE);
|
||||
stored.setNumBytes(stored.getNumBytes() + cellSize);
|
||||
reported.setBlockId(stored.getBlockId());
|
||||
reported.setNumBytes((numStripes + 1) * BLOCK_STRIPED_CELL_SIZE);
|
||||
reported.setNumBytes((numStripes + 1) * cellSize);
|
||||
reports = DFSTestUtil.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
ns.processIncrementalBlockReport(
|
||||
|
@ -450,7 +452,7 @@ public class TestAddStripedBlocks {
|
|||
Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
|
||||
|
||||
reported.setBlockId(stored.getBlockId() + 1);
|
||||
reported.setNumBytes(numStripes * BLOCK_STRIPED_CELL_SIZE + 10);
|
||||
reported.setNumBytes(numStripes * cellSize + 10);
|
||||
reports = DFSTestUtil.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
ns.processIncrementalBlockReport(
|
||||
|
@ -459,8 +461,8 @@ public class TestAddStripedBlocks {
|
|||
Assert.assertEquals(1, ns.getCorruptReplicaBlocks());
|
||||
Assert.assertEquals(3, bm.getCorruptReplicas(stored).size());
|
||||
|
||||
reported.setBlockId(stored.getBlockId() + NUM_DATA_BLOCKS);
|
||||
reported.setNumBytes((numStripes + 1) * BLOCK_STRIPED_CELL_SIZE);
|
||||
reported.setBlockId(stored.getBlockId() + dataBlocks);
|
||||
reported.setNumBytes((numStripes + 1) * cellSize);
|
||||
reports = DFSTestUtil.makeReportForReceivedBlock(reported,
|
||||
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, storage);
|
||||
ns.processIncrementalBlockReport(
|
||||
|
|
|
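Note: a hedged sketch of how the incremental block report sizes in the TestAddStripedBlocks hunks above are computed from the policy (ecPolicy, stored and numStripes are assumed from the surrounding test):

int cellSize = ecPolicy.getCellSize();
int dataBlocks = ecPolicy.getNumDataUnits();
Block reported = new Block(stored);
reported.setNumBytes(numStripes * cellSize);             // a full-length data block
reported.setBlockId(stored.getBlockId() + dataBlocks);   // first parity block of the group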
@@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
- import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
@@ -99,8 +98,8 @@ public class TestFSEditLogLoader {

  private static final int NUM_DATA_NODES = 0;

- private static final ErasureCodingPolicy testECPolicy
-     = StripedFileTestUtil.TEST_EC_POLICY;
+ private final ErasureCodingPolicy testECPolicy
+     = ErasureCodingPolicyManager.getSystemDefaultPolicy();

  @Test
  public void testDisplayRecentEditLogOpCodes() throws IOException {
@@ -474,8 +473,8 @@ public class TestFSEditLogLoader {
    long blkId = 1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
-   short blockNum = StripedFileTestUtil.NUM_DATA_BLOCKS;
-   short parityNum = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+   short blockNum = (short) testECPolicy.getNumDataUnits();
+   short parityNum = (short) testECPolicy.getNumParityUnits();

    //set the storage policy of the directory
    fs.mkdir(new Path(testDir), new FsPermission("755"));
@@ -547,8 +546,8 @@ public class TestFSEditLogLoader {
    long blkId = 1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
-   short blockNum = StripedFileTestUtil.NUM_DATA_BLOCKS;
-   short parityNum = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+   short blockNum = (short) testECPolicy.getNumDataUnits();
+   short parityNum = (short) testECPolicy.getNumParityUnits();

    //set the storage policy of the directory
    fs.mkdir(new Path(testDir), new FsPermission("755"));

@@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
- import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -46,11 +45,12 @@ import java.io.IOException;
public class TestQuotaWithStripedBlocks {
  private static final int BLOCK_SIZE = 1024 * 1024;
  private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private static final ErasureCodingPolicy ecPolicy =
+  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
-  private static final int NUM_DATA_BLOCKS = ecPolicy.getNumDataUnits();
-  private static final int NUM_PARITY_BLOCKS = ecPolicy.getNumParityUnits();
-  private static final int GROUP_SIZE = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
+  private final int dataBlocks = ecPolicy.getNumDataUnits();
+  private final int parityBlocks = ecPolicy.getNumParityUnits();
+  private final int groupSize = dataBlocks + parityBlocks;
+  private final int cellSize = ecPolicy.getCellSize();
  private static final Path ecDir = new Path("/ec");

  private MiniDFSCluster cluster;
@@ -64,7 +64,7 @@ public class TestQuotaWithStripedBlocks {
  public void setUp() throws IOException {
    final Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
+   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
    cluster.waitActive();

    dir = cluster.getNamesystem().getFSDirectory();
@@ -109,8 +109,8 @@ public class TestQuotaWithStripedBlocks {
      final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
          .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
      // When we add a new block we update the quota using the full block size.
-     Assert.assertEquals(BLOCK_SIZE * GROUP_SIZE, spaceUsed);
-     Assert.assertEquals(BLOCK_SIZE * GROUP_SIZE, diskUsed);
+     Assert.assertEquals(BLOCK_SIZE * groupSize, spaceUsed);
+     Assert.assertEquals(BLOCK_SIZE * groupSize, diskUsed);

      dfs.getClient().getNamenode().complete(file.toString(),
          dfs.getClient().getClientName(), previous, fileNode.getId());
@@ -120,9 +120,9 @@ public class TestQuotaWithStripedBlocks {
      final long actualDiskUsed = dirNode.getDirectoryWithQuotaFeature()
          .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
      // In this case the file's real size is cell size * block group size.
-     Assert.assertEquals(StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE * GROUP_SIZE,
+     Assert.assertEquals(cellSize * groupSize,
          actualSpaceUsed);
-     Assert.assertEquals(StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE * GROUP_SIZE,
+     Assert.assertEquals(cellSize * groupSize,
          actualDiskUsed);
    } finally {
      IOUtils.cleanup(null, out);

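The two pairs of assertions above encode the quota bookkeeping for striped files: while a block group is under construction, consumed space is charged at the full block size for every internal block, and after the file is completed the charge drops to what was actually stored, which this test expects to be one cell per internal block. A small arithmetic sketch of those two figures, assuming an RS(6,3) policy with a 64 KB cell; the actual values come from whatever getSystemDefaultPolicy() returns:

public class StripedQuotaMath {
  public static void main(String[] args) {
    // Assumed layout: 6 data + 3 parity internal blocks, 64 KB cells.
    final int dataBlocks = 6;
    final int parityBlocks = 3;
    final int groupSize = dataBlocks + parityBlocks;
    final int cellSize = 64 * 1024;
    final long blockSize = 1024 * 1024;   // BLOCK_SIZE used by the test

    // Under construction: every internal block is charged as if it were full.
    long provisionalCharge = blockSize * groupSize;   // 9 MB

    // After completion: per the assertion above, one cell per internal block.
    long actualCharge = (long) cellSize * groupSize;  // 576 KB

    System.out.println("provisional = " + provisionalCharge
        + " bytes, actual = " + actualCharge + " bytes");
  }
}
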
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
@@ -51,9 +52,6 @@ import org.slf4j.LoggerFactory;
import java.util.BitSet;
import java.util.List;

- import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
- import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
- import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -61,9 +59,14 @@ import static org.junit.Assert.assertTrue;
public class TestReconstructStripedBlocks {
  public static final Logger LOG = LoggerFactory.getLogger(
      TestReconstructStripedBlocks.class);
-  private static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final short GROUP_SIZE =
-      (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private final int cellSize = ecPolicy.getCellSize();
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int blockSize = 4 * cellSize;


  private MiniDFSCluster cluster;
  private final Path dirPath = new Path("/dir");
@@ -88,7 +91,7 @@ public class TestReconstructStripedBlocks {

  @Test
  public void testMissingStripedBlockWithBusyNode() throws Exception {
-   for (int i = 1; i <= NUM_PARITY_BLOCKS; i++) {
+   for (int i = 1; i <= parityBlocks; i++) {
      doTestMissingStripedBlock(i, 1);
    }
  }
@@ -105,7 +108,7 @@ public class TestReconstructStripedBlocks {
      throws Exception {
    Configuration conf = new HdfsConfiguration();
    initConf(conf);
-   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE + 1)
+   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 1)
        .build();

    try {
@@ -126,10 +129,10 @@ public class TestReconstructStripedBlocks {
      for (BlockInfo blk : blocks) {
        assertTrue(blk.isStriped());
        assertTrue(blk.isComplete());
-       assertEquals(BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS,
+       assertEquals(cellSize * dataBlocks,
            blk.getNumBytes());
        final BlockInfoStriped sb = (BlockInfoStriped) blk;
-       assertEquals(GROUP_SIZE, sb.numNodes());
+       assertEquals(groupSize, sb.numNodes());
      }

      final BlockManager bm = cluster.getNamesystem().getBlockManager();
@@ -156,7 +159,7 @@ public class TestReconstructStripedBlocks {
      BlockManagerTestUtil.getComputedDatanodeWork(bm);

      // all the reconstruction work will be scheduled on the last DN
-     DataNode lastDn = cluster.getDataNodes().get(GROUP_SIZE);
+     DataNode lastDn = cluster.getDataNodes().get(groupSize);
      DatanodeDescriptor last =
          bm.getDatanodeManager().getDatanode(lastDn.getDatanodeId());
      assertEquals("Counting the number of outstanding EC tasks", numBlocks,
@@ -168,15 +171,15 @@ public class TestReconstructStripedBlocks {
        assertEquals(last, info.getTargetDnInfos()[0]);
        assertEquals(info.getSourceDnInfos().length,
            info.getLiveBlockIndices().length);
-       if (GROUP_SIZE - numOfMissed == NUM_DATA_BLOCKS) {
+       if (groupSize - numOfMissed == dataBlocks) {
          // It's a QUEUE_HIGHEST_PRIORITY block, so the busy DNs will be chosen
          // to make sure we have NUM_DATA_BLOCKS DNs to do reconstruction
          // work.
-         assertEquals(NUM_DATA_BLOCKS, info.getSourceDnInfos().length);
+         assertEquals(dataBlocks, info.getSourceDnInfos().length);
        } else {
          // The block has no highest priority, so we don't use the busy DNs as
          // sources
-         assertEquals(GROUP_SIZE - numOfMissed - numOfBusy,
+         assertEquals(groupSize - numOfMissed - numOfBusy,
              info.getSourceDnInfos().length);
        }
      }

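The branch above decides how many source DataNodes a reconstruction task gets: when only dataBlocks internal blocks remain live the block is in the highest-priority queue and busy nodes are still used as sources, otherwise busy nodes are skipped. A sketch of that arithmetic, mirroring the test's expectation; the helper class and the RS(6,3) numbers in main are illustrative assumptions:

public class ReconstructionSourceMath {
  /** Expected number of reconstruction sources, per the test's assertions. */
  static int expectedSources(int groupSize, int dataBlocks,
                             int numOfMissed, int numOfBusy) {
    if (groupSize - numOfMissed == dataBlocks) {
      // Highest priority: keep the busy nodes so dataBlocks sources exist.
      return dataBlocks;
    }
    // Otherwise busy nodes are excluded from the source set.
    return groupSize - numOfMissed - numOfBusy;
  }

  public static void main(String[] args) {
    // Assumed RS(6,3) layout: 6 data + 3 parity internal blocks.
    int dataBlocks = 6;
    int groupSize = 9;
    // 3 missing blocks leave exactly 6 live ones: highest priority, 6 sources.
    System.out.println(expectedSources(groupSize, dataBlocks, 3, 1)); // 6
    // 1 missing block leaves 8 live ones: the busy node is skipped, 7 sources.
    System.out.println(expectedSources(groupSize, dataBlocks, 1, 1)); // 7
  }
}
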
@@ -190,15 +193,15 @@ public class TestReconstructStripedBlocks {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
-   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, StripedFileTestUtil.blockSize);
-   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE + 2)
+   conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
        .build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      BlockManager bm = cluster.getNamesystem().getBlockManager();
      fs.getClient().setErasureCodingPolicy("/", null);
-     int fileLen = NUM_DATA_BLOCKS * StripedFileTestUtil.blockSize;
+     int fileLen = dataBlocks * blockSize;
      Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
      final byte[] data = new byte[fileLen];
      DFSTestUtil.writeFile(fs, p, data);
@@ -206,7 +209,7 @@ public class TestReconstructStripedBlocks {
      LocatedStripedBlock lb = (LocatedStripedBlock)fs.getClient()
          .getLocatedBlocks(p.toString(), 0).get(0);
      LocatedBlock[] lbs = StripedBlockUtil.parseStripedBlockGroup(lb,
-         cellSize, NUM_DATA_BLOCKS, NUM_PARITY_BLOCKS);
+         cellSize, dataBlocks, parityBlocks);

      assertEquals(0, getNumberOfBlocksToBeErasureCoded(cluster));
      assertEquals(0, bm.getPendingReconstructionBlocksCount());
@@ -255,7 +258,7 @@ public class TestReconstructStripedBlocks {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
        false);
-   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE + 2)
+   cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize + 2)
        .build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
@@ -264,7 +267,7 @@ public class TestReconstructStripedBlocks {
      fs.mkdirs(dirPath);
      fs.setErasureCodingPolicy(dirPath, null);
      DFSTestUtil.createFile(fs, filePath,
-         BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS * 2, (short) 1, 0L);
+         cellSize * dataBlocks * 2, (short) 1, 0L);

      // stop a dn
      LocatedBlocks blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
@@ -275,7 +278,7 @@ public class TestReconstructStripedBlocks {
      cluster.setDataNodeDead(dnToStop);

      // wait for reconstruction to happen
-     DFSTestUtil.waitForReplication(fs, filePath, GROUP_SIZE, 15 * 1000);
+     DFSTestUtil.waitForReplication(fs, filePath, groupSize, 15 * 1000);

      // bring the dn back: 10 internal blocks now
      cluster.restartDataNode(dnProp);
@@ -304,7 +307,7 @@ public class TestReconstructStripedBlocks {
      // check if NN can detect the missing internal block and finish the
      // reconstruction
      StripedFileTestUtil.waitForReconstructionFinished(filePath, fs,
-         GROUP_SIZE);
+         groupSize);
      boolean reconstructed = false;
      for (int i = 0; i < 5; i++) {
        NumberReplicas num = null;
@@ -316,7 +319,7 @@ public class TestReconstructStripedBlocks {
        } finally {
          fsn.readUnlock();
        }
-       if (num.liveReplicas() >= GROUP_SIZE) {
+       if (num.liveReplicas() >= groupSize) {
          reconstructed = true;
          break;
        } else {
@@ -327,11 +330,11 @@ public class TestReconstructStripedBlocks {

      blks = fs.getClient().getLocatedBlocks(filePath.toString(), 0);
      block = (LocatedStripedBlock) blks.getLastLocatedBlock();
-     BitSet bitSet = new BitSet(GROUP_SIZE);
+     BitSet bitSet = new BitSet(groupSize);
      for (byte index : block.getBlockIndices()) {
        bitSet.set(index);
      }
-     for (int i = 0; i < GROUP_SIZE; i++) {
+     for (int i = 0; i < groupSize; i++) {
        Assert.assertTrue(bitSet.get(i));
      }
    } finally {

@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.NameNodeProxies;
- import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -305,8 +304,7 @@ public class TestStripedINodeFile {
  public void testUnsuitableStoragePoliciesWithECStripedMode()
      throws Exception {
    final Configuration conf = new HdfsConfiguration();
-   int defaultStripedBlockSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE
-       * 4;
+   int defaultStripedBlockSize = testECPolicy.getCellSize() * 4;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);

@@ -31,30 +31,33 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
- import org.apache.hadoop.hdfs.StripedFileTestUtil;
+ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+ import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
- import org.junit.AfterClass;
- import org.junit.BeforeClass;
+ import org.junit.After;
+ import org.junit.Before;
import org.junit.Test;

public class TestOfflineImageViewerWithStripedBlocks {
-  private static int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
-  private static int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
+  private final ErasureCodingPolicy ecPolicy =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
+  private int dataBlocks = ecPolicy.getNumDataUnits();
+  private int parityBlocks = ecPolicy.getNumParityUnits();

  private static MiniDFSCluster cluster;
  private static DistributedFileSystem fs;
-  private static final int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private static final int stripesPerBlock = 3;
-  private static final int blockSize = cellSize * stripesPerBlock;
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripesPerBlock = 3;
+  private final int blockSize = cellSize * stripesPerBlock;

-  @BeforeClass
-  public static void setup() throws IOException {
+  @Before
+  public void setup() throws IOException {
    int numDNs = dataBlocks + parityBlocks + 2;
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -66,8 +69,8 @@ public class TestOfflineImageViewerWithStripedBlocks {
    fs.mkdirs(eczone);
  }

-  @AfterClass
-  public static void tearDown() {
+  @After
+  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
    }

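Because the EC layout fields are no longer static, the fixture moves from a class-level @BeforeClass/@AfterClass cluster to a per-test @Before/@After cluster. A minimal JUnit 4 skeleton of that shape, assuming the same MiniDFSCluster configuration keys used above; the class name and the placeholder test are illustrative only:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class PerTestClusterSketch {
  // Instance fields: resolved per test, so each test could use a different policy.
  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemDefaultPolicy();
  private final int blockSize = ecPolicy.getCellSize() * 3;

  private MiniDFSCluster cluster;
  private DistributedFileSystem fs;

  @Before
  public void setup() throws IOException {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    int numDNs = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits() + 2;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Test
  public void testPlaceholder() throws IOException {
    // Real assertions would go here; this sketch only exercises the lifecycle.
    Assert.assertTrue(fs.exists(new Path("/")));
  }
}
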
@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.util;
import com.google.common.base.Preconditions;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
- import org.apache.hadoop.hdfs.StripedFileTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -81,15 +80,15 @@ import static org.junit.Assert.assertFalse;
 */
public class TestStripedBlockUtil {
  // use hard coded policy - see HDFS-9816
-  private final ErasureCodingPolicy EC_POLICY =
+  private final ErasureCodingPolicy ecPolicy =
      ErasureCodingPolicyManager.getSystemPolicies()[0];
-  private final short DATA_BLK_NUM = (short) EC_POLICY.getNumDataUnits();
-  private final short PARITY_BLK_NUM = (short) EC_POLICY.getNumParityUnits();
-  private final short BLK_GROUP_WIDTH = (short) (DATA_BLK_NUM + PARITY_BLK_NUM);
-  private final int CELLSIZE = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
-  private final int FULL_STRIPE_SIZE = DATA_BLK_NUM * CELLSIZE;
+  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
+  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
+  private final short groupSize = (short) (dataBlocks + parityBlocks);
+  private final int cellSize = ecPolicy.getCellSize();
+  private final int stripeSize = dataBlocks * cellSize;
  /** number of full stripes in a full block group */
-  private final int BLK_GROUP_STRIPE_NUM = 16;
+  private final int stripesPerBlock = 16;
  private final Random random = new Random();

  private int[] blockGroupSizes;
@@ -101,23 +100,23 @@ public class TestStripedBlockUtil {

  @Before
  public void setup(){
-   blockGroupSizes = new int[]{1, getDelta(CELLSIZE), CELLSIZE,
-       getDelta(DATA_BLK_NUM) * CELLSIZE,
-       getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-       FULL_STRIPE_SIZE, FULL_STRIPE_SIZE + getDelta(CELLSIZE),
-       FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE,
-       FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-       getDelta(BLK_GROUP_STRIPE_NUM) * FULL_STRIPE_SIZE,
-       BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE};
-   byteRangeStartOffsets = new int[] {0, getDelta(CELLSIZE), CELLSIZE - 1};
-   byteRangeSizes = new int[]{1, getDelta(CELLSIZE), CELLSIZE,
-       getDelta(DATA_BLK_NUM) * CELLSIZE,
-       getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-       FULL_STRIPE_SIZE, FULL_STRIPE_SIZE + getDelta(CELLSIZE),
-       FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE,
-       FULL_STRIPE_SIZE + getDelta(DATA_BLK_NUM) * CELLSIZE + getDelta(CELLSIZE),
-       getDelta(BLK_GROUP_STRIPE_NUM) * FULL_STRIPE_SIZE,
-       BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE};
+   blockGroupSizes = new int[]{1, getDelta(cellSize), cellSize,
+       getDelta(dataBlocks) * cellSize,
+       getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+       stripeSize, stripeSize + getDelta(cellSize),
+       stripeSize + getDelta(dataBlocks) * cellSize,
+       stripeSize + getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+       getDelta(stripesPerBlock) * stripeSize,
+       stripesPerBlock * stripeSize};
+   byteRangeStartOffsets = new int[] {0, getDelta(cellSize), cellSize - 1};
+   byteRangeSizes = new int[]{1, getDelta(cellSize), cellSize,
+       getDelta(dataBlocks) * cellSize,
+       getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+       stripeSize, stripeSize + getDelta(cellSize),
+       stripeSize + getDelta(dataBlocks) * cellSize,
+       stripeSize + getDelta(dataBlocks) * cellSize + getDelta(cellSize),
+       getDelta(stripesPerBlock) * stripeSize,
+       stripesPerBlock * stripeSize};
  }

  private int getDelta(int size) {
@@ -130,12 +129,12 @@ public class TestStripedBlockUtil {

  private LocatedStripedBlock createDummyLocatedBlock(int bgSize) {
    final long blockGroupID = -1048576;
-   DatanodeInfo[] locs = new DatanodeInfo[BLK_GROUP_WIDTH];
-   String[] storageIDs = new String[BLK_GROUP_WIDTH];
-   StorageType[] storageTypes = new StorageType[BLK_GROUP_WIDTH];
-   byte[] indices = new byte[BLK_GROUP_WIDTH];
-   for (int i = 0; i < BLK_GROUP_WIDTH; i++) {
-     indices[i] = (byte) ((i + 2) % DATA_BLK_NUM);
+   DatanodeInfo[] locs = new DatanodeInfo[groupSize];
+   String[] storageIDs = new String[groupSize];
+   StorageType[] storageTypes = new StorageType[groupSize];
+   byte[] indices = new byte[groupSize];
+   for (int i = 0; i < groupSize; i++) {
+     indices[i] = (byte) ((i + 2) % dataBlocks);
      // Location port always equal to logical index of a block,
      // for easier verification
      locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
@@ -148,20 +147,21 @@ public class TestStripedBlockUtil {
  }

  private byte[][] createInternalBlkBuffers(int bgSize) {
-   byte[][] bufs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][];
-   int[] pos = new int[DATA_BLK_NUM + PARITY_BLK_NUM];
-   for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
+   byte[][] bufs = new byte[dataBlocks + parityBlocks][];
+   int[] pos = new int[dataBlocks + parityBlocks];
+   for (int i = 0; i < dataBlocks + parityBlocks; i++) {
      int bufSize = (int) getInternalBlockLength(
-         bgSize, CELLSIZE, DATA_BLK_NUM, i);
+         bgSize, cellSize, dataBlocks, i);
      bufs[i] = new byte[bufSize];
      pos[i] = 0;
    }
    int done = 0;
    while (done < bgSize) {
-     Preconditions.checkState(done % CELLSIZE == 0);
-     StripingCell cell = new StripingCell(EC_POLICY, CELLSIZE, done / CELLSIZE, 0);
+     Preconditions.checkState(done % cellSize == 0);
+     StripingCell cell =
+         new StripingCell(ecPolicy, cellSize, done / cellSize, 0);
      int idxInStripe = cell.idxInStripe;
-     int size = Math.min(CELLSIZE, bgSize - done);
+     int size = Math.min(cellSize, bgSize - done);
      for (int i = 0; i < size; i++) {
        bufs[idxInStripe][pos[idxInStripe] + i] = hashIntToByte(done + i);
      }

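createInternalBlkBuffers above relies on the striped layout in which consecutive cells of the logical block group are distributed round-robin across the data blocks; the test obtains the target index from StripingCell.idxInStripe. A self-contained sketch of that mapping, offered as an illustration of the layout rather than the StripedBlockUtil implementation; the RS(6,3)/64 KB numbers in main are assumptions:

public class CellMappingSketch {
  /** Index of the internal data block that stores the cell at this offset. */
  static int idxInStripe(long offsetInBlockGroup, int cellSize, int dataBlocks) {
    long cellIndex = offsetInBlockGroup / cellSize;
    return (int) (cellIndex % dataBlocks);
  }

  public static void main(String[] args) {
    // Assumed RS(6,3) layout with 64 KB cells.
    int cellSize = 64 * 1024;
    int dataBlocks = 6;
    // Cells 0..5 fill data blocks 0..5, then cell 6 wraps back to block 0.
    for (int cell = 0; cell <= 6; cell++) {
      long offset = (long) cell * cellSize;
      System.out.println("cell " + cell + " -> data block "
          + idxInStripe(offset, cellSize, dataBlocks));
    }
  }
}
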
@@ -175,11 +175,11 @@ public class TestStripedBlockUtil {
  @Test
  public void testParseDummyStripedBlock() {
    LocatedStripedBlock lsb = createDummyLocatedBlock(
-       BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
+       stripeSize * stripesPerBlock);
    LocatedBlock[] blocks = parseStripedBlockGroup(
-       lsb, CELLSIZE, DATA_BLK_NUM, PARITY_BLK_NUM);
-   assertEquals(DATA_BLK_NUM + PARITY_BLK_NUM, blocks.length);
-   for (int i = 0; i < DATA_BLK_NUM; i++) {
+       lsb, cellSize, dataBlocks, parityBlocks);
+   assertEquals(dataBlocks + parityBlocks, blocks.length);
+   for (int i = 0; i < dataBlocks; i++) {
      assertFalse(blocks[i].isStriped());
      assertEquals(i,
          BlockIdManager.getBlockIndex(blocks[i].getBlock().getLocalBlock()));
@@ -191,9 +191,9 @@ public class TestStripedBlockUtil {
  }

  private void verifyInternalBlocks (int numBytesInGroup, int[] expected) {
-   for (int i = 1; i < BLK_GROUP_WIDTH; i++) {
+   for (int i = 1; i < groupSize; i++) {
      assertEquals(expected[i],
-         getInternalBlockLength(numBytesInGroup, CELLSIZE, DATA_BLK_NUM, i));
+         getInternalBlockLength(numBytesInGroup, cellSize, dataBlocks, i));
    }
  }

@@ -203,38 +203,38 @@ public class TestStripedBlockUtil {
    final int delta = 10;

    // Block group is smaller than a cell
-   verifyInternalBlocks(CELLSIZE - delta,
-       new int[] {CELLSIZE - delta, 0, 0, 0, 0, 0,
-           CELLSIZE - delta, CELLSIZE - delta, CELLSIZE - delta});
+   verifyInternalBlocks(cellSize - delta,
+       new int[] {cellSize - delta, 0, 0, 0, 0, 0,
+           cellSize - delta, cellSize - delta, cellSize - delta});

    // Block group is exactly as large as a cell
-   verifyInternalBlocks(CELLSIZE,
-       new int[] {CELLSIZE, 0, 0, 0, 0, 0,
-           CELLSIZE, CELLSIZE, CELLSIZE});
+   verifyInternalBlocks(cellSize,
+       new int[] {cellSize, 0, 0, 0, 0, 0,
+           cellSize, cellSize, cellSize});

    // Block group is a little larger than a cell
-   verifyInternalBlocks(CELLSIZE + delta,
-       new int[] {CELLSIZE, delta, 0, 0, 0, 0,
-           CELLSIZE, CELLSIZE, CELLSIZE});
+   verifyInternalBlocks(cellSize + delta,
+       new int[] {cellSize, delta, 0, 0, 0, 0,
+           cellSize, cellSize, cellSize});

    // Block group contains multiple stripes and ends at stripe boundary
-   verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE,
-       new int[] {2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-           2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-           2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE});
+   verifyInternalBlocks(2 * dataBlocks * cellSize,
+       new int[] {2 * cellSize, 2 * cellSize, 2 * cellSize,
+           2 * cellSize, 2 * cellSize, 2 * cellSize,
+           2 * cellSize, 2 * cellSize, 2 * cellSize});

    // Block group contains multiple stripes and ends at cell boundary
    // (not ending at stripe boundary)
-   verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE + CELLSIZE,
-       new int[] {3 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-           2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-           3 * CELLSIZE, 3 * CELLSIZE, 3 * CELLSIZE});
+   verifyInternalBlocks(2 * dataBlocks * cellSize + cellSize,
+       new int[] {3 * cellSize, 2 * cellSize, 2 * cellSize,
+           2 * cellSize, 2 * cellSize, 2 * cellSize,
+           3 * cellSize, 3 * cellSize, 3 * cellSize});

    // Block group contains multiple stripes and doesn't end at cell boundary
-   verifyInternalBlocks(2 * DATA_BLK_NUM * CELLSIZE - delta,
-       new int[] {2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE,
-           2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE - delta,
-           2 * CELLSIZE, 2 * CELLSIZE, 2 * CELLSIZE});
+   verifyInternalBlocks(2 * dataBlocks * cellSize - delta,
+       new int[] {2 * cellSize, 2 * cellSize, 2 * cellSize,
+           2 * cellSize, 2 * cellSize, 2 * cellSize - delta,
+           2 * cellSize, 2 * cellSize, 2 * cellSize});
  }

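The expected arrays above follow from the round-robin cell layout: data block i holds one cell per full stripe, plus an extra full cell (or the trailing partial cell) if the last stripe reaches it, and each parity block is as long as the longest data block. A self-contained sketch of that arithmetic which reproduces the expectations in this test; it is an illustration under an assumed RS(6,3)/64 KB layout, not the StripedBlockUtil.getInternalBlockLength implementation:

import java.util.Arrays;

public class InternalBlockLengthSketch {
  /** Length of internal block idx for a block group of bgSize bytes. */
  static long internalBlockLength(long bgSize, int cellSize,
                                  int dataBlocks, int idx) {
    long fullCells = bgSize / cellSize;
    long partialCell = bgSize % cellSize;
    long fullStripes = fullCells / dataBlocks;
    long cellsInLastStripe = fullCells % dataBlocks;

    if (idx >= dataBlocks) {
      // Parity blocks match the longest data block, which is data block 0.
      return internalBlockLength(bgSize, cellSize, dataBlocks, 0);
    }
    long len = fullStripes * cellSize;
    if (idx < cellsInLastStripe) {
      len += cellSize;        // gets a full cell from the last stripe
    } else if (idx == cellsInLastStripe) {
      len += partialCell;     // gets the trailing partial cell, if any
    }
    return len;
  }

  public static void main(String[] args) {
    // Assumed RS(6,3) layout with 64 KB cells.
    int cellSize = 64 * 1024, dataBlocks = 6, parityBlocks = 3;
    long bgSize = 2L * dataBlocks * cellSize + cellSize;  // ends at a cell boundary
    long[] lens = new long[dataBlocks + parityBlocks];
    for (int i = 0; i < lens.length; i++) {
      lens[i] = internalBlockLength(bgSize, cellSize, dataBlocks, i);
    }
    // Expect {3c, 2c, 2c, 2c, 2c, 2c, 3c, 3c, 3c} with c = cellSize.
    System.out.println(Arrays.toString(lens));
  }
}
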
  /**
@@ -244,7 +244,7 @@ public class TestStripedBlockUtil {
  @Test
  public void testDivideByteRangeIntoStripes() {
    ByteBuffer assembled =
-       ByteBuffer.allocate(BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
+       ByteBuffer.allocate(stripesPerBlock * stripeSize);
    for (int bgSize : blockGroupSizes) {
      LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
      byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);
@@ -253,11 +253,11 @@ public class TestStripedBlockUtil {
        if (brStart + brSize > bgSize) {
          continue;
        }
-       AlignedStripe[] stripes = divideByteRangeIntoStripes(EC_POLICY,
-           CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled);
+       AlignedStripe[] stripes = divideByteRangeIntoStripes(ecPolicy,
+           cellSize, blockGroup, brStart, brStart + brSize - 1, assembled);

        for (AlignedStripe stripe : stripes) {
-         for (int i = 0; i < DATA_BLK_NUM; i++) {
+         for (int i = 0; i < dataBlocks; i++) {
            StripingChunk chunk = stripe.chunks[i];
            if (chunk == null || chunk.state != StripingChunk.REQUESTED) {
              continue;