HBASE-18587 Fix flaky TestFileIOEngine
This short circuits reads and writes with 0 length and also removes flakiness in TestFileIOEngine.

Signed-off-by: Michael Stack <stack@apache.org>
commit 49ca224fc3 (parent c298ab65ec)
FileIOEngine.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -122,15 +123,18 @@ public class FileIOEngine implements IOEngine {
   @Override
   public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
       throws IOException {
+    Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0.");
     ByteBuffer dstBuffer = ByteBuffer.allocate(length);
-    accessFile(readAccessor, dstBuffer, offset);
-    // The buffer created out of the fileChannel is formed by copying the data from the file
-    // Hence in this case there is no shared memory that we point to. Even if the BucketCache evicts
-    // this buffer from the file the data is already copied and there is no need to ensure that
-    // the results are not corrupted before consuming them.
-    if (dstBuffer.limit() != length) {
-      throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
-          + " expected");
+    if (length != 0) {
+      accessFile(readAccessor, dstBuffer, offset);
+      // The buffer created out of the fileChannel is formed by copying the data from the file
+      // Hence in this case there is no shared memory that we point to. Even if the BucketCache evicts
+      // this buffer from the file the data is already copied and there is no need to ensure that
+      // the results are not corrupted before consuming them.
+      if (dstBuffer.limit() != length) {
+        throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
+            + " expected");
+      }
     }
     return deserializer.deserialize(new SingleByteBuff(dstBuffer), true, MemoryType.EXCLUSIVE);
   }
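
With this change a zero-length read allocates an empty destination buffer and hands it to the deserializer without touching the backing file, while a negative length is rejected up front by the new Preconditions guard. A minimal caller-side sketch, not part of this commit (the file path is a placeholder, and BufferGrabbingDeserializer is the test helper used further down):

    // Sketch only. A zero-length read skips accessFile(); the deserializer
    // receives a SingleByteBuff wrapped around a 0-byte buffer.
    FileIOEngine engine = new FileIOEngine(1024, false, "testFileIOEngineSketch");
    BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
    engine.read(0, 0, deserializer);  // no file I/O; dstBuffer.limit() == 0
    engine.shutdown();
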
@@ -143,6 +147,9 @@ public class FileIOEngine implements IOEngine {
    */
   @Override
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
+    if (!srcBuffer.hasRemaining()) {
+      return;
+    }
     accessFile(writeAccessor, srcBuffer, offset);
   }
 
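
The write() short circuit leans on ByteBuffer.hasRemaining(): wrapping a zero-length array yields a buffer with position == limit == 0, so there is nothing to write and accessFile() is never reached. A minimal sketch, not part of this commit, assuming some already-constructed engine:

    // An empty buffer has no remaining bytes, so the new guard returns immediately.
    ByteBuffer empty = ByteBuffer.wrap(new byte[0]);
    engine.write(empty, 0);  // no-op; previously this still went through accessFile()
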
TestFileIOEngine.java

@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -30,6 +31,8 @@ import org.apache.hadoop.hbase.io.hfile.bucket.TestByteBufferIOEngine.BufferGrabbingDeserializer;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -38,62 +41,82 @@ import org.junit.experimental.categories.Category;
  */
 @Category({IOTests.class, SmallTests.class})
 public class TestFileIOEngine {
+
+  private static final long TOTAL_CAPACITY = 6 * 1024 * 1024; // 6 MB
+  private static final String[] FILE_PATHS = {"testFileIOEngine1", "testFileIOEngine2",
+      "testFileIOEngine3"};
+  private static final long SIZE_PER_FILE = TOTAL_CAPACITY / FILE_PATHS.length; // 2 MB per File
+  private final static List<Long> boundaryStartPositions = new ArrayList<Long>();
+  private final static List<Long> boundaryStopPositions = new ArrayList<Long>();
+
+  private FileIOEngine fileIOEngine;
+
+  static {
+    boundaryStartPositions.add(0L);
+    for (int i = 1; i < FILE_PATHS.length; i++) {
+      boundaryStartPositions.add(SIZE_PER_FILE * i - 1);
+      boundaryStartPositions.add(SIZE_PER_FILE * i);
+      boundaryStartPositions.add(SIZE_PER_FILE * i + 1);
+    }
+    for (int i = 1; i < FILE_PATHS.length; i++) {
+      boundaryStopPositions.add(SIZE_PER_FILE * i - 1);
+      boundaryStopPositions.add(SIZE_PER_FILE * i);
+      boundaryStopPositions.add(SIZE_PER_FILE * i + 1);
+    }
+    boundaryStopPositions.add(SIZE_PER_FILE * FILE_PATHS.length - 1);
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    fileIOEngine = new FileIOEngine(TOTAL_CAPACITY, false, FILE_PATHS);
+  }
+
+  @After
+  public void cleanUp() {
+    fileIOEngine.shutdown();
+    for (String filePath : FILE_PATHS) {
+      File file = new File(filePath);
+      if (file.exists()) {
+        file.delete();
+      }
+    }
+  }
+
   @Test
   public void testFileIOEngine() throws IOException {
-    long totalCapacity = 6 * 1024 * 1024; // 6 MB
-    String[] filePaths = { "testFileIOEngine1", "testFileIOEngine2",
-        "testFileIOEngine3" };
-    long sizePerFile = totalCapacity / filePaths.length; // 2 MB per File
-    List<Long> boundaryStartPositions = new ArrayList<Long>();
-    boundaryStartPositions.add(0L);
-    for (int i = 1; i < filePaths.length; i++) {
-      boundaryStartPositions.add(sizePerFile * i - 1);
-      boundaryStartPositions.add(sizePerFile * i);
-      boundaryStartPositions.add(sizePerFile * i + 1);
-    }
-    List<Long> boundaryStopPositions = new ArrayList<Long>();
-    for (int i = 1; i < filePaths.length; i++) {
-      boundaryStopPositions.add(sizePerFile * i - 1);
-      boundaryStopPositions.add(sizePerFile * i);
-      boundaryStopPositions.add(sizePerFile * i + 1);
-    }
-    boundaryStopPositions.add(sizePerFile * filePaths.length - 1);
-    FileIOEngine fileIOEngine = new FileIOEngine(totalCapacity, false, filePaths);
-    try {
-      for (int i = 0; i < 500; i++) {
-        int len = (int) Math.floor(Math.random() * 100);
-        long offset = (long) Math.floor(Math.random() * totalCapacity % (totalCapacity - len));
-        if (i < boundaryStartPositions.size()) {
-          // make the boundary start positon
-          offset = boundaryStartPositions.get(i);
-        } else if ((i - boundaryStartPositions.size()) < boundaryStopPositions.size()) {
-          // make the boundary stop positon
-          offset = boundaryStopPositions.get(i - boundaryStartPositions.size()) - len + 1;
-        } else if (i % 2 == 0) {
-          // make the cross-files block writing/reading
-          offset = Math.max(1, i % filePaths.length) * sizePerFile - len / 2;
-        }
-        byte[] data1 = new byte[len];
-        for (int j = 0; j < data1.length; ++j) {
-          data1[j] = (byte) (Math.random() * 255);
-        }
-        fileIOEngine.write(ByteBuffer.wrap(data1), offset);
-        BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
-        fileIOEngine.read(offset, len, deserializer);
-        ByteBuff data2 = deserializer.getDeserializedByteBuff();
-        for (int j = 0; j < data1.length; ++j) {
-          assertTrue(data1[j] == data2.get(j));
-        }
-      }
-    } finally {
-      fileIOEngine.shutdown();
-      for (String filePath : filePaths) {
-        File file = new File(filePath);
-        if (file.exists()) {
-          file.delete();
-        }
-      }
+    for (int i = 0; i < 500; i++) {
+      int len = (int) Math.floor(Math.random() * 100) + 1;
+      long offset = (long) Math.floor(Math.random() * TOTAL_CAPACITY % (TOTAL_CAPACITY - len));
+      if (i < boundaryStartPositions.size()) {
+        // make the boundary start positon
+        offset = boundaryStartPositions.get(i);
+      } else if ((i - boundaryStartPositions.size()) < boundaryStopPositions.size()) {
+        // make the boundary stop positon
+        offset = boundaryStopPositions.get(i - boundaryStartPositions.size()) - len + 1;
+      } else if (i % 2 == 0) {
+        // make the cross-files block writing/reading
+        offset = Math.max(1, i % FILE_PATHS.length) * SIZE_PER_FILE - len / 2;
+      }
+      byte[] data1 = new byte[len];
+      for (int j = 0; j < data1.length; ++j) {
+        data1[j] = (byte) (Math.random() * 255);
+      }
+      fileIOEngine.write(ByteBuffer.wrap(data1), offset);
+      BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
+      fileIOEngine.read(offset, len, deserializer);
+      ByteBuff data2 = deserializer.getDeserializedByteBuff();
+      assertArrayEquals(data1, data2.array());
     }
   }
+
+  @Test
+  public void testFileIOEngineHandlesZeroLengthInput() throws IOException {
+    byte[] data1 = new byte[0];
+
+    fileIOEngine.write(ByteBuffer.wrap(data1), 0);
+    BufferGrabbingDeserializer deserializer = new BufferGrabbingDeserializer();
+    fileIOEngine.read(0, 0, deserializer);
+    ByteBuff data2 = deserializer.getDeserializedByteBuff();
+    assertArrayEquals(data1, data2.array());
+  }
 }
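
The new testFileIOEngineHandlesZeroLengthInput covers the zero-length path; the negative-length guard added to FileIOEngine.read() is not exercised here. A hypothetical companion test in the same style, not part of this commit, could look like:

    @Test(expected = IllegalArgumentException.class)
    public void testReadRejectsNegativeLength() throws IOException {
      // Hypothetical: Preconditions.checkArgument fails for length < 0.
      fileIOEngine.read(0, -1, new BufferGrabbingDeserializer());
    }
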