HBASE-13381 Expand TestSizeFailures to include small scans (Josh Elser)

tedyu 2015-04-09 17:57:26 -07:00
parent d1133ced44
commit e831ed0747
1 changed file with 104 additions and 90 deletions

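The change moves table creation and data loading into @BeforeClass and adds a second test, testSmallScannerSeesAllRecords, that runs the same verification over a "small" scan. As a rough standalone sketch (not part of the commit; the class name, main method, and println calls are only illustrative), the client-side difference between the two scan flavors boils down to a single flag on Scan:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallScanSketch {
  public static void main(String[] args) {
    byte[] family = Bytes.toBytes("testFamily");

    // Regular scan: the client pages through results over successive scanner RPCs.
    Scan regular = new Scan();
    regular.addFamily(family);
    regular.setCaching(500);

    // "Small" scan: the same logical scan, flagged so the server can answer it
    // without keeping a long-lived scanner open. The new test asserts that this
    // path still returns every row and every cell the regular scan sees.
    Scan small = new Scan();
    small.addFamily(family);
    small.setCaching(500);
    small.setSmall(true);

    System.out.println(regular);
    System.out.println(small);
  }
}

Both tests then feed the scanner into the shared sumTable helper and assert the same totals, NUM_ROWS rows and NUM_ROWS * NUM_COLS cells, so rows silently dropped on the small-scan path would fail the test.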

@@ -20,12 +20,10 @@ package org.apache.hadoop.hbase.client;
 
 import static org.junit.Assert.assertEquals;
 
-import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
-import java.util.TreeSet;
+import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -41,16 +39,17 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Maps;
+
 @Category(LargeTests.class)
 public class TestSizeFailures {
-  final Log LOG = LogFactory.getLog(getClass());
+  static final Log LOG = LogFactory.getLog(TestSizeFailures.class);
   protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static byte [] FAMILY = Bytes.toBytes("testFamily");
   protected static int SLAVES = 1;
+  private static TableName TABLENAME;
+  private static final int NUM_ROWS = 1000 * 1000, NUM_COLS = 10;
 
   /**
    * @throws java.lang.Exception
    */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // Uncomment the following lines if more verbosity is needed for
@@ -61,11 +60,49 @@ public class TestSizeFailures {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean("hbase.table.sanity.checks", true); // ignore sanity checks in the server
     TEST_UTIL.startMiniCluster(SLAVES);
+
+    // Write a bunch of data
+    TABLENAME = TableName.valueOf("testSizeFailures");
+    List<byte[]> qualifiers = new ArrayList<>();
+    for (int i = 1; i <= 10; i++) {
+      qualifiers.add(Bytes.toBytes(Integer.toString(i)));
+    }
+    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
+    HTableDescriptor desc = new HTableDescriptor(TABLENAME);
+    desc.addFamily(hcd);
+    byte[][] splits = new byte[9][2];
+    for (int i = 1; i < 10; i++) {
+      int split = 48 + i;
+      splits[i - 1][0] = (byte) (split >>> 8);
+      splits[i - 1][0] = (byte) (split);
+    }
+    TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
+    Connection conn = TEST_UTIL.getConnection();
+    try (Table table = conn.getTable(TABLENAME)) {
+      List<Put> puts = new LinkedList<>();
+      for (int i = 0; i < NUM_ROWS; i++) {
+        Put p = new Put(Bytes.toBytes(Integer.toString(i)));
+        for (int j = 0; j < NUM_COLS; j++) {
+          byte[] value = new byte[50];
+          Bytes.random(value);
+          p.addColumn(FAMILY, Bytes.toBytes(Integer.toString(j)), value);
+        }
+        puts.add(p);
+
+        if (puts.size() == 1000) {
+          table.batch(puts, new Object[1000]);
+          puts.clear();
+        }
+      }
+
+      if (puts.size() > 0) {
+        table.batch(puts, new Object[puts.size()]);
+      }
+    }
   }
 
   /**
    * @throws java.lang.Exception
    */
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
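For orientation, the split keys built in setUpBeforeClass are derived from ASCII digits: 48 is '0', so 48 + i for i in 1..9 produces the bytes for '1' through '9', and the million string row keys, which start with the digits '0' through '9', end up spread across ten regions. A small standalone sketch of the same computation (the wrapper class and the printing are illustrative only):

import java.util.Arrays;

public class SplitPointSketch {
  public static void main(String[] args) {
    // Same loop as the test's setup: build nine two-byte split keys.
    byte[][] splits = new byte[9][2];
    for (int i = 1; i < 10; i++) {
      int split = 48 + i;                      // ASCII code for the digit '1'..'9'
      splits[i - 1][0] = (byte) (split >>> 8); // always 0 for values below 256
      splits[i - 1][0] = (byte) (split);       // also index 0, so each key ends up {digit, 0x00}
    }
    for (byte[] key : splits) {
      System.out.println(Arrays.toString(key)); // e.g. [49, 0] for the '1' boundary
    }
  }
}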
@@ -74,89 +111,66 @@ public class TestSizeFailures {
   /**
    * Basic client side validation of HBASE-13262
    */
-  @Test
-  public void testScannerSeesAllRecords() throws Exception {
-    final int NUM_ROWS = 1000 * 1000, NUM_COLS = 10;
-    final TableName TABLENAME = TableName.valueOf("testScannerSeesAllRecords");
-    List<byte[]> qualifiers = new ArrayList<>();
-    for (int i = 1; i <= 10; i++) {
-      qualifiers.add(Bytes.toBytes(Integer.toString(i)));
-    }
+  @Test
+  public void testScannerSeesAllRecords() throws Exception {
+    Connection conn = TEST_UTIL.getConnection();
+    try (Table table = conn.getTable(TABLENAME)) {
+      Scan s = new Scan();
+      s.addFamily(FAMILY);
+      s.setMaxResultSize(-1);
+      s.setBatch(-1);
+      s.setCaching(500);
+      Entry<Long,Long> entry = sumTable(table.getScanner(s));
+      long rowsObserved = entry.getKey();
+      long entriesObserved = entry.getValue();
-    HColumnDescriptor hcd = new HColumnDescriptor(FAMILY);
-    HTableDescriptor desc = new HTableDescriptor(TABLENAME);
-    desc.addFamily(hcd);
-    byte[][] splits = new byte[9][2];
-    for (int i = 1; i < 10; i++) {
-      int split = 48 + i;
-      splits[i - 1][0] = (byte) (split >>> 8);
-      splits[i - 1][0] = (byte) (split);
-    }
-    TEST_UTIL.getHBaseAdmin().createTable(desc, splits);
-
-    Connection conn = TEST_UTIL.getConnection();
+
+      // Verify that we see 1M rows and 10M cells
+      assertEquals(NUM_ROWS, rowsObserved);
+      assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
+    }
+  }
-    try (Table table = conn.getTable(TABLENAME)) {
-      List<Put> puts = new LinkedList<>();
-      for (int i = 0; i < NUM_ROWS; i++) {
-        Put p = new Put(Bytes.toBytes(Integer.toString(i)));
-        for (int j = 0; j < NUM_COLS; j++) {
-          byte[] value = new byte[50];
-          Bytes.random(value);
-          p.addColumn(FAMILY, Bytes.toBytes(Integer.toString(j)), value);
-        }
-        puts.add(p);
-
+
+  /**
+   * Basic client side validation of HBASE-13262
+   */
+  @Test
+  public void testSmallScannerSeesAllRecords() throws Exception {
+    Connection conn = TEST_UTIL.getConnection();
+    try (Table table = conn.getTable(TABLENAME)) {
+      Scan s = new Scan();
+      s.setSmall(true);
+      s.addFamily(FAMILY);
+      s.setMaxResultSize(-1);
+      s.setBatch(-1);
+      s.setCaching(500);
+      Entry<Long,Long> entry = sumTable(table.getScanner(s));
+      long rowsObserved = entry.getKey();
+      long entriesObserved = entry.getValue();
-        if (puts.size() == 1000) {
-          Object[] results = new Object[1000];
-          try {
-            table.batch(puts, results);
-          } catch (IOException e) {
-            LOG.error("Failed to write data", e);
-            LOG.debug("Errors: " + Arrays.toString(results));
-          }
+
+      // Verify that we see 1M rows and 10M cells
+      assertEquals(NUM_ROWS, rowsObserved);
+      assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
+    }
+  }
-
-          puts.clear();
-        }
-      }
+
+  /**
+   * Count the number of rows and the number of entries from a scanner
+   *
+   * @param scanner
+   *          The Scanner
+   * @return An entry where the first item is rows observed and the second is entries observed.
+   */
+  private Entry<Long,Long> sumTable(ResultScanner scanner) {
+    long rowsObserved = 0l;
+    long entriesObserved = 0l;
-
-      if (puts.size() > 0) {
-        Object[] results = new Object[puts.size()];
-        try {
-          table.batch(puts, results);
-        } catch (IOException e) {
-          LOG.error("Failed to write data", e);
-          LOG.debug("Errors: " + Arrays.toString(results));
-        }
-      }
-
-      // Flush the memstore to disk
-      TEST_UTIL.getHBaseAdmin().flush(TABLENAME);
-
-      TreeSet<Integer> rows = new TreeSet<>();
-      long rowsObserved = 0l;
-      long entriesObserved = 0l;
-      Scan s = new Scan();
-      s.addFamily(FAMILY);
-      s.setMaxResultSize(-1);
-      s.setBatch(-1);
-      s.setCaching(500);
-      ResultScanner scanner = table.getScanner(s);
-      // Read all the records in the table
-      for (Result result : scanner) {
-        rowsObserved++;
-        String row = new String(result.getRow());
-        rows.add(Integer.parseInt(row));
-        while (result.advance()) {
-          entriesObserved++;
-          // result.current();
-        }
-      }
-
-      // Verify that we see 1M rows and 10M cells
-      assertEquals(NUM_ROWS, rowsObserved);
-      assertEquals(NUM_ROWS * NUM_COLS, entriesObserved);
-    }
-
-    conn.close();
-  }
+
+    // Read all the records in the table
+    for (Result result : scanner) {
+      rowsObserved++;
+      while (result.advance()) {
+        entriesObserved++;
+      }
+    }
+    return Maps.immutableEntry(rowsObserved,entriesObserved);
+  }
 }