HBASE-15302 Reenable the other tests disabled by HBASE-14678

Signed-off-by: stack <stack@apache.org>
Phil Yang 2016-02-22 14:17:24 +08:00 committed by stack
parent 876a6ab73e
commit 30cec72f9a
11 changed files with 5256 additions and 5 deletions

WALSplitter.java

@@ -515,13 +515,14 @@ public class WALSplitter {
    * @param fs
    * @param logEntry
    * @param rootDir HBase root dir.
-   * @param fileBeingSplit the file being split currently. Used to generate tmp file name.
+   * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
    * @return Path to file into which to dump split log edits.
    * @throws IOException
    */
   @SuppressWarnings("deprecation")
-  private static Path getRegionSplitEditsPath(final FileSystem fs,
-      final Entry logEntry, final Path rootDir, FileStatus fileBeingSplit)
+  @VisibleForTesting
+  static Path getRegionSplitEditsPath(final FileSystem fs,
+      final Entry logEntry, final Path rootDir, String fileNameBeingSplit)
       throws IOException {
     Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTablename());
     String encodedRegionName = Bytes.toString(logEntry.getKey().getEncodedRegionName());
@@ -556,7 +557,7 @@ public class WALSplitter {
     // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
     // region's replayRecoveredEdits will not delete it
     String fileName = formatRecoveredEditsFileName(logEntry.getKey().getSequenceId());
-    fileName = getTmpRecoveredEditsFileName(fileName + "-" + fileBeingSplit.getPath().getName());
+    fileName = getTmpRecoveredEditsFileName(fileName + "-" + fileNameBeingSplit);
     return new Path(dir, fileName);
   }
@@ -1518,7 +1519,7 @@ public class WALSplitter {
    * @return a path with a writer for that path. caller should close.
    */
   private WriterAndPath createWAP(byte[] region, Entry entry, Path rootdir) throws IOException {
-    Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, fileBeingSplit);
+    Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, fileBeingSplit.getPath().getName());
     if (regionedits == null) {
       return null;
     }
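The point of the signature change above is testability: now that getRegionSplitEditsPath takes the WAL file's name as a plain String (and is package-private with @VisibleForTesting), the tmp-file naming logic can be driven directly from a unit test without staging a FileStatus. A minimal sketch of such a test, assuming a WAL Entry fixture; the names here are illustrative, not part of this commit:

// Hypothetical test fragment (same package as WALSplitter). "entry" is an
// assumed WAL Entry fixture for some region.
FileSystem fs = FileSystem.get(HBaseConfiguration.create());
Path rootDir = new Path("/hbase");
String walFileName = "example-wal"; // placeholder; any split-source name works
Path edits = WALSplitter.getRegionSplitEditsPath(fs, entry, rootDir, walFileName);
// The tmp recovered-edits file name should embed the source WAL file name.
assertTrue(edits.getName().contains(walFileName));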

TestPartialResultsFromClientSide.java

@@ -0,0 +1,832 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.client.ClientScanner;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.filter.FirstKeyValueMatchingQualifiersFilter;
import org.apache.hadoop.hbase.filter.RandomRowFilter;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* These tests are focused on testing how partial results appear to a client. Partial results are
* {@link Result}s that contain only a portion of a row's complete list of cells. Partial results
* are formed when the server breaches its maximum result size when trying to service a client's RPC
* request. It is the responsibility of the scanner on the client side to recognize when partial
* results have been returned and to take action to form the complete results.
* <p>
* Unless the flag {@link Scan#setAllowPartialResults(boolean)} has been set to true, the caller of
* {@link ResultScanner#next()} should never see partial results.
*/
@Category(MediumTests.class)
public class TestPartialResultsFromClientSide {
private static final Log LOG = LogFactory.getLog(TestPartialResultsFromClientSide.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static Table TABLE = null;
/**
* Table configuration
*/
private static TableName TABLE_NAME = TableName.valueOf("testTable");
private static int NUM_ROWS = 5;
private static byte[] ROW = Bytes.toBytes("testRow");
private static byte[][] ROWS = HTestConst.makeNAscii(ROW, NUM_ROWS);
// Should keep this value below 10 to keep generation of expected kv's simple. If above 10 then
// table/row/cf1/... will be followed by table/row/cf10/... instead of table/row/cf2/... which
// breaks the simple generation of expected kv's
private static int NUM_FAMILIES = 10;
private static byte[] FAMILY = Bytes.toBytes("testFamily");
private static byte[][] FAMILIES = HTestConst.makeNAscii(FAMILY, NUM_FAMILIES);
private static int NUM_QUALIFIERS = 10;
private static byte[] QUALIFIER = Bytes.toBytes("testQualifier");
private static byte[][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, NUM_QUALIFIERS);
private static int VALUE_SIZE = 1024;
private static byte[] VALUE = Bytes.createMaxByteArray(VALUE_SIZE);
private static int NUM_COLS = NUM_FAMILIES * NUM_QUALIFIERS;
// Approximation of the heap size of a cell in our table. Should be accessed through
// getCellHeapSize().
private static long CELL_HEAP_SIZE = -1;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster(3);
TABLE = createTestTable(TABLE_NAME, ROWS, FAMILIES, QUALIFIERS, VALUE);
}
static Table createTestTable(TableName name, byte[][] rows, byte[][] families,
byte[][] qualifiers, byte[] cellValue) throws IOException {
Table ht = TEST_UTIL.createTable(name, families);
List<Put> puts = createPuts(rows, families, qualifiers, cellValue);
ht.put(puts);
return ht;
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* Ensure that the expected key values appear in a result returned from a scanner that is
* combining partial results into complete results
* @throws Exception
*/
@Test
public void testExpectedValuesOfPartialResults() throws Exception {
testExpectedValuesOfPartialResults(false);
testExpectedValuesOfPartialResults(true);
}
public void testExpectedValuesOfPartialResults(boolean reversed) throws Exception {
Scan partialScan = new Scan();
partialScan.setMaxVersions();
// Max result size of 1 ensures that each RPC request will return a single cell. The scanner
// will need to reconstruct the results into a complete result before returning to the caller
partialScan.setMaxResultSize(1);
partialScan.setReversed(reversed);
ResultScanner partialScanner = TABLE.getScanner(partialScan);
final int startRow = reversed ? ROWS.length - 1 : 0;
final int endRow = reversed ? -1 : ROWS.length;
final int loopDelta = reversed ? -1 : 1;
String message;
for (int row = startRow; row != endRow; row = row + loopDelta) {
message = "Ensuring the expected keyValues are present for row " + row;
List<Cell> expectedKeyValues = createKeyValuesForRow(ROWS[row], FAMILIES, QUALIFIERS, VALUE);
Result result = partialScanner.next();
assertFalse(result.isPartial());
verifyResult(result, expectedKeyValues, message);
}
partialScanner.close();
}
/**
* Ensure that we only see Results marked as partial when the allowPartial flag is set
* @throws Exception
*/
@Test
public void testAllowPartialResults() throws Exception {
Scan scan = new Scan();
scan.setAllowPartialResults(true);
scan.setMaxResultSize(1);
ResultScanner scanner = TABLE.getScanner(scan);
Result result = scanner.next();
assertTrue(result != null);
assertTrue(result.isPartial());
assertTrue(result.rawCells() != null);
assertTrue(result.rawCells().length == 1);
scanner.close();
scan.setAllowPartialResults(false);
scanner = TABLE.getScanner(scan);
result = scanner.next();
assertTrue(result != null);
assertTrue(!result.isPartial());
assertTrue(result.rawCells() != null);
assertTrue(result.rawCells().length == NUM_COLS);
scanner.close();
}
/**
* Ensure that the results returned from a scanner that retrieves all results in a single RPC call
* matches the results that are returned from a scanner that must incrementally combine partial
* results into complete results. A variety of scan configurations can be tested
* @throws Exception
*/
@Test
public void testEquivalenceOfScanResults() throws Exception {
Scan oneShotScan = new Scan();
oneShotScan.setMaxResultSize(Long.MAX_VALUE);
Scan partialScan = new Scan(oneShotScan);
partialScan.setMaxResultSize(1);
testEquivalenceOfScanResults(TABLE, oneShotScan, partialScan);
}
public void testEquivalenceOfScanResults(Table table, Scan scan1, Scan scan2) throws Exception {
ResultScanner scanner1 = table.getScanner(scan1);
ResultScanner scanner2 = table.getScanner(scan2);
Result r1 = null;
Result r2 = null;
int count = 0;
while ((r1 = scanner1.next()) != null) {
r2 = scanner2.next();
assertTrue(r2 != null);
compareResults(r1, r2, "Comparing result #" + count);
count++;
}
r2 = scanner2.next();
assertTrue("r2: " + r2 + " Should be null", r2 == null);
scanner1.close();
scanner2.close();
}
/**
* Order of cells in partial results matches the ordering of cells from complete results
* @throws Exception
*/
@Test
public void testOrderingOfCellsInPartialResults() throws Exception {
Scan scan = new Scan();
for (int col = 1; col <= NUM_COLS; col++) {
scan.setMaxResultSize(getResultSizeForNumberOfCells(col));
testOrderingOfCellsInPartialResults(scan);
// Test again with a reversed scanner
scan.setReversed(true);
testOrderingOfCellsInPartialResults(scan);
}
}
public void testOrderingOfCellsInPartialResults(final Scan basePartialScan) throws Exception {
// Scan that retrieves results in pieces (partials). By setting allowPartialResults to be true
// the results will NOT be reconstructed and instead the caller will see the partial results
// returned by the server
Scan partialScan = new Scan(basePartialScan);
partialScan.setAllowPartialResults(true);
ResultScanner partialScanner = TABLE.getScanner(partialScan);
// Scan that retrieves all table results in single RPC request
Scan oneShotScan = new Scan(basePartialScan);
oneShotScan.setMaxResultSize(Long.MAX_VALUE);
oneShotScan.setCaching(ROWS.length);
ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan);
Result oneShotResult = oneShotScanner.next();
Result partialResult = null;
int iterationCount = 0;
while (oneShotResult != null && oneShotResult.rawCells() != null) {
List<Cell> aggregatePartialCells = new ArrayList<Cell>();
do {
partialResult = partialScanner.next();
assertTrue("Partial Result is null. iteration: " + iterationCount, partialResult != null);
assertTrue("Partial cells are null. iteration: " + iterationCount,
partialResult.rawCells() != null);
for (Cell c : partialResult.rawCells()) {
aggregatePartialCells.add(c);
}
} while (partialResult.isPartial());
assertTrue("Number of cells differs. iteration: " + iterationCount,
oneShotResult.rawCells().length == aggregatePartialCells.size());
final Cell[] oneShotCells = oneShotResult.rawCells();
for (int cell = 0; cell < oneShotCells.length; cell++) {
Cell oneShotCell = oneShotCells[cell];
Cell partialCell = aggregatePartialCells.get(cell);
assertTrue("One shot cell was null", oneShotCell != null);
assertTrue("Partial cell was null", partialCell != null);
assertTrue("Cell differs. oneShotCell:" + oneShotCell + " partialCell:" + partialCell,
oneShotCell.equals(partialCell));
}
oneShotResult = oneShotScanner.next();
iterationCount++;
}
assertTrue(partialScanner.next() == null);
partialScanner.close();
oneShotScanner.close();
}
/**
* Setting the max result size allows us to control how many cells we expect to see on each call
* to next on the scanner. Test a variety of different sizes for correctness
* @throws Exception
*/
@Test
public void testExpectedNumberOfCellsPerPartialResult() throws Exception {
Scan scan = new Scan();
testExpectedNumberOfCellsPerPartialResult(scan);
scan.setReversed(true);
testExpectedNumberOfCellsPerPartialResult(scan);
}
public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan) throws Exception {
for (int expectedCells = 1; expectedCells <= NUM_COLS; expectedCells++) {
testExpectedNumberOfCellsPerPartialResult(baseScan, expectedCells);
}
}
public void testExpectedNumberOfCellsPerPartialResult(Scan baseScan, int expectedNumberOfCells)
throws Exception {
if (LOG.isInfoEnabled()) LOG.info("groupSize:" + expectedNumberOfCells);
// Use the cellHeapSize to set maxResultSize such that we know how many cells to expect back
// from the call. The returned results should NOT exceed expectedNumberOfCells but may be less
// than it in cases where expectedNumberOfCells is not an exact multiple of the number of
// columns in the table.
Scan scan = new Scan(baseScan);
scan.setAllowPartialResults(true);
scan.setMaxResultSize(getResultSizeForNumberOfCells(expectedNumberOfCells));
ResultScanner scanner = TABLE.getScanner(scan);
Result result = null;
byte[] prevRow = null;
while ((result = scanner.next()) != null) {
assertTrue(result.rawCells() != null);
// Cases when cell count won't equal expectedNumberOfCells:
// 1. Returned result is the final result needed to form the complete result for that row
// 2. It is the first result we have seen for that row and thus may have been fetched as
// the last group of cells that fit inside the maxResultSize
assertTrue(
"Result's cell count differed from expected number. result: " + result,
result.rawCells().length == expectedNumberOfCells || !result.isPartial()
|| !Bytes.equals(prevRow, result.getRow()));
prevRow = result.getRow();
}
scanner.close();
}
/**
* @return The approximate heap size of a cell in the test table. All cells should have
* approximately the same heap size, so the value is cached to avoid repeating the
* calculation
* @throws Exception
*/
private long getCellHeapSize() throws Exception {
if (CELL_HEAP_SIZE == -1) {
// Do a partial scan that will return a single result with a single cell
Scan scan = new Scan();
scan.setMaxResultSize(1);
scan.setAllowPartialResults(true);
ResultScanner scanner = TABLE.getScanner(scan);
Result result = scanner.next();
assertTrue(result != null);
assertTrue(result.rawCells() != null);
assertTrue(result.rawCells().length == 1);
CELL_HEAP_SIZE = CellUtil.estimatedHeapSizeOf(result.rawCells()[0]);
if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE);
scanner.close();
}
return CELL_HEAP_SIZE;
}
/**
* @param numberOfCells
* @return the result size that should be used in {@link Scan#setMaxResultSize(long)} if you want
* the server to return exactly numberOfCells cells
* @throws Exception
*/
private long getResultSizeForNumberOfCells(int numberOfCells) throws Exception {
return getCellHeapSize() * numberOfCells;
}
/**
* Test various combinations of batching and partial results for correctness
*/
@Test
public void testPartialResultsAndBatch() throws Exception {
for (int batch = 1; batch <= NUM_COLS / 4; batch++) {
for (int cellsPerPartial = 1; cellsPerPartial <= NUM_COLS / 4; cellsPerPartial++) {
testPartialResultsAndBatch(batch, cellsPerPartial);
}
}
}
public void testPartialResultsAndBatch(final int batch, final int cellsPerPartialResult)
throws Exception {
if (LOG.isInfoEnabled()) {
LOG.info("batch: " + batch + " cellsPerPartialResult: " + cellsPerPartialResult);
}
Scan scan = new Scan();
scan.setMaxResultSize(getResultSizeForNumberOfCells(cellsPerPartialResult));
scan.setBatch(batch);
ResultScanner scanner = TABLE.getScanner(scan);
Result result = null;
int repCount = 0;
while ((result = scanner.next()) != null) {
assertTrue(result.rawCells() != null);
if (result.isPartial()) {
final String error =
"Cells:" + result.rawCells().length + " Batch size:" + batch
+ " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount;
assertTrue(error, result.rawCells().length <= Math.min(batch, cellsPerPartialResult));
} else {
assertTrue(result.rawCells().length <= batch);
}
repCount++;
}
scanner.close();
}
/**
* Test the method {@link Result#createCompleteResult(List)}
* @throws Exception
*/
@Test
public void testPartialResultsReassembly() throws Exception {
Scan scan = new Scan();
testPartialResultsReassembly(scan);
scan.setReversed(true);
testPartialResultsReassembly(scan);
}
public void testPartialResultsReassembly(Scan scanBase) throws Exception {
Scan partialScan = new Scan(scanBase);
partialScan.setMaxResultSize(1);
partialScan.setAllowPartialResults(true);
ResultScanner partialScanner = TABLE.getScanner(partialScan);
Scan oneShotScan = new Scan(scanBase);
oneShotScan.setMaxResultSize(Long.MAX_VALUE);
ResultScanner oneShotScanner = TABLE.getScanner(oneShotScan);
ArrayList<Result> partials = new ArrayList<>();
for (int i = 0; i < NUM_ROWS; i++) {
Result partialResult = null;
Result completeResult = null;
Result oneShotResult = null;
partials.clear();
do {
partialResult = partialScanner.next();
partials.add(partialResult);
} while (partialResult != null && partialResult.isPartial());
completeResult = Result.createCompleteResult(partials);
oneShotResult = oneShotScanner.next();
compareResults(completeResult, oneShotResult, null);
}
assertTrue(oneShotScanner.next() == null);
assertTrue(partialScanner.next() == null);
oneShotScanner.close();
partialScanner.close();
}
/**
* When reconstructing the complete result from its partials we ensure that the row of each
* partial result is the same. If one of the rows differs, an exception is thrown.
*/
@Test
public void testExceptionThrownOnMismatchedPartialResults() throws IOException {
assertTrue(NUM_ROWS >= 2);
ArrayList<Result> partials = new ArrayList<>();
Scan scan = new Scan();
scan.setMaxResultSize(Long.MAX_VALUE);
ResultScanner scanner = TABLE.getScanner(scan);
Result r1 = scanner.next();
partials.add(r1);
Result r2 = scanner.next();
partials.add(r2);
assertFalse(Bytes.equals(r1.getRow(), r2.getRow()));
try {
Result.createCompleteResult(partials);
fail("r1 and r2 are from different rows. It should not be possible to combine them into"
+ " a single result");
} catch (IOException e) {
}
scanner.close();
}
/**
* When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is
* true, the scanner should not return partial results. The scanner cannot return partial results
* because the entire row needs to be read for the include/exclude decision to be made
*/
@Test
public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
Scan scan = new Scan();
scan.setMaxResultSize(1);
scan.setAllowPartialResults(true);
// If a filter's hasFilterRow() is true then partial results should not be returned; otherwise
// filter application on the server side would break.
scan.setFilter(new RandomRowFilter(1.0f));
ResultScanner scanner = TABLE.getScanner(scan);
Result r = null;
while ((r = scanner.next()) != null) {
assertFalse(r.isPartial());
}
scanner.close();
}
/**
* Examine the interaction between the maxResultSize and caching. If the caching limit is reached
* before the maxResultSize limit, we should not see partial results. On the other hand, if the
* maxResultSize limit is reached before the caching limit, it is likely that partial results will
* be seen.
* @throws Exception
*/
@Test
public void testPartialResultsAndCaching() throws Exception {
for (int caching = 1; caching <= NUM_ROWS; caching++) {
for (int maxResultRows = 0; maxResultRows <= NUM_ROWS; maxResultRows++) {
testPartialResultsAndCaching(maxResultRows, caching);
}
}
}
/**
* @param resultSizeRowLimit The row limit that will be enforced through maxResultSize
* @param cachingRowLimit The row limit that will be enforced through caching
* @throws Exception
*/
public void testPartialResultsAndCaching(int resultSizeRowLimit, int cachingRowLimit)
throws Exception {
Scan scan = new Scan();
scan.setAllowPartialResults(true);
// The number of cells specified in the call to getResultSizeForNumberOfCells is offset to
// ensure that the result size we specify is not an exact multiple of the number of cells
// in a row. This ensures that partial results will be returned when the result size limit
// is reached before the caching limit.
int cellOffset = NUM_COLS / 3;
long maxResultSize = getResultSizeForNumberOfCells(resultSizeRowLimit * NUM_COLS + cellOffset);
scan.setMaxResultSize(maxResultSize);
scan.setCaching(cachingRowLimit);
ResultScanner scanner = TABLE.getScanner(scan);
ClientScanner clientScanner = (ClientScanner) scanner;
Result r = null;
// Approximate the number of rows we expect will fit into the specified max result size. If this
// approximation is less than caching, then we expect that the max result size limit will be
// hit before the caching limit and thus partial results may be seen
boolean expectToSeePartialResults = resultSizeRowLimit < cachingRowLimit;
while ((r = clientScanner.next()) != null) {
assertTrue(!r.isPartial() || expectToSeePartialResults);
}
scanner.close();
}
/**
* Small scans should not return partial results because it would prevent small scans from
retrieving all of the necessary results in a single RPC request, which is what makes small
* scans useful. Thus, ensure that even when {@link Scan#getAllowPartialResults()} is true, small
* scans do not return partial results
* @throws Exception
*/
@Test
public void testSmallScansDoNotAllowPartials() throws Exception {
Scan scan = new Scan();
testSmallScansDoNotAllowPartials(scan);
scan.setReversed(true);
testSmallScansDoNotAllowPartials(scan);
}
public void testSmallScansDoNotAllowPartials(Scan baseScan) throws Exception {
Scan scan = new Scan(baseScan);
scan.setAllowPartialResults(true);
scan.setSmall(true);
scan.setMaxResultSize(1);
ResultScanner scanner = TABLE.getScanner(scan);
Result r = null;
while ((r = scanner.next()) != null) {
assertFalse(r.isPartial());
}
scanner.close();
}
/**
* Make puts to put the input value into each combination of row, family, and qualifier
* @param rows
* @param families
* @param qualifiers
* @param value
* @return
* @throws IOException
*/
static ArrayList<Put> createPuts(byte[][] rows, byte[][] families, byte[][] qualifiers,
byte[] value) throws IOException {
Put put;
ArrayList<Put> puts = new ArrayList<>();
for (int row = 0; row < rows.length; row++) {
put = new Put(rows[row]);
for (int fam = 0; fam < families.length; fam++) {
for (int qual = 0; qual < qualifiers.length; qual++) {
KeyValue kv = new KeyValue(rows[row], families[fam], qualifiers[qual], qual, value);
put.add(kv);
}
}
puts.add(put);
}
return puts;
}
/**
* Make key values to represent each possible combination of family and qualifier in the specified
* row.
* @param row
* @param families
* @param qualifiers
* @param value
* @return
*/
static ArrayList<Cell> createKeyValuesForRow(byte[] row, byte[][] families, byte[][] qualifiers,
byte[] value) {
ArrayList<Cell> outList = new ArrayList<>();
for (int fam = 0; fam < families.length; fam++) {
for (int qual = 0; qual < qualifiers.length; qual++) {
outList.add(new KeyValue(row, families[fam], qualifiers[qual], qual, value));
}
}
return outList;
}
/**
* Verifies that result contains all the key values within expKvList. Fails the test otherwise
* @param result
* @param expKvList
* @param msg
*/
static void verifyResult(Result result, List<Cell> expKvList, String msg) {
if (LOG.isInfoEnabled()) {
LOG.info(msg);
LOG.info("Expected count: " + expKvList.size());
LOG.info("Actual count: " + result.size());
}
if (expKvList.size() == 0) return;
int i = 0;
for (Cell kv : result.rawCells()) {
if (i >= expKvList.size()) {
break; // we will check the size later
}
Cell kvExp = expKvList.get(i++);
assertTrue("Not equal. get kv: " + kv.toString() + " exp kv: " + kvExp.toString(),
kvExp.equals(kv));
}
assertEquals(expKvList.size(), result.size());
}
/**
* Compares two results and fails the test if the results are different
* @param r1
* @param r2
* @param message
*/
static void compareResults(Result r1, Result r2, final String message) {
if (LOG.isInfoEnabled()) {
if (message != null) LOG.info(message);
LOG.info("r1: " + r1);
LOG.info("r2: " + r2);
}
final String failureMessage = "Results r1:" + r1 + " \nr2:" + r2 + " are not equivalent";
if (r1 == null && r2 == null) fail(failureMessage);
else if (r1 == null || r2 == null) fail(failureMessage);
try {
Result.compareResults(r1, r2);
} catch (Exception e) {
fail(failureMessage);
}
}
@Test
public void testReadPointAndPartialResults() throws Exception {
TableName testName = TableName.valueOf("testReadPointAndPartialResults");
int numRows = 5;
int numFamilies = 5;
int numQualifiers = 5;
byte[][] rows = HTestConst.makeNAscii(Bytes.toBytes("testRow"), numRows);
byte[][] families = HTestConst.makeNAscii(Bytes.toBytes("testFamily"), numFamilies);
byte[][] qualifiers = HTestConst.makeNAscii(Bytes.toBytes("testQualifier"), numQualifiers);
byte[] value = Bytes.createMaxByteArray(100);
Table tmpTable = createTestTable(testName, rows, families, qualifiers, value);
Scan scan = new Scan();
scan.setMaxResultSize(1);
scan.setAllowPartialResults(true);
// Open scanner before deletes
ResultScanner scanner = tmpTable.getScanner(scan);
Delete delete1 = new Delete(rows[0]);
delete1.addColumn(families[0], qualifiers[0], 0);
tmpTable.delete(delete1);
Delete delete2 = new Delete(rows[1]);
delete2.addColumn(families[1], qualifiers[1], 1);
tmpTable.delete(delete2);
// Should see all cells because scanner was opened prior to deletes
int scannerCount = countCellsFromScanner(scanner);
int expectedCount = numRows * numFamilies * numQualifiers;
assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
scannerCount == expectedCount);
// Minus 2 for the two cells that were deleted
scanner = tmpTable.getScanner(scan);
scannerCount = countCellsFromScanner(scanner);
expectedCount = numRows * numFamilies * numQualifiers - 2;
assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
scannerCount == expectedCount);
scanner = tmpTable.getScanner(scan);
// Put in 2 new cells. The timestamps differ from those of the deleted cells
Put put1 = new Put(rows[0]);
put1.add(new KeyValue(rows[0], families[0], qualifiers[0], 1, value));
tmpTable.put(put1);
Put put2 = new Put(rows[1]);
put2.add(new KeyValue(rows[1], families[1], qualifiers[1], 2, value));
tmpTable.put(put2);
// Scanner opened prior to puts. Cell count shouldn't have changed
scannerCount = countCellsFromScanner(scanner);
expectedCount = numRows * numFamilies * numQualifiers - 2;
assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
scannerCount == expectedCount);
// Now the scanner should see the cells that were added by puts
scanner = tmpTable.getScanner(scan);
scannerCount = countCellsFromScanner(scanner);
expectedCount = numRows * numFamilies * numQualifiers;
assertTrue("scannerCount: " + scannerCount + " expectedCount: " + expectedCount,
scannerCount == expectedCount);
TEST_UTIL.deleteTable(testName);
}
/**
* Exhausts the scanner by calling next repeatedly. Once completely exhausted, close scanner and
* return total cell count
* @param scanner
* @return
* @throws Exception
*/
private int countCellsFromScanner(ResultScanner scanner) throws Exception {
Result result = null;
int numCells = 0;
while ((result = scanner.next()) != null) {
numCells += result.rawCells().length;
}
scanner.close();
return numCells;
}
/**
* Test partial Result re-assembly in the presence of different filters. The Results from the
* partial scanner should match the Results returned from a scanner that receives all of the
* results in one RPC to the server. The partial scanner is tested with a variety of different
* result sizes (all of which are less than the size necessary to fetch an entire row)
* @throws Exception
*/
@Test
public void testPartialResultsWithColumnFilter() throws Exception {
testPartialResultsWithColumnFilter(new FirstKeyOnlyFilter());
testPartialResultsWithColumnFilter(new ColumnPrefixFilter(Bytes.toBytes("testQualifier5")));
testPartialResultsWithColumnFilter(new ColumnRangeFilter(Bytes.toBytes("testQualifier1"), true,
Bytes.toBytes("testQualifier7"), true));
Set<byte[]> qualifiers = new LinkedHashSet<>();
qualifiers.add(Bytes.toBytes("testQualifier5"));
testPartialResultsWithColumnFilter(new FirstKeyValueMatchingQualifiersFilter(qualifiers));
}
public void testPartialResultsWithColumnFilter(Filter filter) throws Exception {
assertTrue(!filter.hasFilterRow());
Scan partialScan = new Scan();
partialScan.setFilter(filter);
Scan oneshotScan = new Scan();
oneshotScan.setFilter(filter);
oneshotScan.setMaxResultSize(Long.MAX_VALUE);
for (int i = 1; i <= NUM_COLS; i++) {
partialScan.setMaxResultSize(getResultSizeForNumberOfCells(i));
testEquivalenceOfScanResults(TABLE, partialScan, oneshotScan);
}
}
}
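Taken together, the tests above pin down a small client-facing contract. A condensed sketch of the two consumption modes, using only APIs exercised in this file (the table variable is a placeholder):

Scan scan = new Scan();
scan.setMaxResultSize(1); // force the server to chunk each row across RPCs
scan.setAllowPartialResults(true); // opt in to seeing the raw chunks
ResultScanner scanner = table.getScanner(scan);
List<Result> pieces = new ArrayList<>();
Result r;
while ((r = scanner.next()) != null) {
  pieces.add(r);
  if (!r.isPartial()) {
    // Last chunk of the current row: stitch the pieces into a whole-row Result.
    Result whole = Result.createCompleteResult(pieces);
    pieces.clear();
  }
}
scanner.close();
// Without setAllowPartialResults(true) the ClientScanner does this stitching
// itself, and next() only ever returns complete rows.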

TestMobSnapshotCloneIndependence.java

@@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
/**
* Test to verify that the cloned table is independent of the table from which it was cloned
*/
@Category(LargeTests.class)
public class TestMobSnapshotCloneIndependence extends TestSnapshotCloneIndependence {
private static final Log LOG = LogFactory.getLog(TestMobSnapshotCloneIndependence.class);
/**
* Setup the config for the cluster and start it
* @throws Exception on failure
*/
@BeforeClass
public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(NUM_RS);
}
protected static void setupConf(Configuration conf) {
TestSnapshotCloneIndependence.setupConf(conf);
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
@Override
protected Table createTable(final TableName table, byte[] family) throws Exception {
return MobSnapshotTestingUtils.createMobTable(UTIL, table, family);
}
@Override
public void loadData(final Table table, byte[]... families) throws Exception {
SnapshotTestingUtils.loadData(UTIL, table.getName(), 1000, families);
}
@Override
protected int countRows(final Table table, final byte[]... families) throws Exception {
return MobSnapshotTestingUtils.countMobRows(table, families);
}
}
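The subclass only swaps in a MOB table; all verification logic is inherited from TestSnapshotCloneIndependence. For context, a MOB-enabled table differs from a plain one only in its column-family descriptor, roughly as below (a sketch of what MobSnapshotTestingUtils.createMobTable presumably configures; the threshold value is illustrative):

HTableDescriptor htd = new HTableDescriptor(tableName);
HColumnDescriptor hcd = new HColumnDescriptor(family);
hcd.setMobEnabled(true); // store large cell values in separate MOB files
hcd.setMobThreshold(3L); // cells above this many bytes go to MOB (illustrative)
htd.addFamily(hcd);
admin.createTable(htd);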

TestSnapshotCloneIndependence.java

@@ -0,0 +1,481 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test to verify that the cloned table is independent of the table from which it was cloned
*/
@Category({LargeTests.class, ClientTests.class})
public class TestSnapshotCloneIndependence {
private static final Log LOG = LogFactory.getLog(TestSnapshotCloneIndependence.class);
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
protected static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
private static final String TEST_FAM_STR = "fam";
protected static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME);
private static final int CLEANER_INTERVAL = 100;
/**
* Setup the config for the cluster and start it
* @throws Exception on failure
*/
@BeforeClass
public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(NUM_RS);
}
static void setupConf(Configuration conf) {
// Up the handlers; this test needs more than usual.
conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 15);
// enable snapshot support
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
conf.setInt("hbase.master.info.port", -1);
// change the flush size to a small amount, regulating number of store files
conf.setInt("hbase.hregion.memstore.flush.size", 25000);
// so make sure we get a compaction when doing a load, but keep around
// some files in the store
conf.setInt("hbase.hstore.compaction.min", 10);
conf.setInt("hbase.hstore.compactionThreshold", 10);
// block writes if we get to 12 store files
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
conf.setInt("hbase.regionserver.msginterval", 100);
conf.setBoolean("hbase.master.enabletable.roundrobin", true);
// Avoid potentially aggressive splitting which would cause snapshot to fail
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
// Execute cleaner frequently to induce failures
conf.setInt("hbase.master.cleaner.interval", CLEANER_INTERVAL);
conf.setInt("hbase.master.hfilecleaner.plugins.snapshot.period", CLEANER_INTERVAL);
// Effectively disable TimeToLiveHFileCleaner. Don't want to fully disable it because that
// will even trigger races between creating the directory containing back references and
// the back reference itself.
conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL);
}
@Before
public void setup() throws Exception {
createTable(TABLE_NAME, TEST_FAM);
}
@After
public void tearDown() throws Exception {
UTIL.deleteTable(TABLE_NAME);
SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
}
@AfterClass
public static void cleanupTest() throws Exception {
try {
UTIL.shutdownMiniCluster();
} catch (Exception e) {
LOG.warn("failure shutting down cluster", e);
}
}
/**
* Verify that adding data to the cloned table will not affect the original, and vice-versa when
* it is taken as an online snapshot.
*/
@Ignore ("Flakey. Fix") @Test (timeout=300000)
public void testOnlineSnapshotAppendIndependent() throws Exception {
runTestSnapshotAppendIndependent(true);
}
/**
* Verify that adding data to the cloned table will not affect the original, and vice-versa when
* it is taken as an offline snapshot.
*/
@Test (timeout=300000)
@Ignore
public void testOfflineSnapshotAppendIndependent() throws Exception {
runTestSnapshotAppendIndependent(false);
}
/**
* Verify that adding metadata to the cloned table will not affect the original, and vice-versa
* when it is taken as an online snapshot.
*/
@Test (timeout=300000)
public void testOnlineSnapshotMetadataChangesIndependent() throws Exception {
runTestSnapshotMetadataChangesIndependent(true);
}
/**
* Verify that adding metadata to the cloned table will not affect the original, and vice-versa
* when it is taken as an offline snapshot.
*/
@Test (timeout=300000)
@Ignore
public void testOfflineSnapshotMetadataChangesIndependent() throws Exception {
runTestSnapshotMetadataChangesIndependent(false);
}
/**
* Verify that region operations, in this case splitting a region, are independent between the
* cloned table and the original.
*/
@Test (timeout=300000)
@Ignore
public void testOfflineSnapshotRegionOperationsIndependent() throws Exception {
runTestRegionOperationsIndependent(false);
}
/**
* Verify that region operations, in this case splitting a region, are independent between the
* cloned table and the original.
*/
@Test (timeout=300000)
public void testOnlineSnapshotRegionOperationsIndependent() throws Exception {
runTestRegionOperationsIndependent(true);
}
@Test (timeout=300000)
@Ignore
public void testOfflineSnapshotDeleteIndependent() throws Exception {
runTestSnapshotDeleteIndependent(false);
}
@Ignore ("Flakey test") @Test (timeout=300000)
public void testOnlineSnapshotDeleteIndependent() throws Exception {
runTestSnapshotDeleteIndependent(true);
}
private static void waitOnSplit(Connection c, final Table t, int originalCount) throws Exception {
for (int i = 0; i < 200; i++) {
try {
Thread.sleep(500);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
try (RegionLocator locator = c.getRegionLocator(t.getName())) {
if (locator.getAllRegionLocations().size() > originalCount) {
return;
}
}
}
throw new Exception("Split did not increase the number of regions");
}
/*
* Take a snapshot of a table, add data, and verify that this only
* affects one table
* @param online - Whether the table is online or not during the snapshot
*/
private void runTestSnapshotAppendIndependent(boolean online) throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
try (Table original = createTable(localTableName, TEST_FAM)) {
loadData(original, TEST_FAM);
final int origTableRowCount = countRows(original);
// Take a snapshot
final String snapshotNameAsString = "snapshot_" + localTableName;
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
snapshotNameAsString, rootDir, fs, online);
if (!online) {
tryDisable(admin, localTableName);
}
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
admin.cloneSnapshot(snapshotName, cloneTableName);
try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
// Make sure that all the regions are available before starting
UTIL.waitUntilAllRegionsAssigned(cloneTableName);
final int clonedTableRowCount = countRows(clonedTable);
Assert.assertEquals(
"The line counts of original and cloned tables do not match after clone. ",
origTableRowCount, clonedTableRowCount);
// Attempt to add data to the original table
final String rowKey = "new-row-" + System.currentTimeMillis();
Put p = new Put(Bytes.toBytes(rowKey));
p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
original.put(p);
// Verify that the put modified the original table but did not affect the clone
Assert.assertEquals("The row count of the original table was not modified by the put",
origTableRowCount + 1, countRows(original));
Assert.assertEquals(
"The row count of the cloned table changed as a result of addition to the original",
clonedTableRowCount, countRows(clonedTable));
p = new Put(Bytes.toBytes(rowKey));
p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
clonedTable.put(p);
// Verify that the put to the clone did not affect the original table
Assert.assertEquals(
"The row count of the original table was modified by the put to the clone",
origTableRowCount + 1, countRows(original));
Assert.assertEquals("The row count of the cloned table was not modified by the put",
clonedTableRowCount + 1, countRows(clonedTable));
}
}
}
/*
* Take a snapshot of a table, do a split, and verify that this only affects one table
* @param online - Whether the table is online or not during the snapshot
*/
private void runTestRegionOperationsIndependent(boolean online) throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
// Create a table
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
Table original = createTable(localTableName, TEST_FAM);
loadData(original, TEST_FAM);
final int loadedTableCount = countRows(original);
System.out.println("Original table has: " + loadedTableCount + " rows");
final String snapshotNameAsString = "snapshot_" + localTableName;
// Create a snapshot
SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
snapshotNameAsString, rootDir, fs, online);
if (!online) {
tryDisable(admin, localTableName);
}
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
// Clone the snapshot
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
admin.cloneSnapshot(snapshotName, cloneTableName);
// Verify that region information is the same pre-split
((ClusterConnection) UTIL.getConnection()).clearRegionCache();
List<HRegionInfo> originalTableHRegions = admin.getTableRegions(localTableName);
final int originalRegionCount = originalTableHRegions.size();
final int cloneTableRegionCount = admin.getTableRegions(cloneTableName).size();
Assert.assertEquals(
"The number of regions in the cloned table is different than in the original table.",
originalRegionCount, cloneTableRegionCount);
// Split a region on the parent table
admin.splitRegion(originalTableHRegions.get(0).getRegionName());
waitOnSplit(UTIL.getConnection(), original, originalRegionCount);
// Verify that the cloned table region is not split
final int cloneTableRegionCount2 = admin.getTableRegions(cloneTableName).size();
Assert.assertEquals(
"The number of regions in the cloned table changed though none of its regions were split.",
cloneTableRegionCount, cloneTableRegionCount2);
}
/*
* Take a snapshot of a table, add metadata, and verify that this only
* affects one table
* @param online - Whether the table is online or not during the snapshot
*/
private void runTestSnapshotMetadataChangesIndependent(boolean online) throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
// Create a table
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
Table original = createTable(localTableName, TEST_FAM);
loadData(original, TEST_FAM);
final String snapshotNameAsString = "snapshot_" + localTableName;
// Create a snapshot
SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
snapshotNameAsString, rootDir, fs, online);
if (!online) {
tryDisable(admin, localTableName);
}
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
// Clone the snapshot
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
admin.cloneSnapshot(snapshotName, cloneTableName);
// Add a new column family to the original table
byte[] TEST_FAM_2 = Bytes.toBytes("fam2");
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM_2);
tryDisable(admin, localTableName);
admin.addColumnFamily(localTableName, hcd);
// Verify that it is not in the snapshot
admin.enableTable(localTableName);
UTIL.waitTableAvailable(localTableName);
// get a description of the cloned table
// get a list of its families
// assert that the family is there
HTableDescriptor originalTableDescriptor = original.getTableDescriptor();
HTableDescriptor clonedTableDescriptor = admin.getTableDescriptor(cloneTableName);
Assert.assertTrue("The original family was not found. There is something wrong. ",
originalTableDescriptor.hasFamily(TEST_FAM));
Assert.assertTrue("The original family was not found in the clone. There is something wrong. ",
clonedTableDescriptor.hasFamily(TEST_FAM));
Assert.assertTrue("The new family was not found. ",
originalTableDescriptor.hasFamily(TEST_FAM_2));
Assert.assertTrue("The new family was not found. ",
!clonedTableDescriptor.hasFamily(TEST_FAM_2));
}
private void tryDisable(Admin admin, TableName localTableName) throws IOException {
int offlineRetry = 0;
while (offlineRetry < 5 && admin.isTableEnabled(localTableName)) {
try {
admin.disableTable(localTableName);
} catch (IOException ioe) {
LOG.warn("Error disabling the table", ioe);
}
offlineRetry++;
}
}
/*
* Take a snapshot of a table, add data, and verify that deleting the snapshot does not affect
* either table.
* @param online - Whether the table is online or not during the snapshot
*/
private void runTestSnapshotDeleteIndependent(boolean online) throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
final Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
try (Table original = createTable(localTableName, TEST_FAM)) {
loadData(original, TEST_FAM);
}
// Take a snapshot
final String snapshotNameAsString = "snapshot_" + localTableName;
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
snapshotNameAsString, rootDir, fs, online);
if (!online) {
tryDisable(admin, localTableName);
}
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
admin.cloneSnapshot(snapshotName, cloneTableName);
UTIL.waitUntilAllRegionsAssigned(cloneTableName);
// Ensure the original table does not reference the HFiles anymore
admin.majorCompact(localTableName);
// Deleting the snapshot used to break the cloned table by deleting in-use HFiles
admin.deleteSnapshot(snapshotName);
// Wait for cleaner run and DFS heartbeats so that anything that is deletable is fully deleted
do {
Thread.sleep(5000);
} while (!admin.listSnapshots(snapshotNameAsString).isEmpty());
try (Table original = UTIL.getConnection().getTable(localTableName)) {
try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
// Verify that all regions of both tables are readable
final int origTableRowCount = countRows(original);
final int clonedTableRowCount = countRows(clonedTable);
Assert.assertEquals(origTableRowCount, clonedTableRowCount);
}
}
}
protected Table createTable(final TableName table, byte[] family) throws Exception {
Table t = UTIL.createTable(table, family);
// Wait for everything to be ready with the table
UTIL.waitUntilAllRegionsAssigned(table);
// At this point the table should be good to go.
return t;
}
protected void loadData(final Table table, byte[]... families) throws Exception {
UTIL.loadTable(table, families);
}
protected int countRows(final Table table, final byte[]... families) throws Exception {
return UTIL.countRows(table, families);
}
}
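All of the run* helpers above drive one and the same admin workflow; stripped of the verification it reduces to the following (snapshot and table names are placeholders):

Admin admin = UTIL.getHBaseAdmin();
// Online case: snapshot while the table serves traffic. Offline case: disable it first.
admin.snapshot("snapshot_test", TableName.valueOf("test"));
admin.cloneSnapshot(Bytes.toBytes("snapshot_test"), TableName.valueOf("test-clone"));
// From here on, puts, splits and schema changes on either table must leave the
// other untouched, and deleting the snapshot must not break the clone:
admin.deleteSnapshot(Bytes.toBytes("snapshot_test"));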

TestStochasticLoadBalancer2.java

@@ -0,0 +1,90 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.balancer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.testclassification.FlakeyTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({FlakeyTests.class, MediumTests.class})
public class TestStochasticLoadBalancer2 extends BalancerTestBase {
private static final Log LOG = LogFactory.getLog(TestStochasticLoadBalancer2.class);
@Test (timeout = 800000)
public void testRegionReplicasOnMidCluster() {
conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
loadBalancer.setConf(conf);
int numNodes = 200;
int numRegions = 40 * 200;
int replication = 3; // 3 replicas per region
int numRegionsPerServer = 30; //all regions are mostly balanced
int numTables = 10;
testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
}
@Test (timeout = 800000)
public void testRegionReplicasOnLargeCluster() {
conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
loadBalancer.setConf(conf);
int numNodes = 1000;
int numRegions = 20 * numNodes; // 20 * replication regions per RS
int numRegionsPerServer = 19; // all servers except one
int numTables = 100;
int replication = 3;
testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, true);
}
@Test (timeout = 800000)
public void testRegionReplicasOnMidClusterHighReplication() {
conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 4000000L);
conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
loadBalancer.setConf(conf);
int numNodes = 80;
int numRegions = 6 * numNodes;
int replication = 80; // 80 replicas per region, one for each server
int numRegionsPerServer = 5;
int numTables = 10;
testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, true);
}
@Test (timeout = 800000)
public void testRegionReplicationOnMidClusterReplicationGreaterThanNumNodes() {
conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L);
conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec
conf.setFloat("hbase.master.balancer.stochastic.localityCost", 0);
conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
loadBalancer.setConf(conf);
int numNodes = 40;
int numRegions = 6 * 50;
int replication = 50; // 50 replicas per region, more than numNodes
int numRegionsPerServer = 6;
int numTables = 10;
testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, true, false);
}
}
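Every case above follows the same template: tune the stochastic balancer through configuration, then hand a synthetic cluster to the shared harness. Reading the testWithCluster arguments positionally, as inferred from the calls in this file (the signature itself lives in BalancerTestBase, outside this diff):

testWithCluster(
    numNodes,            // servers in the mock cluster
    numRegions,          // total regions spread across them
    numRegionsPerServer, // regions per server in the initial distribution
    replication,         // replicas per region
    numTables,           // regions are divided among this many tables
    true,                // assert region counts end up balanced
    true);               // assert replicas land on distinct servers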

TestMasterFailoverWithProcedures.java

@@ -0,0 +1,514 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.procedure;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.EnableTableState;
import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.TruncateTableState;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@Category({MasterTests.class, LargeTests.class})
public class TestMasterFailoverWithProcedures {
private static final Log LOG = LogFactory.getLog(TestMasterFailoverWithProcedures.class);
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static void setupConf(Configuration conf) {
// don't waste time retrying with the roll; the test is already slow enough.
conf.setInt("hbase.procedure.store.wal.max.retries.before.roll", 1);
conf.setInt("hbase.procedure.store.wal.wait.before.roll", 0);
conf.setInt("hbase.procedure.store.wal.max.roll.retries", 1);
conf.setInt("hbase.procedure.store.wal.sync.failure.roll.max", 1);
}
@Before
public void setup() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(2, 1);
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, false);
ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, false);
}
@After
public void tearDown() throws Exception {
try {
UTIL.shutdownMiniCluster();
} catch (Exception e) {
LOG.warn("failure shutting down cluster", e);
}
}
@Test(timeout=60000)
public void testWalRecoverLease() throws Exception {
final ProcedureStore masterStore = getMasterProcedureExecutor().getStore();
assertTrue("expected WALStore for this test", masterStore instanceof WALProcedureStore);
HMaster firstMaster = UTIL.getHBaseCluster().getMaster();
// Abort Latch for the master store
final CountDownLatch masterStoreAbort = new CountDownLatch(1);
masterStore.registerListener(new ProcedureStore.ProcedureStoreListener() {
@Override
public void postSync() {}
@Override
public void abortProcess() {
LOG.debug("Abort store of Master");
masterStoreAbort.countDown();
}
});
// start up a fake master; the new WAL store will take the lease
// and the active master should abort.
HMaster backupMaster3 = Mockito.mock(HMaster.class);
Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration();
Mockito.doReturn(true).when(backupMaster3).isActiveMaster();
final WALProcedureStore backupStore3 = new WALProcedureStore(firstMaster.getConfiguration(),
firstMaster.getMasterFileSystem().getFileSystem(),
((WALProcedureStore)masterStore).getLogDir(),
new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3));
// Abort Latch for the test store
final CountDownLatch backupStore3Abort = new CountDownLatch(1);
backupStore3.registerListener(new ProcedureStore.ProcedureStoreListener() {
@Override
public void postSync() {}
@Override
public void abortProcess() {
LOG.debug("Abort store of backupMaster3");
backupStore3Abort.countDown();
backupStore3.stop(true);
}
});
backupStore3.start(1);
backupStore3.recoverLease();
// Try to trigger a command on the master (WAL lease expired on the active one)
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf("mtb"), "f");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
LOG.debug("submit proc");
try {
getMasterProcedureExecutor().submitProcedure(
new CreateTableProcedure(getMasterProcedureExecutor().getEnvironment(), htd, regions));
fail("expected RuntimeException 'sync aborted'");
} catch (RuntimeException e) {
LOG.info("got " + e.getMessage());
}
LOG.debug("wait master store abort");
masterStoreAbort.await();
// Now the real backup master should start up
LOG.debug("wait backup master to startup");
waitBackupMaster(UTIL, firstMaster);
assertEquals(true, firstMaster.isStopped());
// wait for the store to abort (the test will fail due to timeout if it doesn't)
LOG.debug("wait the store to abort");
backupStore3.getStoreTracker().setDeleted(1, false);
try {
backupStore3.delete(1);
fail("expected RuntimeException 'sync aborted'");
} catch (RuntimeException e) {
LOG.info("got " + e.getMessage());
}
backupStore3Abort.await();
}
/**
* Tests proper fencing in case the current WAL store is fenced right away,
* before the new store does any WAL rolls.
*/
@Test
public void testWALfencingWithoutWALRolling() throws IOException {
testWALfencing(false);
}
/**
* Tests proper fencing in case the current WAL store does not receive writes until after the
* new WAL does a couple of WAL rolls.
*/
@Test
public void testWALfencingWithWALRolling() throws IOException {
testWALfencing(true);
}
public void testWALfencing(boolean walRolls) throws IOException {
final ProcedureStore procStore = getMasterProcedureExecutor().getStore();
assertTrue("expected WALStore for this test", procStore instanceof WALProcedureStore);
HMaster firstMaster = UTIL.getHBaseCluster().getMaster();
// cause WAL rolling after a delete in WAL:
firstMaster.getConfiguration().setLong("hbase.procedure.store.wal.roll.threshold", 1);
HMaster backupMaster3 = Mockito.mock(HMaster.class);
Mockito.doReturn(firstMaster.getConfiguration()).when(backupMaster3).getConfiguration();
Mockito.doReturn(true).when(backupMaster3).isActiveMaster();
final WALProcedureStore procStore2 = new WALProcedureStore(firstMaster.getConfiguration(),
firstMaster.getMasterFileSystem().getFileSystem(),
((WALProcedureStore)procStore).getLogDir(),
new MasterProcedureEnv.WALStoreLeaseRecovery(backupMaster3));
// start a second store which should fence the first one out
LOG.info("Starting new WALProcedureStore");
procStore2.start(1);
procStore2.recoverLease();
// before writing back to the first WAL store, optionally do a couple of WAL rolls
// (which cause the old WAL files to be deleted).
if (walRolls) {
LOG.info("Inserting into second WALProcedureStore, causing WAL rolls");
for (int i = 0; i < 512; i++) {
// insert something to the second store then delete it, causing a WAL roll(s)
Procedure proc2 = new TestProcedure(i);
procStore2.insert(proc2, null);
procStore2.delete(proc2.getProcId()); // delete the procedure so that the WAL is removed later
}
}
// Now, insert something into the first store; it should fail.
// If the store does a WAL roll and continues with another logId without checking
// for higher logIds, it will incorrectly succeed.
LOG.info("Inserting into first WALProcedureStore");
try {
procStore.insert(new TestProcedure(11), null);
fail("Inserting into Procedure Store should have failed");
} catch (Exception ex) {
LOG.info("Received expected exception", ex);
}
}
// ==========================================================================
// Test Create Table
// ==========================================================================
@Test(timeout=60000)
public void testCreateWithFailover() throws Exception {
// TODO: Should we try every step? (master failover takes a long time)
// It is already covered by TestCreateTableProcedure,
// but without the master restart only the executor/store is restarted.
// Without a master restart we may not find bugs in the procedure code,
// like a missing "wait" for resources to be available (e.g. an RS).
testCreateWithFailoverAtStep(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS.ordinal());
}
private void testCreateWithFailoverAtStep(final int step) throws Exception {
final TableName tableName = TableName.valueOf("testCreateWithFailoverAtStep" + step);
// create the table
ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
// Start the Create procedure && kill the executor
byte[][] splitKeys = null;
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
testRecoveryAndDoubleExecution(UTIL, procId, step, CreateTableState.values());
MasterProcedureTestingUtility.validateTableCreation(
UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
// ==========================================================================
// Test Delete Table
// ==========================================================================
@Test(timeout=60000)
public void testDeleteWithFailover() throws Exception {
// TODO: Should we try every step? (master failover takes a long time)
// It is already covered by TestDeleteTableProcedure,
// but without the master restart only the executor/store is restarted.
// Without a master restart we may not find bugs in the procedure code,
// like a missing "wait" for resources to be available (e.g. an RS).
testDeleteWithFailoverAtStep(DeleteTableState.DELETE_TABLE_UNASSIGN_REGIONS.ordinal());
}
private void testDeleteWithFailoverAtStep(final int step) throws Exception {
final TableName tableName = TableName.valueOf("testDeleteWithFailoverAtStep" + step);
// create the table
byte[][] splitKeys = null;
HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
Path tableDir = FSUtils.getTableDir(getRootDir(), tableName);
MasterProcedureTestingUtility.validateTableCreation(
UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
UTIL.getHBaseAdmin().disableTable(tableName);
ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillBeforeStoreUpdate(procExec, true);
ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExec, true);
// Start the Delete procedure && kill the executor
long procId = procExec.submitProcedure(
new DeleteTableProcedure(procExec.getEnvironment(), tableName));
testRecoveryAndDoubleExecution(UTIL, procId, step, DeleteTableState.values());
MasterProcedureTestingUtility.validateTableDeletion(
UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
}
// ==========================================================================
// Test Truncate Table
// ==========================================================================
@Test(timeout=90000)
public void testTruncateWithFailover() throws Exception {
// TODO: Should we try every step? (master failover takes a long time)
// It is already covered by TestTruncateTableProcedure,
// but without the master restart only the executor/store is restarted.
// Without a master restart we may not find bugs in the procedure code,
// like a missing "wait" for resources to be available (e.g. an RS).
testTruncateWithFailoverAtStep(true, TruncateTableState.TRUNCATE_TABLE_ADD_TO_META.ordinal());
}
private void testTruncateWithFailoverAtStep(final boolean preserveSplits, final int step)
throws Exception {
final TableName tableName = TableName.valueOf("testTruncateWithFailoverAtStep" + step);
// create the table
final String[] families = new String[] { "f1", "f2" };
final byte[][] splitKeys = new byte[][] {
Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
};
HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
getMasterProcedureExecutor(), tableName, splitKeys, families);
// load and verify that there are rows in the table
MasterProcedureTestingUtility.loadData(
UTIL.getConnection(), tableName, 100, splitKeys, families);
assertEquals(100, UTIL.countRows(tableName));
// disable the table
UTIL.getHBaseAdmin().disableTable(tableName);
ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
// Start the Truncate procedure && kill the executor
long procId = procExec.submitProcedure(
new TruncateTableProcedure(procExec.getEnvironment(), tableName, preserveSplits));
testRecoveryAndDoubleExecution(UTIL, procId, step, TruncateTableState.values());
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
UTIL.waitUntilAllRegionsAssigned(tableName);
// validate the table regions and layout
if (preserveSplits) {
assertEquals(1 + splitKeys.length, UTIL.getHBaseAdmin().getTableRegions(tableName).size());
} else {
regions = UTIL.getHBaseAdmin().getTableRegions(tableName).toArray(new HRegionInfo[1]);
assertEquals(1, regions.length);
}
MasterProcedureTestingUtility.validateTableCreation(
UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
// verify that there are no rows in the table
assertEquals(0, UTIL.countRows(tableName));
// verify that the table is read/writable
MasterProcedureTestingUtility.loadData(
UTIL.getConnection(), tableName, 50, splitKeys, families);
assertEquals(50, UTIL.countRows(tableName));
}
// ==========================================================================
// Test Disable Table
// ==========================================================================
@Test(timeout=60000)
public void testDisableTableWithFailover() throws Exception {
// TODO: Should we try every step? (master failover takes a long time)
// It is already covered by TestDisableTableProcedure,
// but without the master restart only the executor/store is restarted.
// Without a master restart we may not find bugs in the procedure code,
// like a missing "wait" for resources to be available (e.g. an RS).
testDisableTableWithFailoverAtStep(
DisableTableState.DISABLE_TABLE_MARK_REGIONS_OFFLINE.ordinal());
}
private void testDisableTableWithFailoverAtStep(final int step) throws Exception {
final TableName tableName = TableName.valueOf("testDisableTableWithFailoverAtStep" + step);
// create the table
final byte[][] splitKeys = new byte[][] {
Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
};
MasterProcedureTestingUtility.createTable(
getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
// Start the Disable procedure && kill the executor
long procId = procExec.submitProcedure(
new DisableTableProcedure(procExec.getEnvironment(), tableName, false));
testRecoveryAndDoubleExecution(UTIL, procId, step, DisableTableState.values());
MasterProcedureTestingUtility.validateTableIsDisabled(
UTIL.getHBaseCluster().getMaster(), tableName);
}
// ==========================================================================
// Test Enable Table
// ==========================================================================
@Test(timeout=60000)
public void testEnableTableWithFailover() throws Exception {
// TODO: Should we try every step? (master failover takes a long time)
// It is already covered by TestEnableTableProcedure,
// but without the master restart only the executor/store is restarted.
// Without a master restart we may not find bugs in the procedure code,
// like a missing "wait" for resources to be available (e.g. an RS).
testEnableTableWithFailoverAtStep(
EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE.ordinal());
}
private void testEnableTableWithFailoverAtStep(final int step) throws Exception {
final TableName tableName = TableName.valueOf("testEnableTableWithFailoverAtStep" + step);
// create the table
final byte[][] splitKeys = new byte[][] {
Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
};
MasterProcedureTestingUtility.createTable(
getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
UTIL.getHBaseAdmin().disableTable(tableName);
ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
// Start the Enable procedure && kill the executor
long procId = procExec.submitProcedure(
new EnableTableProcedure(procExec.getEnvironment(), tableName, false));
testRecoveryAndDoubleExecution(UTIL, procId, step, EnableTableState.values());
MasterProcedureTestingUtility.validateTableIsEnabled(
UTIL.getHBaseCluster().getMaster(), tableName);
}
// ==========================================================================
// Test Helpers
// ==========================================================================
public static <TState> void testRecoveryAndDoubleExecution(final HBaseTestingUtility testUtil,
final long procId, final int lastStepBeforeFailover, TState[] states) throws Exception {
ProcedureExecutor<MasterProcedureEnv> procExec =
testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor();
ProcedureTestingUtility.waitProcedure(procExec, procId);
for (int i = 0; i < lastStepBeforeFailover; ++i) {
LOG.info("Restart "+ i +" exec state: " + states[i]);
ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
ProcedureTestingUtility.restart(procExec);
ProcedureTestingUtility.waitProcedure(procExec, procId);
}
ProcedureTestingUtility.assertProcNotYetCompleted(procExec, procId);
LOG.info("Trigger master failover");
masterFailover(testUtil);
procExec = testUtil.getHBaseCluster().getMaster().getMasterProcedureExecutor();
ProcedureTestingUtility.waitProcedure(procExec, procId);
ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
}
// ==========================================================================
// Master failover utils
// ==========================================================================
public static void masterFailover(final HBaseTestingUtility testUtil)
throws Exception {
MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
// Kill the master
HMaster oldMaster = cluster.getMaster();
cluster.killMaster(cluster.getMaster().getServerName());
// Wait for the secondary to take over
waitBackupMaster(testUtil, oldMaster);
}
public static void waitBackupMaster(final HBaseTestingUtility testUtil,
final HMaster oldMaster) throws Exception {
MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
HMaster newMaster = cluster.getMaster();
while (newMaster == null || newMaster == oldMaster) {
Thread.sleep(250);
newMaster = cluster.getMaster();
}
while (!(newMaster.isActiveMaster() && newMaster.isInitialized())) {
Thread.sleep(250);
}
}
// ==========================================================================
// Helpers
// ==========================================================================
private MasterProcedureEnv getMasterProcedureEnv() {
return getMasterProcedureExecutor().getEnvironment();
}
private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
}
private FileSystem getFileSystem() {
return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
}
private Path getRootDir() {
return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
}
private Path getTempDir() {
return UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getTempDir();
}
}
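Each DDL test above follows one recipe: arm the kill-before-store-update fault, submit the procedure, then drive it through executor restarts plus a real master failover via testRecoveryAndDoubleExecution. A hedged sketch of reusing the helper for a further procedure follows; ModifyTableProcedure, the table name, and the chosen state are illustrative stand-ins, not part of this change.

// Sketch (illustrative): reusing the generic failover helper for another procedure.
// Assumes the table already exists; names below are placeholders.
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(
    TableName.valueOf("testModifyWithFailover"), "f1");
ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd));
testRecoveryAndDoubleExecution(UTIL, procId,
    ModifyTableState.MODIFY_TABLE_UPDATE_TABLE_DESCRIPTOR.ordinal(),
    ModifyTableState.values());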

View File

@ -0,0 +1,72 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
/**
* Test creating/using/deleting snapshots from the client
* <p>
* This is an end-to-end test for the snapshot utility
*
* TODO This is essentially a clone of TestSnapshotFromClient. It is worth refactoring
* because there will be a few more flavors of snapshots that need to run these tests.
*/
@Category({ClientTests.class, LargeTests.class})
public class TestMobFlushSnapshotFromClient extends TestFlushSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestMobFlushSnapshotFromClient.class);
@BeforeClass
public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(3);
}
protected static void setupConf(Configuration conf) {
TestFlushSnapshotFromClient.setupConf(conf);
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0); // apply to the passed-in conf, not UTIL's
}
@Override
protected void createTable() throws Exception {
MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 1, TEST_FAM);
}
@Override
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
}
@Override
protected int countRows(final Table table, final byte[]... families) throws IOException {
return MobSnapshotTestingUtils.countMobRows(table, families);
}
}
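The MOB variant above only overrides the three protected hooks and inherits every test method from the parent, which is exactly the extension point the TODO alludes to. A hypothetical sketch of what another flavor would look like; FlavorSnapshotTestingUtils and its methods are invented purely for illustration.

// Hypothetical: a further snapshot flavor reusing the parent's test methods.
// FlavorSnapshotTestingUtils does not exist; it marks where flavor-specific
// helpers would plug in.
@Category({ClientTests.class, LargeTests.class})
public class TestFlavorFlushSnapshotFromClient extends TestFlushSnapshotFromClient {
  @Override
  protected void createTable() throws Exception {
    FlavorSnapshotTestingUtils.createFlavorTable(UTIL, TABLE_NAME, 1, TEST_FAM);
  }
  @Override
  protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
      long expectedRows) throws IOException {
    FlavorSnapshotTestingUtils.verifyFlavorRowCount(util, tableName, expectedRows);
  }
  @Override
  protected int countRows(final Table table, final byte[]... families) throws IOException {
    return FlavorSnapshotTestingUtils.countFlavorRows(table, families);
  }
}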

File diff suppressed because it is too large

View File

@ -0,0 +1,36 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.wal;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.junit.BeforeClass;
import org.junit.experimental.categories.Category;
@Category({RegionServerTests.class, LargeTests.class})
public class TestWALSplitCompressed extends TestWALSplit {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TestWALSplit.setUpBeforeClass();
TEST_UTIL.getConfiguration().setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);
}
}
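TestWALSplitCompressed shows the cheapest way to re-run an entire suite under a different configuration: subclass, call the parent's @BeforeClass, then flip the flag before any test runs. The same pattern with a placeholder key is sketched below; only ENABLE_WAL_COMPRESSION above is real, the key in the sketch is hypothetical.

// Sketch: re-running TestWALSplit under another config twist.
// "hbase.regionserver.wal.example.flag" is a hypothetical key.
@Category({RegionServerTests.class, LargeTests.class})
public class TestWALSplitExampleFlag extends TestWALSplit {
  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TestWALSplit.setUpBeforeClass();
    TEST_UTIL.getConfiguration().setBoolean("hbase.regionserver.wal.example.flag", true);
  }
}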

View File

@ -0,0 +1,37 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.jruby.embed.PathType;
import org.junit.Test;
import org.junit.Ignore;
import org.junit.experimental.categories.Category;
@Category({ ClientTests.class, LargeTests.class })
public class TestReplicationShell extends AbstractTestShell {
@Ignore ("Disabled because hangs on occasion.. about 10% of the time") @Test
public void testRunShellTests() throws IOException {
System.setProperty("shell.test.include", "replication_admin_test.rb");
// Start all ruby tests
jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");
}
}
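The shell tests are scoped through the shell.test.include system property, which the ruby runner uses to restrict which test files it loads before executing them under JRuby. A hedged sketch of pointing the same runner at a different test file; the file name is an assumption for illustration.

// Sketch: scoping the JRuby runner to a single ruby test file.
System.setProperty("shell.test.include", "table_test.rb"); // file name is illustrative
jruby.runScriptlet(PathType.ABSOLUTE, "src/test/ruby/tests_runner.rb");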