HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213 (stack)

stack 2016-02-08 08:43:11 -08:00 committed by Mikhail Antonov
parent fb9a8a09f7
commit 76cf0d799f
5 changed files with 20 additions and 238 deletions
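
Note: what this commit removes, in miniature. Before HBASE-15224, a deployment opted into the fast-but-narrow increment path through the boolean key deleted in the HRegion hunk below; HBASE-15213 made that knob unnecessary. A hedged sketch of how the option was set (configuration object only, no cluster assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FastIncrementOptIn {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key removed by this commit; it defaulted to false (see the constructor hunk below).
    conf.setBoolean("hbase.increment.fast.but.narrow.consistency", true);
  }
}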

org/apache/hadoop/hbase/regionserver/HRegion.java

@@ -218,16 +218,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
private static final String MAX_WAIT_FOR_SEQ_ID_KEY = "hbase.hregion.max.wait.for.sequenceid.ms";
private static final int DEFAULT_MAX_WAIT_FOR_SEQ_ID = 30000;
/**
* Set region to take the fast increment path. Constraint is that caller can only access the
* Cell via Increment; intermixing Increment with other Mutations will give indeterminate
* results. A Get with {@link IsolationLevel#READ_UNCOMMITTED} will return the latest
* increment; an Increment of zero will do the same.
*/
public static final String INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
"hbase.increment.fast.but.narrow.consistency";
private final boolean incrementFastButNarrowConsistency;
/**
* This is the global default value for durability. All tables/mutations not
* defining a durability or using USE_DEFAULT will default to this value.
@@ -759,10 +749,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
false :
conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
// See #INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY for what this flag is about.
this.incrementFastButNarrowConsistency =
this.conf.getBoolean(INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
}
void setHTableSpecificConf() {
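
The removed javadoc above says a reader on the fast path only sees the newest increment via READ_UNCOMMITTED. A minimal sketch of that read pattern, mirroring the if (this.fast) branches deleted from the tests below (class, method, and variable names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class FastPathRead {
  // Read the latest counter value under the (now removed) fast path.
  static long readLatestCounter(Table table, byte[] row, byte[] family, byte[] qualifier)
      throws IOException {
    Get get = new Get(row);
    get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); // required on the fast path
    Result r = table.get(get);
    return Bytes.toLong(r.getValue(family, qualifier));
  }
}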
@@ -7595,125 +7581,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
// for the constraints that apply when you take this code path; it is correct, but only if
// Increments alone are used to mutate an Increment Cell; mixing concurrent Put+Delete and
// Increment will yield indeterminate results.
return this.incrementFastButNarrowConsistency?
fastAndNarrowConsistencyIncrement(mutation, nonceGroup, nonce):
slowButConsistentIncrement(mutation, nonceGroup, nonce);
return doIncrement(mutation, nonceGroup, nonce);
} finally {
if (this.metricsRegion != null) this.metricsRegion.updateIncrement();
closeRegionOperation(op);
}
}
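
Client calls funnel into this method; after the commit there is a single doIncrement path regardless of configuration. A hedged usage sketch of the two client entry points exercised by the tests below (table name is illustrative; connection setup assumed):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class IncrementUsage {
  static void examples(Connection connection) throws IOException {
    byte[] row = Bytes.toBytes("r"), fam = Bytes.toBytes("f"), qual = Bytes.toBytes("q");
    try (Table table = connection.getTable(TableName.valueOf("counters"))) {
      // Single-cell convenience call, as in TestIncrementsFromClientSide.
      long newValue = table.incrementColumnValue(row, fam, qual, 5);
      // Multi-cell Increment; all cells of the row are incremented atomically.
      Increment inc = new Increment(row);
      inc.addColumn(fam, qual, 1);
      Result result = table.increment(inc);
    }
  }
}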
/**
* The bulk of this method is a copy-and-paste of slowButConsistentIncrement, with some
* reordering to enable the fast increment (the reordering also lets us drop some
* state-carrying Lists and variables, so the flow here is more straightforward). We
* copy-and-paste because we cannot break the method down into smaller pieces; too much
* state. Will redo in trunk and tip of branch-1 to undo the duplication here and in append,
* checkAnd*, etc. For why this route is 'faster' than the alternative
* slowButConsistentIncrement path, see the comment in the calling method.
* @return Resulting increment
* @throws IOException
*/
private Result fastAndNarrowConsistencyIncrement(Increment increment, long nonceGroup,
long nonce)
throws IOException {
long accumulatedResultSize = 0;
WALKey walKey = null;
long txid = 0;
// These are all the kvs accumulated during this increment processing. Includes increments
// where the increment is zero: i.e. the client just wants the current state of the increment
// w/o changing it. These latter increments by zero are NOT added to the WAL.
List<Cell> allKVs = new ArrayList<Cell>(increment.size());
Durability effectiveDurability = getEffectiveDurability(increment.getDurability());
RowLock rowLock = getRowLockInternal(increment.getRow(), false);
try {
lock(this.updatesLock.readLock());
try {
if (this.coprocessorHost != null) {
Result r = this.coprocessorHost.preIncrementAfterRowLock(increment);
if (r != null) return r;
}
long now = EnvironmentEdgeManager.currentTime();
final boolean writeToWAL = effectiveDurability != Durability.SKIP_WAL;
WALEdit walEdits = null;
// Process increments a Store/family at a time.
// Accumulate edits for memstore to add later after we've added to WAL.
Map<Store, List<Cell>> forMemStore = new HashMap<Store, List<Cell>>();
for (Map.Entry<byte [], List<Cell>> entry: increment.getFamilyCellMap().entrySet()) {
byte [] columnFamilyName = entry.getKey();
List<Cell> increments = entry.getValue();
Store store = this.stores.get(columnFamilyName);
// Do the increment for this store; be sure to 'sort' the increments first so they
// match the order in which we get back the current Cells when we do the get.
List<Cell> results = applyIncrementsToColumnFamily(increment, columnFamilyName,
sort(increments, store.getComparator()), now,
MultiVersionConcurrencyControl.NO_WRITE_NUMBER, allKVs,
IsolationLevel.READ_UNCOMMITTED);
if (!results.isEmpty()) {
forMemStore.put(store, results);
// Prepare WAL updates
if (writeToWAL) {
if (walEdits == null) walEdits = new WALEdit();
walEdits.getCells().addAll(results);
}
}
}
// Actually write to WAL now. If walEdits is non-empty, we write the WAL.
if (walEdits != null && !walEdits.isEmpty()) {
// Using default cluster id, as this can only happen in the originating cluster.
// A slave cluster receives the final value (not the delta) as a Put. We use HLogKey
// here instead of WALKey directly to support legacy coprocessors.
walKey = new HLogKey(this.getRegionInfo().getEncodedNameAsBytes(),
this.htableDescriptor.getTableName(), WALKey.NO_SEQUENCE_ID, nonceGroup, nonce,
getMVCC());
txid =
this.wal.append(this.htableDescriptor, this.getRegionInfo(), walKey, walEdits, true);
} else {
// Append a faked WALEdit in order for SKIP_WAL updates to get mvccNum assigned
walKey = appendEmptyEdit(this.wal);
}
// Get WriteEntry. Will wait on assign of the sequence id. I seem to need this in
// hbase-1.2... post-12751.
walKey.getWriteEntry();
if (txid != 0) syncOrDefer(txid, effectiveDurability);
// Now write to memstore.
for (Map.Entry<Store, List<Cell>> entry: forMemStore.entrySet()) {
Store store = entry.getKey();
List<Cell> results = entry.getValue();
if (store.getFamily().getMaxVersions() == 1) {
// Upsert if VERSIONS for this CF == 1. Use write sequence id rather than read point
// when doing fast increment.
accumulatedResultSize += store.upsert(results, getSmallestReadPoint());
} else {
// Otherwise keep older versions around
for (Cell cell: results) {
accumulatedResultSize += store.add(cell);
}
}
}
// Tell mvcc this write is complete.
this.mvcc.complete(walKey.getWriteEntry());
walKey = null;
} finally {
this.updatesLock.readLock().unlock();
}
} finally {
// walKey is still non-null if the above processing failed; clean up the mvcc transaction.
if (walKey != null) this.mvcc.complete(walKey.getWriteEntry());
rowLock.release();
}
// Request a cache flush. Do it outside update lock.
if (isFlushSize(this.addAndGetGlobalMemstoreSize(accumulatedResultSize))) requestFlush();
return increment.isReturnResults() ? Result.create(allKVs) : null;
}
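
The removed method calls a sort(...) helper that this hunk does not show. A hypothetical reconstruction (not the exact HBase code) of what the call requires: order the increment Cells with the Store's comparator so they line up with the current Cells read back from the Store:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.apache.hadoop.hbase.Cell;

final class SortSketch {
  // Hypothetical stand-in for the helper referenced above.
  static List<Cell> sort(List<Cell> cells, Comparator<Cell> comparator) {
    List<Cell> sorted = new ArrayList<Cell>(cells);
    Collections.sort(sorted, comparator);
    return sorted;
  }
}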
private Result slowButConsistentIncrement(Increment increment, long nonceGroup, long nonce)
throws IOException {
private Result doIncrement(Increment increment, long nonceGroup, long nonce) throws IOException {
RowLock rowLock = null;
WALKey walKey = null;
boolean doRollBackMemstore = false;

org/apache/hadoop/hbase/client/TestIncrementFromClientSideWithCoprocessor.java

@@ -18,12 +18,15 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
import org.apache.hadoop.hbase.regionserver.NoOpScanPolicyObserver;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.Before;
import org.junit.Rule;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestRule;
/**
* Test all {@link Increment} client operations with a coprocessor that
@@ -34,16 +37,14 @@ import org.junit.experimental.categories.Category;
*/
@Category(LargeTests.class)
public class TestIncrementFromClientSideWithCoprocessor extends TestIncrementsFromClientSide {
public TestIncrementFromClientSideWithCoprocessor(final boolean fast) {
super(fast);
}
@Rule
public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
withLookingForStuckThread(true).build();
@Before
public void before() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
MultiRowMutationEndpoint.class.getName(), NoOpScanPolicyObserver.class.getName());
conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
super.before();
}
}

org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java

@@ -28,28 +28,22 @@ import java.util.Collection;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import org.junit.rules.TestRule;
/**
* Run Increment tests that use the HBase clients; {@link HTable}.
@@ -60,7 +54,6 @@ import org.junit.runners.Parameterized.Parameters;
*
* Test takes a long time because we spin up a cluster between each run -- ugh.
*/
@RunWith(Parameterized.class)
@Category(LargeTests.class)
@SuppressWarnings ("deprecation")
public class TestIncrementsFromClientSide {
@@ -71,61 +64,23 @@ public class TestIncrementsFromClientSide {
// This test depends on there being only one slave running at a time. See the @Before
// method where we do rolling restart.
protected static int SLAVES = 1;
private String oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY;
@Rule public TestName name = new TestName();
@Parameters(name = "fast={0}")
@Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
withLookingForStuckThread(true).build();
public static Collection<Object []> data() {
return Arrays.asList(new Object[] {Boolean.FALSE}, new Object [] {Boolean.TRUE});
}
private final boolean fast;
public TestIncrementsFromClientSide(final boolean fast) {
this.fast = fast;
}
@BeforeClass
public static void beforeClass() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
MultiRowMutationEndpoint.class.getName());
MultiRowMutationEndpoint.class.getName());
conf.setBoolean("hbase.table.sanity.checks", true); // enable for below tests
// We need more than one region server in this test
TEST_UTIL.startMiniCluster(SLAVES);
}
@Before
public void before() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
if (this.fast) {
// If fast is set, set our configuration and then do a rolling restart of the one
// regionserver so it picks up the new config. Doing this should be faster than starting
// and stopping a cluster for each test.
this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
conf.get(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
conf.setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, this.fast);
HRegionServer rs =
TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().get(0).getRegionServer();
TEST_UTIL.getHBaseCluster().startRegionServer();
rs.stop("Restart");
while(!rs.isStopped()) {
Threads.sleep(100);
LOG.info("Restarting " + rs);
}
TEST_UTIL.waitUntilNoRegionsInTransition(10000);
}
}
@After
public void after() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
if (this.fast) {
if (this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY != null) {
conf.set(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY,
this.oldINCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
}
}
}
/**
* @throws java.lang.Exception
*/
@@ -151,7 +106,6 @@ public class TestIncrementsFromClientSide {
ht.incrementColumnValue(ROW, FAMILY, COLUMN, 5);
Get get = new Get(ROW);
if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
Result r = ht.get(get);
assertEquals(1, r.size());
assertEquals(5, Bytes.toLong(r.getValue(FAMILY, COLUMN)));
@@ -259,7 +213,6 @@ public class TestIncrementsFromClientSide {
// Verify expected results
Get get = new Get(ROW);
if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
Result r = ht.get(get);
Cell [] kvs = r.rawCells();
assertEquals(3, kvs.length);
@@ -301,7 +254,6 @@ public class TestIncrementsFromClientSide {
// Verify expected results
Get get = new Get(ROW);
if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
Result r = ht.get(get);
Cell[] kvs = r.rawCells();
assertEquals(3, kvs.length);
@@ -363,7 +315,6 @@ public class TestIncrementsFromClientSide {
// Verify expected results
Get get = new Get(ROW);
if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
Result r = ht.get(get);
Cell [] kvs = r.rawCells();
assertEquals(5, kvs.length);
@@ -381,7 +332,6 @@ public class TestIncrementsFromClientSide {
ht.increment(inc);
// Verify
get = new Get(ROWS[0]);
if (this.fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
r = ht.get(get);
kvs = r.rawCells();
assertEquals(QUALIFIERS.length, kvs.length);
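
With the fast/slow split gone, the if (this.fast) guards deleted above are unnecessary: increments are read-consistent at default isolation, so verification in these tests reduces to a plain Get. A sketch of the resulting helper shape (names illustrative):

import static org.junit.Assert.assertEquals;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class ConsistentReadCheck {
  // Default-isolation verification; no READ_UNCOMMITTED special case remains.
  static void assertCounter(Table table, byte[] row, byte[] family, byte[] qualifier,
      long expected) throws IOException {
    Result r = table.get(new Get(row));
    assertEquals(expected, Bytes.toLong(r.getValue(family, qualifier)));
  }
}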

org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java

@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@@ -177,30 +176,11 @@ public class TestAtomicOperation {
}
}
@Test
public void testIncrementMultiThreadsFastPath() throws IOException {
Configuration conf = TEST_UTIL.getConfiguration();
String oldValue = conf.get(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY);
conf.setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, true);
try {
testIncrementMultiThreads(true);
} finally {
if (oldValue != null) conf.set(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, oldValue);
}
}
/**
* Test multi-threaded increments. Take the slow but consistent path through HRegion.
*/
@Test
public void testIncrementMultiThreadsSlowPath() throws IOException {
testIncrementMultiThreads(false);
}
/**
* Test multi-threaded increments.
*/
private void testIncrementMultiThreads(final boolean fast) throws IOException {
@Test
public void testIncrementMultiThreads() throws IOException {
LOG.info("Starting test testIncrementMultiThreads");
// run with mixed column families (1 and 3 versions)
initHRegion(tableName, name.getMethodName(), new int[] {1,3}, fam1, fam2);
@@ -229,9 +209,9 @@ public class TestAtomicOperation {
LOG.info("Ignored", e);
}
}
assertICV(row, fam1, qual1, expectedTotal, fast);
assertICV(row, fam1, qual2, expectedTotal*2, fast);
assertICV(row, fam2, qual3, expectedTotal*3, fast);
assertICV(row, fam1, qual1, expectedTotal);
assertICV(row, fam1, qual2, expectedTotal*2);
assertICV(row, fam2, qual3, expectedTotal*3);
LOG.info("testIncrementMultiThreads successfully verified that total is " + expectedTotal);
}
@@ -239,11 +219,9 @@ public class TestAtomicOperation {
private void assertICV(byte [] row,
byte [] family,
byte[] qualifier,
long amount,
boolean fast) throws IOException {
long amount) throws IOException {
// run a get and see?
Get get = new Get(row);
if (fast) get.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
get.addColumn(family, qualifier);
Result result = region.get(get);
assertEquals(1, result.size());
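
For reference, a hedged sketch of the concurrency pattern the unified testIncrementMultiThreads relies on, written against the client API rather than HRegion internals. Thread and iteration counts are illustrative; because single-cell increments are atomic, the expected total is deterministic:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Table;

final class ConcurrentIncrementSketch {
  static long run(final Table table, final byte[] row, final byte[] fam, final byte[] qual)
      throws Exception {
    final int threads = 10, perThread = 1000; // illustrative counts
    ExecutorService pool = Executors.newFixedThreadPool(threads);
    List<Future<Void>> futures = new ArrayList<Future<Void>>();
    for (int i = 0; i < threads; i++) {
      futures.add(pool.submit(new Callable<Void>() {
        @Override public Void call() throws Exception {
          for (int j = 0; j < perThread; j++) {
            table.incrementColumnValue(row, fam, qual, 1);
          }
          return null;
        }
      }));
    }
    for (Future<Void> f : futures) {
      f.get(); // propagate any worker failure
    }
    pool.shutdown();
    return (long) threads * perThread; // the value a subsequent read must observe
  }
}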

org/apache/hadoop/hbase/regionserver/TestRegionIncrement.java

@@ -43,15 +43,11 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.wal.WAL;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/**
@@ -64,38 +60,20 @@ import org.junit.runners.Parameterized.Parameters;
* prove atomicity on row.
*/
@Category(MediumTests.class)
@RunWith(Parameterized.class)
public class TestRegionIncrement {
private static final Log LOG = LogFactory.getLog(TestRegionIncrement.class);
@Rule public TestName name = new TestName();
@Rule public final TestRule timeout =
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
withLookingForStuckThread(true).build();
private static HBaseTestingUtility TEST_UTIL;
private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static byte [] INCREMENT_BYTES = Bytes.toBytes("increment");
private static final int THREAD_COUNT = 10;
private static final int INCREMENT_COUNT = 10000;
@Parameters(name = "fast={0}")
public static Collection<Object []> data() {
return Arrays.asList(new Object[] {Boolean.FALSE}, new Object [] {Boolean.TRUE});
}
private final boolean fast;
public TestRegionIncrement(final boolean fast) {
this.fast = fast;
}
@Before
public void setUp() throws Exception {
TEST_UTIL = HBaseTestingUtility.createLocalHTU();
if (this.fast) {
TEST_UTIL.getConfiguration().
setBoolean(HRegion.INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, this.fast);
}
}
@After
public void tearDown() throws Exception {
TEST_UTIL.cleanupTestDir();
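
Finally, a hedged sketch of the region-level call TestRegionIncrement ultimately exercises, assuming an open HRegion obtained from the testing utility and HRegion's single-argument increment overload (which delegates with no nonce); after this commit the call always takes the one consistent path:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;

final class RegionIncrementSketch {
  private static final byte[] INCREMENT_BYTES = Bytes.toBytes("increment"); // as in the test

  static long incrementOnce(HRegion region) throws IOException {
    Increment inc = new Increment(Bytes.toBytes("row"));
    inc.addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
    Result r = region.increment(inc);
    return Bytes.toLong(r.getValue(INCREMENT_BYTES, INCREMENT_BYTES));
  }
}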