HBASE-24648 Remove the legacy 'forceSplit' related code at region server side (#1990)
Signed-off-by: Viraj Jasani <vjasani@apache.org>
Commit 3effd28a75 (parent ed6104418c)
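The patch reshapes the region-server split API as the hunks below show: HRegion.checkSplit() now returns Optional<byte[]> instead of a nullable byte[], a checkSplit(boolean force) overload replaces the old forceSplit()/clearSplit() state, and RegionSplitPolicy gains a canSplit() check that HRegion.isSplittable() delegates to. A minimal caller-side sketch of the Optional migration (illustrative only; the SplittableRegion interface and maybeRequestSplit method are assumptions for the sketch, not part of the patch):

import java.util.Optional;

// Sketch of how a caller migrates from the old null-based contract to the new
// Optional-based one, mirroring CompactSplit (orElse(null)) and MemStoreFlusher
// (isPresent()) in the hunks below.
final class SplitCheckSketch {

  // Assumed stand-in for the relevant part of the HRegion API after this patch.
  interface SplittableRegion {
    Optional<byte[]> checkSplit();              // was: byte[] checkSplit(), null = not splittable
    Optional<byte[]> checkSplit(boolean force); // force skips the policy's shouldSplit() check
  }

  static void maybeRequestSplit(SplittableRegion region) {
    // Old: byte[] midKey = region.checkSplit(); if (midKey != null) { ... }
    region.checkSplit().ifPresent(midKey -> {
      // hand midKey to the split requester (e.g. CompactSplit#requestSplit)
    });
  }
}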
@@ -167,12 +167,15 @@ public class SplitTableRegionProcedure
     return daughterTwoRI;
   }
 
+  private boolean hasBestSplitRow() {
+    return bestSplitRow != null && bestSplitRow.length > 0;
+  }
+
   /**
    * Check whether the region is splittable
    * @param env MasterProcedureEnv
    * @param regionToSplit parent Region to be split
    * @param splitRow if splitRow is not specified, will first try to get bestSplitRow from RS
    * @throws IOException
    */
   private void checkSplittable(final MasterProcedureEnv env,
       final RegionInfo regionToSplit, final byte[] splitRow) throws IOException {

@@ -187,19 +190,20 @@ public class SplitTableRegionProcedure
     boolean splittable = false;
     if (node != null) {
       try {
-        if (bestSplitRow == null || bestSplitRow.length == 0) {
-          LOG
-            .info("splitKey isn't explicitly specified, will try to find a best split key from RS");
-        }
-        // Always set bestSplitRow request as true here,
-        // need to call Region#checkSplit to check it splittable or not
-        GetRegionInfoResponse response = AssignmentManagerUtil.getRegionInfoResponse(env,
-          node.getRegionLocation(), node.getRegionInfo(), true);
-        if(bestSplitRow == null || bestSplitRow.length == 0) {
-          bestSplitRow = response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null;
+        GetRegionInfoResponse response;
+        if (!hasBestSplitRow()) {
+          LOG.info(
+            "{} splitKey isn't explicitly specified, will try to find a best split key from RS {}",
+            node.getRegionInfo().getRegionNameAsString(), node.getRegionLocation());
+          response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(),
+            node.getRegionInfo(), true);
+          bestSplitRow =
+            response.hasBestSplitRow() ? response.getBestSplitRow().toByteArray() : null;
+        } else {
+          response = AssignmentManagerUtil.getRegionInfoResponse(env, node.getRegionLocation(),
+            node.getRegionInfo(), false);
         }
         splittable = response.hasSplittable() && response.getSplittable();
 
         if (LOG.isDebugEnabled()) {
           LOG.debug("Splittable=" + splittable + " " + node.toShortString());
         }

@@ -191,7 +191,7 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati
     HRegion hr = (HRegion)r;
     try {
       if (shouldSplitRegion() && hr.getCompactPriority() >= PRIORITY_USER) {
-        byte[] midKey = hr.checkSplit();
+        byte[] midKey = hr.checkSplit().orElse(null);
         if (midKey != null) {
           requestSplit(r, midKey);
           return true;

@@ -216,9 +216,6 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati
       if (midKey == null) {
         LOG.debug("Region " + r.getRegionInfo().getRegionNameAsString() +
           " not splittable because midkey=null");
-        if (((HRegion)r).shouldForceSplit()) {
-          ((HRegion)r).clearSplit();
-        }
         return;
       }
       try {

@@ -68,7 +68,6 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
 
   @Override
   protected boolean shouldSplit() {
-    boolean force = region.shouldForceSplit();
     boolean foundABigStore = false;
 
     for (HStore store : region.getStores()) {

@@ -84,7 +83,7 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
       }
     }
 
-    return foundABigStore || force;
+    return foundABigStore;
   }
 
   long getDesiredMaxFileSize() {

@@ -22,15 +22,20 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 
 /**
- * A {@link RegionSplitPolicy} that disables region splits.
- * This should be used with care, since it will disable automatic sharding.
- * Most of the time, using {@link ConstantSizeRegionSplitPolicy} with a
- * large region size (10GB, etc) is safer.
+ * A {@link RegionSplitPolicy} that disables region splits. This should be used with care, since it
+ * will disable automatic sharding. Most of the time, using {@link ConstantSizeRegionSplitPolicy}
+ * with a large region size (10GB, etc) is safer.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class DisabledRegionSplitPolicy extends RegionSplitPolicy {
+
   @Override
   protected boolean shouldSplit() {
     return false;
   }
+
+  @Override
+  protected boolean canSplit() {
+    return false;
+  }
 }

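With the new canSplit() override above, DisabledRegionSplitPolicy now vetoes splits at the policy level rather than only declining shouldSplit(). A table opts into the policy through its descriptor; the sketch below uses the same setRegionSplitPolicyClassName(...) builder call that the updated TestRegionSplitPolicy uses (table and family names are illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;

// Sketch: build a table descriptor whose regions will never split automatically.
final class NoSplitTableSketch {
  static TableDescriptor noSplitTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("no_split_table"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setRegionSplitPolicyClassName(DisabledRegionSplitPolicy.class.getName())
      .build();
  }
}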
@@ -691,8 +691,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   // Stop updates lock
   private final ReentrantReadWriteLock updatesLock = new ReentrantReadWriteLock();
-  private boolean splitRequest;
-  private byte[] explicitSplitPoint = null;
 
   private final MultiVersionConcurrencyControl mvcc;
 

@@ -1463,7 +1461,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   @Override
   public boolean isSplittable() {
-    return isAvailable() && !hasReferences();
+    return splitPolicy.canSplit();
   }
 
   @Override

@@ -1962,7 +1960,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   /**
    * @return split policy for this region.
    */
-  public RegionSplitPolicy getSplitPolicy() {
+  @VisibleForTesting
+  RegionSplitPolicy getSplitPolicy() {
     return this.splitPolicy;
   }
 

@@ -8346,9 +8345,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT +
       ClassSize.ARRAY +
-      55 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT +
-      (15 * Bytes.SIZEOF_LONG) +
+      56 * ClassSize.REFERENCE +
+      3 * Bytes.SIZEOF_INT +
+      14 * Bytes.SIZEOF_LONG +
       3 * Bytes.SIZEOF_BOOLEAN);
 
   // woefully out of date - currently missing:

@@ -8483,51 +8482,27 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return responseBuilder.build();
   }
 
-  boolean shouldForceSplit() {
-    return this.splitRequest;
-  }
-
-  byte[] getExplicitSplitPoint() {
-    return this.explicitSplitPoint;
-  }
-
-  void forceSplit(byte[] sp) {
-    // This HRegion will go away after the forced split is successful
-    // But if a forced split fails, we need to clear forced split.
-    this.splitRequest = true;
-    if (sp != null) {
-      this.explicitSplitPoint = sp;
-    }
-  }
-
-  void clearSplit() {
-    this.splitRequest = false;
-    this.explicitSplitPoint = null;
+  public Optional<byte[]> checkSplit() {
+    return checkSplit(false);
   }
 
   /**
-   * Return the splitpoint. null indicates the region isn't splittable
-   * If the splitpoint isn't explicitly specified, it will go over the stores
-   * to find the best splitpoint. Currently the criteria of best splitpoint
-   * is based on the size of the store.
+   * Return the split point. An empty result indicates the region isn't splittable.
    */
-  public byte[] checkSplit() {
+  public Optional<byte[]> checkSplit(boolean force) {
     // Can't split META
     if (this.getRegionInfo().isMetaRegion() ||
       TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) {
-      if (shouldForceSplit()) {
-        LOG.warn("Cannot split meta region in HBase 0.20 and above");
-      }
-      return null;
+      return Optional.empty();
     }
 
     // Can't split a region that is closing.
     if (this.isClosing()) {
-      return null;
+      return Optional.empty();
     }
 
-    if (!splitPolicy.shouldSplit()) {
-      return null;
+    if (!force && !splitPolicy.shouldSplit()) {
+      return Optional.empty();
     }
 
     byte[] ret = splitPolicy.getSplitPoint();

@@ -8537,10 +8512,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         checkRow(ret, "calculated split");
       } catch (IOException e) {
         LOG.error("Ignoring invalid split for region {}", this, e);
-        return null;
+        return Optional.empty();
       }
+      return Optional.of(ret);
+    } else {
+      return Optional.empty();
     }
-    return ret;
   }
 
   /**

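The force flag on checkSplit(boolean) replaces the removed per-region forceSplit()/clearSplit() state: passing true skips only the split policy's shouldSplit() check, while the meta/namespace and closing-region guards still apply. A rough sketch of the migration that RSRpcServices performs further below (the helper class is illustrative, not part of the patch):

import java.util.Optional;

import org.apache.hadoop.hbase.regionserver.HRegion;

// Sketch of the stateless replacement for the old force-split sequence.
final class ForcedSplitSketch {
  static Optional<byte[]> bestSplitRow(HRegion region) {
    // Before this patch:
    //   region.forceSplit(null);
    //   byte[] row = region.checkSplit();
    //   region.clearSplit();
    // After: a single call; force=true bypasses splitPolicy.shouldSplit() only.
    return region.checkSplit(true);
  }
}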
@@ -70,7 +70,6 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS
 
   @Override
   protected boolean shouldSplit() {
-    boolean force = region.shouldForceSplit();
     boolean foundABigStore = false;
     // Get count of regions that have the same common table as this.region
     int tableRegionsCount = getCountOfCommonTableRegions();

@@ -95,7 +94,7 @@ public class IncreasingToUpperBoundRegionSplitPolicy extends ConstantSizeRegionS
       }
     }
 
-    return foundABigStore || force;
+    return foundABigStore;
   }
 
   /**

@@ -610,7 +610,7 @@ class MemStoreFlusher implements FlushRequester {
       FlushResult flushResult = region.flushcache(forceFlushAllStores, false, tracker);
       boolean shouldCompact = flushResult.isCompactionNeeded();
       // We just want to check the size
-      boolean shouldSplit = region.checkSplit() != null;
+      boolean shouldSplit = region.checkSplit().isPresent();
       if (shouldSplit) {
         this.server.compactSplitThread.requestSplit(region);
       } else if (shouldCompact) {

@@ -1839,29 +1839,24 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     requestCount.increment();
     HRegion region = getRegion(request.getRegion());
     RegionInfo info = region.getRegionInfo();
-    byte[] bestSplitRow = null;
-    boolean shouldSplit = true;
+    byte[] bestSplitRow;
     if (request.hasBestSplitRow() && request.getBestSplitRow()) {
-      HRegion r = region;
       region.startRegionOperation(Operation.SPLIT_REGION);
-      r.forceSplit(null);
-      // Even after setting force split if split policy says no to split then we should not split.
-      shouldSplit = region.getSplitPolicy().shouldSplit() && !info.isMetaRegion();
-      bestSplitRow = r.checkSplit();
+      bestSplitRow = region.checkSplit(true).orElse(null);
       // when all table data are in memstore, bestSplitRow = null
       // try to flush region first
       if (bestSplitRow == null) {
-        r.flush(true);
-        bestSplitRow = r.checkSplit();
+        region.flush(true);
+        bestSplitRow = region.checkSplit(true).orElse(null);
       }
-      r.clearSplit();
+    } else {
+      bestSplitRow = null;
     }
     GetRegionInfoResponse.Builder builder = GetRegionInfoResponse.newBuilder();
     builder.setRegionInfo(ProtobufUtil.toRegionInfo(info));
     if (request.hasCompactionState() && request.getCompactionState()) {
       builder.setCompactionState(ProtobufUtil.createCompactionState(region.getCompactionState()));
     }
-    builder.setSplittable(region.isSplittable() && shouldSplit);
+    builder.setSplittable(region.isSplittable());
     builder.setMergeable(region.isMergeable());
     if (request.hasBestSplitRow() && request.getBestSplitRow() && bestSplitRow != null) {
       builder.setBestSplitRow(UnsafeByteOperations.unsafeWrap(bestSplitRow));

@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -70,16 +71,21 @@ public abstract class RegionSplitPolicy extends Configured {
    */
   protected abstract boolean shouldSplit();
 
+  /**
+   * @return {@code true} if the specified region can be split.
+   */
+  protected boolean canSplit() {
+    return !region.getRegionInfo().isMetaRegion() &&
+      !TableName.NAMESPACE_TABLE_NAME.equals(region.getRegionInfo().getTable()) &&
+      region.isAvailable() && !region.hasReferences();
+  }
+
   /**
    * @return the key at which the region should be split, or null
    *         if it cannot be split. This will only be called if shouldSplit
    *         previously returned true.
    */
   protected byte[] getSplitPoint() {
-    byte[] explicitSplitPoint = this.region.getExplicitSplitPoint();
-    if (explicitSplitPoint != null) {
-      return explicitSplitPoint;
-    }
     List<HStore> stores = region.getStores();
 
     byte[] splitPointFromLargestStore = null;

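With this change a split policy answers two separate questions: shouldSplit() (should the region split now) and the new canSplit() (may the region be split at all). The default canSplit() refuses meta and namespace regions, unavailable regions, and regions with reference files. An illustrative custom policy layering an extra gate on top of the default (the class and its maintenance-window rule are assumptions, not part of the patch):

import java.time.LocalTime;

import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;

// Illustrative policy: the size-based shouldSplit() is inherited, while canSplit()
// additionally restricts splitting to a nightly window on top of the default
// meta/namespace/reference-file checks.
public class MaintenanceWindowSplitPolicy extends ConstantSizeRegionSplitPolicy {

  private static boolean inMaintenanceWindow() {
    int hour = LocalTime.now().getHour();
    return hour >= 1 && hour < 5; // illustrative window
  }

  @Override
  protected boolean canSplit() {
    return super.canSplit() && inMaintenanceWindow();
  }
}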
@@ -3247,9 +3247,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     unassignRegion(hrl.getRegionInfo().getRegionName());
   }
 
-  /*
+  /**
    * Retrieves a splittable region randomly from tableName
-   *
    * @param tableName name of table
    * @param maxAttempts maximum number of attempts, unlimited for value of -1
    * @return the HRegion chosen, null if none was found within limit of maxAttempts

@@ -3272,15 +3271,14 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
       if (regCount > 0) {
         idx = random.nextInt(regCount);
         // if we have just tried this region, there is no need to try again
-        if (attempted.contains(idx))
+        if (attempted.contains(idx)) {
           continue;
-        try {
-          regions.get(idx).checkSplit();
-          return regions.get(idx);
-        } catch (Exception ex) {
-          LOG.warn("Caught exception", ex);
-          attempted.add(idx);
         }
+        HRegion region = regions.get(idx);
+        if (region.checkSplit().isPresent()) {
+          return region;
+        }
+        attempted.add(idx);
       }
       attempts++;
     } while (maxAttempts == -1 || attempts < maxAttempts);

@@ -21,6 +21,7 @@ import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;

@@ -69,6 +70,7 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.io.Closeables;

@@ -120,7 +122,6 @@ public class TestEndToEndSplitTransaction {
     admin.createTable(htd);
     TEST_UTIL.loadTable(source, fam);
     compactSplit.setCompactionsEnabled(false);
-    TEST_UTIL.getHBaseCluster().getRegions(tableName).get(0).forceSplit(null);
     admin.split(tableName);
     TEST_UTIL.waitFor(60000, () -> TEST_UTIL.getHBaseCluster().getRegions(tableName).size() == 2);
 

@@ -874,9 +874,6 @@ public class TestHStore {
   public void testSplitWithEmptyColFam() throws IOException {
     init(this.name.getMethodName());
     assertFalse(store.getSplitPoint().isPresent());
-    store.getHRegion().forceSplit(null);
-    assertFalse(store.getSplitPoint().isPresent());
-    store.getHRegion().clearSplit();
   }
 
   @Test

@@ -21,6 +21,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;

@@ -30,20 +33,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.junit.Before;
 import org.junit.ClassRule;
-import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.mockito.Mockito;
 
 @Category({ RegionServerTests.class, SmallTests.class })
 public class TestRegionSplitPolicy {

@@ -53,35 +55,30 @@ public class TestRegionSplitPolicy {
       HBaseClassTestRule.forClass(TestRegionSplitPolicy.class);
 
   private Configuration conf;
-  private HTableDescriptor htd;
   private HRegion mockRegion;
   private List<HStore> stores;
   private static final TableName TABLENAME = TableName.valueOf("t");
 
-  @Rule
-  public TestName name = new TestName();
-
   @Before
   public void setupMocks() {
     conf = HBaseConfiguration.create();
-    HRegionInfo hri = new HRegionInfo(TABLENAME);
-    htd = new HTableDescriptor(TABLENAME);
-    mockRegion = Mockito.mock(HRegion.class);
-    Mockito.doReturn(htd).when(mockRegion).getTableDescriptor();
-    Mockito.doReturn(hri).when(mockRegion).getRegionInfo();
+    RegionInfo hri = RegionInfoBuilder.newBuilder(TABLENAME).build();
+    mockRegion = mock(HRegion.class);
+    doReturn(hri).when(mockRegion).getRegionInfo();
     stores = new ArrayList<>();
-    Mockito.doReturn(stores).when(mockRegion).getStores();
+    doReturn(stores).when(mockRegion).getStores();
   }
 
   @Test
   public void testForceSplitRegionWithReference() throws IOException {
-    htd.setMaxFileSize(1024L);
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(1024L).build();
+    doReturn(td).when(mockRegion).getTableDescriptor();
     // Add a store above the requisite size. Should split.
-    HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.doReturn(2000L).when(mockStore).getSize();
+    HStore mockStore = mock(HStore.class);
+    doReturn(2000L).when(mockStore).getSize();
     // Act as if there's a reference file or some other reason it can't split.
     // This should prevent splitting even though it's big enough.
-    Mockito.doReturn(false).when(mockStore).canSplit();
+    doReturn(false).when(mockStore).canSplit();
     stores.add(mockStore);
 
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,

@@ -89,16 +86,11 @@ public class TestRegionSplitPolicy {
     ConstantSizeRegionSplitPolicy policy =
       (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
     assertFalse(policy.shouldSplit());
-    Mockito.doReturn(true).when(mockRegion).shouldForceSplit();
-    assertFalse(policy.shouldSplit());
 
-    Mockito.doReturn(false).when(mockRegion).shouldForceSplit();
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       IncreasingToUpperBoundRegionSplitPolicy.class.getName());
     policy = (IncreasingToUpperBoundRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
     assertFalse(policy.shouldSplit());
-    Mockito.doReturn(true).when(mockRegion).shouldForceSplit();
-    assertFalse(policy.shouldSplit());
   }
 
   @Test

@@ -108,18 +100,19 @@ public class TestRegionSplitPolicy {
       IncreasingToUpperBoundRegionSplitPolicy.class.getName());
     // Now make it so the mock region has a RegionServerService that will
     // return 'online regions'.
-    RegionServerServices rss = Mockito.mock(RegionServerServices.class);
+    RegionServerServices rss = mock(RegionServerServices.class);
     final List<HRegion> regions = new ArrayList<>();
-    Mockito.doReturn(regions).when(rss).getRegions(TABLENAME);
-    Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
+    doReturn(regions).when(rss).getRegions(TABLENAME);
+    when(mockRegion.getRegionServerServices()).thenReturn(rss);
     // Set max size for this 'table'.
     long maxSplitSize = 1024L;
-    htd.setMaxFileSize(maxSplitSize);
     // Set flush size to 1/8. IncreasingToUpperBoundRegionSplitPolicy
     // grows by the cube of the number of regions times flushsize each time.
     long flushSize = maxSplitSize / 8;
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSize);
-    htd.setMemStoreFlushSize(flushSize);
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(maxSplitSize)
+      .setMemStoreFlushSize(flushSize).build();
+    doReturn(td).when(mockRegion).getTableDescriptor();
     // If RegionServerService with no regions in it -- 'online regions' == 0 --
     // then IncreasingToUpperBoundRegionSplitPolicy should act like a
     // ConstantSizePolicy

@@ -130,9 +123,9 @@ public class TestRegionSplitPolicy {
     // Add a store in excess of split size. Because there are "no regions"
     // on this server -- rss.getOnlineRegions is 0 -- then we should split
     // like a constantsizeregionsplitpolicy would
-    HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.doReturn(2000L).when(mockStore).getSize();
-    Mockito.doReturn(true).when(mockStore).canSplit();
+    HStore mockStore = mock(HStore.class);
+    doReturn(2000L).when(mockStore).getSize();
+    doReturn(true).when(mockStore).canSplit();
     stores.add(mockStore);
     // It should split
     assertTrue(policy.shouldSplit());

@@ -140,18 +133,18 @@ public class TestRegionSplitPolicy {
     // Now test that we increase our split size as online regions for a table
     // grows. With one region, split size should be flushsize.
     regions.add(mockRegion);
-    Mockito.doReturn(flushSize).when(mockStore).getSize();
+    doReturn(flushSize).when(mockStore).getSize();
     // Should not split since store is flush size.
     assertFalse(policy.shouldSplit());
     // Set size of store to be > 2*flush size and we should split
-    Mockito.doReturn(flushSize*2 + 1).when(mockStore).getSize();
+    doReturn(flushSize * 2 + 1).when(mockStore).getSize();
     assertTrue(policy.shouldSplit());
     // Add another region to the 'online regions' on this server and we should
     // now be no longer be splittable since split size has gone up.
     regions.add(mockRegion);
     assertFalse(policy.shouldSplit());
     // make sure its just over; verify it'll split
-    Mockito.doReturn((long)(maxSplitSize * 1.25 + 1)).when(mockStore).getSize();
+    doReturn((long) (maxSplitSize * 1.25 + 1)).when(mockStore).getSize();
     assertTrue(policy.shouldSplit());
 
     // Finally assert that even if loads of regions, we'll split at max size

@@ -162,41 +155,39 @@ public class TestRegionSplitPolicy {
 
   @Test
   public void testBusyRegionSplitPolicy() throws Exception {
-    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
-      BusyRegionSplitPolicy.class.getName());
+    doReturn(TableDescriptorBuilder.newBuilder(TABLENAME).build()).when(mockRegion)
+      .getTableDescriptor();
+    conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, BusyRegionSplitPolicy.class.getName());
     conf.setLong("hbase.busy.policy.minAge", 1000000L);
     conf.setFloat("hbase.busy.policy.blockedRequests", 0.1f);
 
-    RegionServerServices rss = Mockito.mock(RegionServerServices.class);
+    RegionServerServices rss = mock(RegionServerServices.class);
     final List<HRegion> regions = new ArrayList<>();
-    Mockito.doReturn(regions).when(rss).getRegions(TABLENAME);
-    Mockito.when(mockRegion.getRegionServerServices()).thenReturn(rss);
-    Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(0L);
-    Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(0L);
-
+    doReturn(regions).when(rss).getRegions(TABLENAME);
+    when(mockRegion.getRegionServerServices()).thenReturn(rss);
+    when(mockRegion.getBlockedRequestsCount()).thenReturn(0L);
+    when(mockRegion.getWriteRequestsCount()).thenReturn(0L);
 
     BusyRegionSplitPolicy policy =
       (BusyRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
 
-    Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(10L);
-    Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(10L);
+    when(mockRegion.getBlockedRequestsCount()).thenReturn(10L);
+    when(mockRegion.getWriteRequestsCount()).thenReturn(10L);
     // Not enough time since region came online
     assertFalse(policy.shouldSplit());
 
-
     // Reset min age for split to zero
     conf.setLong("hbase.busy.policy.minAge", 0L);
     // Aggregate over 500 ms periods
     conf.setLong("hbase.busy.policy.aggWindow", 500L);
-    policy =
-      (BusyRegionSplitPolicy)RegionSplitPolicy.create(mockRegion, conf);
+    policy = (BusyRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
     long start = EnvironmentEdgeManager.currentTime();
-    Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(10L);
-    Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(20L);
+    when(mockRegion.getBlockedRequestsCount()).thenReturn(10L);
+    when(mockRegion.getWriteRequestsCount()).thenReturn(20L);
     Thread.sleep(300);
     assertFalse(policy.shouldSplit());
-    Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(12L);
-    Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(30L);
+    when(mockRegion.getBlockedRequestsCount()).thenReturn(12L);
+    when(mockRegion.getWriteRequestsCount()).thenReturn(30L);
     Thread.sleep(2);
     // Enough blocked requests since last time, but aggregate blocked request
     // rate over last 500 ms is still low, because major portion of the window is constituted

@@ -204,8 +195,8 @@ public class TestRegionSplitPolicy {
     if (EnvironmentEdgeManager.currentTime() - start < 500) {
       assertFalse(policy.shouldSplit());
     }
-    Mockito.when(mockRegion.getBlockedRequestsCount()).thenReturn(14L);
-    Mockito.when(mockRegion.getWriteRequestsCount()).thenReturn(40L);
+    when(mockRegion.getBlockedRequestsCount()).thenReturn(14L);
+    when(mockRegion.getWriteRequestsCount()).thenReturn(40L);
     Thread.sleep(200);
     assertTrue(policy.shouldSplit());
   }

@@ -213,25 +204,24 @@ public class TestRegionSplitPolicy {
   private void assertWithinJitter(long maxSplitSize, long sizeToCheck) {
     assertTrue("Size greater than lower bound of jitter",
       (long) (maxSplitSize * 0.75) <= sizeToCheck);
-    assertTrue("Size less than upper bound of jitter",
-      (long)(maxSplitSize * 1.25) >= sizeToCheck);
+    assertTrue("Size less than upper bound of jitter", (long) (maxSplitSize * 1.25) >= sizeToCheck);
   }
 
   @Test
   public void testCreateDefault() throws IOException {
     conf.setLong(HConstants.HREGION_MAX_FILESIZE, 1234L);
-
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).build();
+    doReturn(td).when(mockRegion).getTableDescriptor();
     // Using a default HTD, should pick up the file size from
     // configuration.
     ConstantSizeRegionSplitPolicy policy =
-      (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(
-        mockRegion, conf);
+      (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
     assertWithinJitter(1234L, policy.getDesiredMaxFileSize());
 
     // If specified in HTD, should use that
-    htd.setMaxFileSize(9999L);
-    policy = (ConstantSizeRegionSplitPolicy)RegionSplitPolicy.create(
-      mockRegion, conf);
+    td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(9999L).build();
+    doReturn(td).when(mockRegion).getTableDescriptor();
+    policy = (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
     assertWithinJitter(9999L, policy.getDesiredMaxFileSize());
   }
 

@@ -240,39 +230,28 @@ public class TestRegionSplitPolicy {
    */
   @Test
   public void testCustomPolicy() throws IOException {
-    HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-    myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
-      KeyPrefixRegionSplitPolicy.class.getName());
-    myHtd.setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, String.valueOf(2));
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME)
+      .setRegionSplitPolicyClassName(KeyPrefixRegionSplitPolicy.class.getName())
+      .setValue(KeyPrefixRegionSplitPolicy.PREFIX_LENGTH_KEY, "2").build();
 
-    HRegion myMockRegion = Mockito.mock(HRegion.class);
-    Mockito.doReturn(myHtd).when(myMockRegion).getTableDescriptor();
-    Mockito.doReturn(stores).when(myMockRegion).getStores();
+    doReturn(td).when(mockRegion).getTableDescriptor();
 
-    HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.doReturn(2000L).when(mockStore).getSize();
-    Mockito.doReturn(true).when(mockStore).canSplit();
-    Mockito.doReturn(Optional.of(Bytes.toBytes("abcd"))).when(mockStore).getSplitPoint();
+    HStore mockStore = mock(HStore.class);
+    doReturn(2000L).when(mockStore).getSize();
+    doReturn(true).when(mockStore).canSplit();
+    doReturn(Optional.of(Bytes.toBytes("abcd"))).when(mockStore).getSplitPoint();
     stores.add(mockStore);
 
-    KeyPrefixRegionSplitPolicy policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy
-        .create(myMockRegion, conf);
+    KeyPrefixRegionSplitPolicy policy =
+      (KeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
 
     assertEquals("ab", Bytes.toString(policy.getSplitPoint()));
-
-    Mockito.doReturn(true).when(myMockRegion).shouldForceSplit();
-    Mockito.doReturn(Bytes.toBytes("efgh")).when(myMockRegion)
-        .getExplicitSplitPoint();
-
-    policy = (KeyPrefixRegionSplitPolicy) RegionSplitPolicy
-        .create(myMockRegion, conf);
-
-    assertEquals("ef", Bytes.toString(policy.getSplitPoint()));
   }
 
   @Test
   public void testConstantSizePolicy() throws IOException {
-    htd.setMaxFileSize(1024L);
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(1024L).build();
+    doReturn(td).when(mockRegion).getTableDescriptor();
     ConstantSizeRegionSplitPolicy policy =
       (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
     doConstantSizePolicyTests(policy);

@@ -280,35 +259,29 @@ public class TestRegionSplitPolicy {
 
   /**
    * Run through tests for a ConstantSizeRegionSplitPolicy
-   * @param policy
    */
   private void doConstantSizePolicyTests(final ConstantSizeRegionSplitPolicy policy) {
     // For no stores, should not split
     assertFalse(policy.shouldSplit());
 
     // Add a store above the requisite size. Should split.
-    HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.doReturn(2000L).when(mockStore).getSize();
-    Mockito.doReturn(true).when(mockStore).canSplit();
+    HStore mockStore = mock(HStore.class);
+    doReturn(2000L).when(mockStore).getSize();
+    doReturn(true).when(mockStore).canSplit();
     stores.add(mockStore);
 
     assertTrue(policy.shouldSplit());
 
     // Act as if there's a reference file or some other reason it can't split.
     // This should prevent splitting even though it's big enough.
-    Mockito.doReturn(false).when(mockStore).canSplit();
+    doReturn(false).when(mockStore).canSplit();
     assertFalse(policy.shouldSplit());
 
     // Reset splittability after above
-    Mockito.doReturn(true).when(mockStore).canSplit();
+    doReturn(true).when(mockStore).canSplit();
 
-    // Set to a small size but turn on forceSplit. Should result in a split.
-    Mockito.doReturn(true).when(mockRegion).shouldForceSplit();
-    Mockito.doReturn(100L).when(mockStore).getSize();
-    assertTrue(policy.shouldSplit());
-
-    // Turn off forceSplit, should not split
-    Mockito.doReturn(false).when(mockRegion).shouldForceSplit();
+    // Set to a small size, should not split
+    doReturn(100L).when(mockStore).getSize();
     assertFalse(policy.shouldSplit());
 
     // Clear families we added above

@@ -317,6 +290,9 @@ public class TestRegionSplitPolicy {
 
   @Test
   public void testGetSplitPoint() throws IOException {
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME).build();
+    doReturn(td).when(mockRegion).getTableDescriptor();
+
     ConstantSizeRegionSplitPolicy policy =
       (ConstantSizeRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
 

@@ -325,59 +301,45 @@ public class TestRegionSplitPolicy {
     assertNull(policy.getSplitPoint());
 
     // Add a store above the requisite size. Should split.
-    HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.doReturn(2000L).when(mockStore).getSize();
-    Mockito.doReturn(true).when(mockStore).canSplit();
-    Mockito.doReturn(Optional.of(Bytes.toBytes("store 1 split"))).when(mockStore).getSplitPoint();
+    HStore mockStore = mock(HStore.class);
+    doReturn(2000L).when(mockStore).getSize();
+    doReturn(true).when(mockStore).canSplit();
+    doReturn(Optional.of(Bytes.toBytes("store 1 split"))).when(mockStore).getSplitPoint();
     stores.add(mockStore);
 
-    assertEquals("store 1 split",
-      Bytes.toString(policy.getSplitPoint()));
+    assertEquals("store 1 split", Bytes.toString(policy.getSplitPoint()));
 
     // Add a bigger store. The split point should come from that one
-    HStore mockStore2 = Mockito.mock(HStore.class);
-    Mockito.doReturn(4000L).when(mockStore2).getSize();
-    Mockito.doReturn(true).when(mockStore2).canSplit();
-    Mockito.doReturn(Optional.of(Bytes.toBytes("store 2 split"))).when(mockStore2).getSplitPoint();
+    HStore mockStore2 = mock(HStore.class);
+    doReturn(4000L).when(mockStore2).getSize();
+    doReturn(true).when(mockStore2).canSplit();
+    doReturn(Optional.of(Bytes.toBytes("store 2 split"))).when(mockStore2).getSplitPoint();
     stores.add(mockStore2);
 
-    assertEquals("store 2 split",
-      Bytes.toString(policy.getSplitPoint()));
+    assertEquals("store 2 split", Bytes.toString(policy.getSplitPoint()));
   }
 
   @Test
   public void testDelimitedKeyPrefixRegionSplitPolicy() throws IOException {
-    HTableDescriptor myHtd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
-    myHtd.setValue(HTableDescriptor.SPLIT_POLICY,
-      DelimitedKeyPrefixRegionSplitPolicy.class.getName());
-    myHtd.setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",");
+    TableDescriptor td = TableDescriptorBuilder.newBuilder(TABLENAME)
+      .setRegionSplitPolicyClassName(DelimitedKeyPrefixRegionSplitPolicy.class.getName())
+      .setValue(DelimitedKeyPrefixRegionSplitPolicy.DELIMITER_KEY, ",").build();
 
-    HRegion myMockRegion = Mockito.mock(HRegion.class);
-    Mockito.doReturn(myHtd).when(myMockRegion).getTableDescriptor();
-    Mockito.doReturn(stores).when(myMockRegion).getStores();
+    doReturn(td).when(mockRegion).getTableDescriptor();
+    doReturn(stores).when(mockRegion).getStores();
 
-    HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.doReturn(2000L).when(mockStore).getSize();
-    Mockito.doReturn(true).when(mockStore).canSplit();
-    Mockito.doReturn(Optional.of(Bytes.toBytes("ab,cd"))).when(mockStore).getSplitPoint();
+    HStore mockStore = mock(HStore.class);
+    doReturn(2000L).when(mockStore).getSize();
+    doReturn(true).when(mockStore).canSplit();
+    doReturn(Optional.of(Bytes.toBytes("ab,cd"))).when(mockStore).getSplitPoint();
     stores.add(mockStore);
 
-    DelimitedKeyPrefixRegionSplitPolicy policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy
-        .create(myMockRegion, conf);
+    DelimitedKeyPrefixRegionSplitPolicy policy =
+      (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
 
     assertEquals("ab", Bytes.toString(policy.getSplitPoint()));
 
-    Mockito.doReturn(true).when(myMockRegion).shouldForceSplit();
-    Mockito.doReturn(Bytes.toBytes("efg,h")).when(myMockRegion)
-        .getExplicitSplitPoint();
-
-    policy = (DelimitedKeyPrefixRegionSplitPolicy) RegionSplitPolicy
-        .create(myMockRegion, conf);
-
-    assertEquals("efg", Bytes.toString(policy.getSplitPoint()));
-
-    Mockito.doReturn(Bytes.toBytes("ijk")).when(myMockRegion)
-        .getExplicitSplitPoint();
+    doReturn(Optional.of(Bytes.toBytes("ijk"))).when(mockStore).getSplitPoint();
     assertEquals("ijk", Bytes.toString(policy.getSplitPoint()));
   }
 

@@ -385,7 +347,9 @@ public class TestRegionSplitPolicy {
   public void testConstantSizePolicyWithJitter() throws IOException {
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       ConstantSizeRegionSplitPolicy.class.getName());
-    htd.setMaxFileSize(Long.MAX_VALUE);
+    TableDescriptor td =
+      TableDescriptorBuilder.newBuilder(TABLENAME).setMaxFileSize(Long.MAX_VALUE).build();
+    doReturn(td).when(mockRegion).getTableDescriptor();
     boolean positiveJitter = false;
     ConstantSizeRegionSplitPolicy policy = null;
     while (!positiveJitter) {

@@ -393,12 +357,11 @@ public class TestRegionSplitPolicy {
       positiveJitter = policy.positiveJitterRate();
     }
     // add a store
-    HStore mockStore = Mockito.mock(HStore.class);
-    Mockito.doReturn(2000L).when(mockStore).getSize();
-    Mockito.doReturn(true).when(mockStore).canSplit();
+    HStore mockStore = mock(HStore.class);
+    doReturn(2000L).when(mockStore).getSize();
+    doReturn(true).when(mockStore).canSplit();
     stores.add(mockStore);
     // Jitter shouldn't cause overflow when HTableDescriptor.MAX_FILESIZE set to Long.MAX_VALUE
     assertFalse(policy.shouldSplit());
   }
 
 }
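The rewritten test also drops the deprecated HRegionInfo/HTableDescriptor classes in favour of the builder-based RegionInfo/TableDescriptor APIs and statically imported Mockito helpers. A condensed sketch of that mocking idiom (the helper class and values are illustrative):

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegion;

// Sketch of the test setup pattern used above: immutable descriptors from
// builders, stubbed onto a mocked HRegion.
final class MockRegionSketch {
  static HRegion mockRegionFor(TableName tableName, long maxFileSize) {
    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName).build();
    TableDescriptor td =
      TableDescriptorBuilder.newBuilder(tableName).setMaxFileSize(maxFileSize).build();
    HRegion region = mock(HRegion.class);
    doReturn(hri).when(region).getRegionInfo();
    doReturn(td).when(region).getTableDescriptor();
    return region;
  }
}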