plans) {
+ // As of this writing, plan submission (via the Async Admin APIs) is non-blocking, so there is
+ // no artificial rate-limiting of merge/split requests due to this serial loop.
+ for (NormalizationPlan plan : plans) {
+ switch (plan.getType()) {
+ case MERGE: {
+ submitMergePlan((MergeNormalizationPlan) plan);
+ break;
+ }
+ case SPLIT: {
+ submitSplitPlan((SplitNormalizationPlan) plan);
+ break;
+ }
+ case NONE:
+ LOG.debug("Nothing to do for {} with PlanType=NONE. Ignoring.", plan);
+ planSkipped(plan.getType());
+ break;
+ default:
+ LOG.warn("Plan {} is of an unrecognized PlanType. Ignoring.", plan);
+ planSkipped(plan.getType());
+ break;
+ }
+ }
+ }
+
+ /**
+ * Interacts with {@link MasterServices} in order to execute a plan.
+ */
+ private void submitMergePlan(final MergeNormalizationPlan plan) {
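+ // The rate limiter acquires permits as an int, so reject any plan whose total region size
+ // overflows that representation (Math.addExact / Math.toIntExact throw ArithmeticException).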
+ final int totalSizeMb;
+ try {
+ final long totalSizeMbLong = plan.getNormalizationTargets()
+ .stream()
+ .mapToLong(NormalizationTarget::getRegionSizeMb)
+ .reduce(0, Math::addExact);
+ totalSizeMb = Math.toIntExact(totalSizeMbLong);
+ } catch (ArithmeticException e) {
+ LOG.debug("Sum of merge request size overflows rate limiter data type. {}", plan);
+ planSkipped(plan.getType());
+ return;
+ }
+
+ final RegionInfo[] infos = plan.getNormalizationTargets()
+ .stream()
+ .map(NormalizationTarget::getRegionInfo)
+ .toArray(RegionInfo[]::new);
+ final long pid;
+ try {
+ pid = masterServices.mergeRegions(
+ infos, false, HConstants.NO_NONCE, HConstants.NO_NONCE);
+ } catch (IOException e) {
+ LOG.info("failed to submit plan {}.", plan, e);
+ planSkipped(plan.getType());
+ return;
+ }
+ mergePlanCount++;
+ LOG.info("Submitted {} resulting in pid {}", plan, pid);
+ final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb)));
+ LOG.debug("Rate limiting delayed the worker by {}", Duration.ofSeconds(rateLimitedSecs));
+ }
+
+ /**
+ * Interacts with {@link MasterServices} in order to execute a plan.
+ */
+ private void submitSplitPlan(final SplitNormalizationPlan plan) {
+ final int totalSizeMb;
+ try {
+ totalSizeMb = Math.toIntExact(plan.getSplitTarget().getRegionSizeMb());
+ } catch (ArithmeticException e) {
+ LOG.debug("Split request size overflows rate limiter data type. {}", plan);
+ planSkipped(plan.getType());
+ return;
+ }
+ final RegionInfo info = plan.getSplitTarget().getRegionInfo();
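+ // Note: unlike the merge path above, permits are acquired before the split is submitted, so
+ // the rate limiter's delay lands ahead of this submission.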
+ final long rateLimitedSecs = Math.round(rateLimiter.acquire(Math.max(1, totalSizeMb)));
+ LOG.debug("Rate limiting delayed this operation by {}", Duration.ofSeconds(rateLimitedSecs));
+
+ final long pid;
+ try {
+ pid = masterServices.splitRegion(
+ info, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
+ } catch (IOException e) {
+ LOG.info("failed to submit plan {}.", plan, e);
+ planSkipped(plan.getType());
+ return;
+ }
+ splitPlanCount++;
+ LOG.info("Submitted {} resulting in pid {}", plan, pid);
+ }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
index a904e17f7b0..a641a0aa25b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.RegionStates;
-import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
@@ -54,29 +53,9 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUti
* Otherwise, for the next region in the chain R1, if R0 + R1 is smaller than S, R0 and R1
* are kindly requested to merge.
*
- *
- * The following parameters are configurable:
- *
- * - Whether to split a region as part of normalization. Configuration:
- * {@value #SPLIT_ENABLED_KEY}, default: {@value #DEFAULT_SPLIT_ENABLED}.
- * - Whether to merge a region as part of normalization. Configuration:
- * {@value #MERGE_ENABLED_KEY}, default: {@value #DEFAULT_MERGE_ENABLED}.
- * - The minimum number of regions in a table to consider it for merge normalization.
- * Configuration: {@value #MIN_REGION_COUNT_KEY}, default:
- * {@value #DEFAULT_MIN_REGION_COUNT}.
- * - The minimum age for a region to be considered for a merge, in days. Configuration:
- * {@value #MERGE_MIN_REGION_AGE_DAYS_KEY}, default:
- * {@value #DEFAULT_MERGE_MIN_REGION_AGE_DAYS}.
- * - The minimum size for a region to be considered for a merge, in whole MBs. Configuration:
- * {@value #MERGE_MIN_REGION_SIZE_MB_KEY}, default:
- * {@value #DEFAULT_MERGE_MIN_REGION_SIZE_MB}.
- *
- *
- * To see detailed logging of the application of these configuration values, set the log level for
- * this class to `TRACE`.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class SimpleRegionNormalizer implements RegionNormalizer {
+class SimpleRegionNormalizer implements RegionNormalizer {
private static final Logger LOG = LoggerFactory.getLogger(SimpleRegionNormalizer.class);
static final String SPLIT_ENABLED_KEY = "hbase.normalizer.split.enabled";
@@ -92,7 +71,6 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
static final String MERGE_MIN_REGION_SIZE_MB_KEY = "hbase.normalizer.merge.min_region_size.mb";
static final int DEFAULT_MERGE_MIN_REGION_SIZE_MB = 1;
- private final long[] skippedCount;
private Configuration conf;
private MasterServices masterServices;
private boolean splitEnabled;
@@ -102,7 +80,6 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
private int mergeMinRegionSizeMb;
public SimpleRegionNormalizer() {
- skippedCount = new long[NormalizationPlan.PlanType.values().length];
splitEnabled = DEFAULT_SPLIT_ENABLED;
mergeEnabled = DEFAULT_MERGE_ENABLED;
minRegionCount = DEFAULT_MIN_REGION_COUNT;
@@ -203,16 +180,6 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
this.masterServices = masterServices;
}
- @Override
- public void planSkipped(final RegionInfo hri, final PlanType type) {
- skippedCount[type.ordinal()]++;
- }
-
- @Override
- public long getSkippedCount(NormalizationPlan.PlanType type) {
- return skippedCount[type.ordinal()];
- }
-
@Override
public List<NormalizationPlan> computePlansForTable(final TableName table) {
if (table == null) {
@@ -371,7 +338,11 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
final long nextSizeMb = getRegionSizeMB(next);
// always merge away empty regions when they present themselves.
if (currentSizeMb == 0 || nextSizeMb == 0 || currentSizeMb + nextSizeMb < avgRegionSizeMb) {
- plans.add(new MergeNormalizationPlan(current, next));
+ final MergeNormalizationPlan plan = new MergeNormalizationPlan.Builder()
+ .addTarget(current, currentSizeMb)
+ .addTarget(next, nextSizeMb)
+ .build();
+ plans.add(plan);
candidateIdx++;
}
}
@@ -408,11 +379,11 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
if (skipForSplit(ctx.getRegionStates().getRegionState(hri), hri)) {
continue;
}
- final long regionSize = getRegionSizeMB(hri);
- if (regionSize > 2 * avgRegionSize) {
+ final long regionSizeMb = getRegionSizeMB(hri);
+ if (regionSizeMb > 2 * avgRegionSize) {
LOG.info("Table {}, large region {} has size {}, more than twice avg size {}, splitting",
- ctx.getTableName(), hri.getRegionNameAsString(), regionSize, avgRegionSize);
- plans.add(new SplitNormalizationPlan(hri));
+ ctx.getTableName(), hri.getRegionNameAsString(), regionSizeMb, avgRegionSize);
+ plans.add(new SplitNormalizationPlan(hri, regionSizeMb));
}
}
return plans;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java
index 7c634fbf248..ffe68cc9f62 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SplitNormalizationPlan.java
@@ -18,32 +18,23 @@
*/
package org.apache.hadoop.hbase.master.normalizer;
-import java.io.IOException;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
-import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.yetus.audience.InterfaceAudience;
/**
- * Normalization plan to split region.
+ * Normalization plan to split a region.
*/
@InterfaceAudience.Private
-public class SplitNormalizationPlan implements NormalizationPlan {
+final class SplitNormalizationPlan implements NormalizationPlan {
- private final RegionInfo regionInfo;
+ private final NormalizationTarget splitTarget;
- public SplitNormalizationPlan(RegionInfo regionInfo) {
- this.regionInfo = regionInfo;
- }
-
- @Override
- public long submit(MasterServices masterServices) throws IOException {
- return masterServices.splitRegion(regionInfo, null, HConstants.NO_NONCE,
- HConstants.NO_NONCE);
+ SplitNormalizationPlan(final RegionInfo splitTarget, final long splitTargetSizeMb) {
+ this.splitTarget = new NormalizationTarget(splitTarget, splitTargetSizeMb);
}
@Override
@@ -51,14 +42,14 @@ public class SplitNormalizationPlan implements NormalizationPlan {
return PlanType.SPLIT;
}
- public RegionInfo getRegionInfo() {
- return regionInfo;
+ public NormalizationTarget getSplitTarget() {
+ return splitTarget;
}
@Override
public String toString() {
return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
- .append("regionInfo", regionInfo)
+ .append("splitTarget", splitTarget)
.toString();
}
@@ -75,13 +66,13 @@ public class SplitNormalizationPlan implements NormalizationPlan {
SplitNormalizationPlan that = (SplitNormalizationPlan) o;
return new EqualsBuilder()
- .append(regionInfo, that.regionInfo)
+ .append(splitTarget, that.splitTarget)
.isEquals();
}
@Override public int hashCode() {
return new HashCodeBuilder(17, 37)
- .append(regionInfo)
+ .append(splitTarget)
.toHashCode();
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java
new file mode 100644
index 00000000000..e3180347dc3
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/package-info.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * The Region Normalizer subsystem is responsible for coaxing all the regions in a table toward
+ * a "normal" size, according to their storefile size. It does this by splitting regions that
+ * are significantly larger than the norm, and merging regions that are significantly smaller than
+ * the norm.
+ *
+ * <p>
+ * The public interface to the Region Normalizer subsystem is limited to the
+ * {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager}; the remaining
+ * classes are package-private implementation details.
+ * <p>
+ * The Region Normalizer subsystem is composed of a handful of related classes:
+ * <ul>
+ * <li>
+ * The {@link org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker} provides a system by
+ * which the Normalizer can be disabled at runtime. It currently does this by managing a znode,
+ * but this is an implementation detail.
+ * </li>
+ * <li>
+ * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorkQueue} is a
+ * {@link java.util.Set}-like {@link java.util.Queue} that permits a single copy of a given
+ * work item to exist in the queue at one time. It also provides a facility for a producer to
+ * add an item to the front of the line. Consumers are blocked waiting for new work.
+ * </li>
+ * <li>
+ * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore} wakes up
+ * periodically and schedules new normalization work, adding targets to the queue.
+ * </li>
+ * <li>
+ * The {@link org.apache.hadoop.hbase.master.normalizer.RegionNormalizerWorker} runs in a
+ * daemon thread, grabbing work off the queue as it becomes available.
+ * </li>
+ * <li>
+ * The {@link org.apache.hadoop.hbase.master.normalizer.SimpleRegionNormalizer} implements the
+ * logic for calculating target region sizes and emitting a list of corresponding
+ * {@link org.apache.hadoop.hbase.master.normalizer.NormalizationPlan} objects.
+ * </li>
+ * </ul>
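+ * <p>
+ * A rough sketch of how work flows through these components (local names abbreviated; details
+ * such as error handling and rate limiting elided):
+ * <pre>{@code
+ *   // RegionNormalizerChore, on its configured period:
+ *   queue.put(tableName);
+ *
+ *   // RegionNormalizerWorker, looping in its daemon thread:
+ *   TableName tn = queue.take();
+ *   List<NormalizationPlan> plans = normalizer.computePlansForTable(tn);
+ *   // ... each plan is then submitted via MasterServices.
+ * }</pre>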
+ */
+package org.apache.hadoop.hbase.master.normalizer;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 7c65005de55..3f3e80960bb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.master;
import static org.mockito.Mockito.mock;
-
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
@@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.client.AsyncClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.MasterSwitchType;
+import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.locking.LockManager;
-import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
+import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerManager;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.replication.ReplicationPeerManager;
import org.apache.hadoop.hbase.master.replication.SyncReplicationReplayWALManager;
@@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
import org.apache.hadoop.hbase.security.access.AccessChecker;
import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
public class MockNoopMasterServices implements MasterServices {
@@ -109,11 +108,6 @@ public class MockNoopMasterServices implements MasterServices {
return null;
}
- @Override
- public RegionNormalizer getRegionNormalizer() {
- return null;
- }
-
@Override
public CatalogJanitor getCatalogJanitor() {
return null;
@@ -139,6 +133,10 @@ public class MockNoopMasterServices implements MasterServices {
return null;
}
+ @Override public RegionNormalizerManager getRegionNormalizerManager() {
+ return null;
+ }
+
@Override
public ProcedureExecutor getMasterProcedureExecutor() {
return null;
@@ -341,6 +339,10 @@ public class MockNoopMasterServices implements MasterServices {
return false;
}
+ @Override public boolean skipRegionManagementAction(String action) {
+ return false;
+ }
+
@Override
public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
return 0;
@@ -507,4 +509,9 @@ public class MockNoopMasterServices implements MasterServices {
public boolean isBalancerOn() {
return false;
}
+
+ @Override
+ public boolean normalizeRegions(NormalizeTableFilterParams ntfp, boolean isHighPriority) {
+ return false;
+ }
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java
index 5aec49bdb11..87a7e680ff8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterChoreScheduled.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,7 +19,6 @@
package org.apache.hadoop.hbase.master;
import java.lang.reflect.Field;
-
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.ScheduledChore;
@@ -30,7 +29,6 @@ import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
import org.apache.hadoop.hbase.master.cleaner.ReplicationBarrierCleaner;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
-import org.apache.hadoop.hbase.master.normalizer.RegionNormalizerChore;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
@@ -66,7 +64,7 @@ public class TestMasterChoreScheduled {
}
@Test
- public void testDefaultScheduledChores() throws Exception {
+ public void testDefaultScheduledChores() {
// test if logCleaner chore is scheduled by default in HMaster init
TestChoreField logCleanerTestChoreField = new TestChoreField<>();
LogCleaner logCleaner = logCleanerTestChoreField.getChoreObj("logCleaner");
@@ -96,10 +94,10 @@ public class TestMasterChoreScheduled {
balancerChoreTestChoreField.testIfChoreScheduled(balancerChore);
// test if normalizerChore chore is scheduled by default in HMaster init
- TestChoreField<RegionNormalizerChore> regionNormalizerChoreTestChoreField =
+ ScheduledChore regionNormalizerChore = hMaster.getRegionNormalizerManager()
+ .getRegionNormalizerChore();
+ TestChoreField<ScheduledChore> regionNormalizerChoreTestChoreField =
new TestChoreField<>();
- RegionNormalizerChore regionNormalizerChore = regionNormalizerChoreTestChoreField
- .getChoreObj("normalizerChore");
regionNormalizerChoreTestChoreField.testIfChoreScheduled(regionNormalizerChore);
// test if catalogJanitorChore chore is scheduled by default in HMaster init
@@ -114,22 +112,27 @@ public class TestMasterChoreScheduled {
hbckChoreTestChoreField.testIfChoreScheduled(hbckChore);
}
-
+ /**
+ * Reflect into the {@link HMaster} instance and find by field name a specified instance
+ * of {@link ScheduledChore}.
+ */
private static class TestChoreField<E extends ScheduledChore> {
- private E getChoreObj(String fieldName) throws NoSuchFieldException,
- IllegalAccessException {
- Field masterField = HMaster.class.getDeclaredField(fieldName);
- masterField.setAccessible(true);
- E choreFieldVal = (E) masterField.get(hMaster);
- return choreFieldVal;
+ @SuppressWarnings("unchecked")
+ private E getChoreObj(String fieldName) {
+ try {
+ Field masterField = HMaster.class.getDeclaredField(fieldName);
+ masterField.setAccessible(true);
+ return (E) masterField.get(hMaster);
+ } catch (Exception e) {
+ throw new AssertionError(
+ "Unable to retrieve field '" + fieldName + "' from HMaster instance.", e);
+ }
}
private void testIfChoreScheduled(E choreObj) {
Assert.assertNotNull(choreObj);
Assert.assertTrue(hMaster.getChoreService().isChoreScheduled(choreObj));
}
-
}
-
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
index ff88be1ef20..6ac68b30048 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
@@ -72,8 +72,10 @@ public class TestMasterMetricsWrapper {
public void testInfo() {
HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
MetricsMasterWrapperImpl info = new MetricsMasterWrapperImpl(master);
- assertEquals(master.getSplitPlanCount(), info.getSplitPlanCount(), 0);
- assertEquals(master.getMergePlanCount(), info.getMergePlanCount(), 0);
+ assertEquals(
+ master.getRegionNormalizerManager().getSplitPlanCount(), info.getSplitPlanCount(), 0);
+ assertEquals(
+ master.getRegionNormalizerManager().getMergePlanCount(), info.getMergePlanCount(), 0);
assertEquals(master.getAverageLoad(), info.getAverageLoad(), 0);
assertEquals(master.getClusterId(), info.getClusterId());
assertEquals(master.getMasterActiveTime(), info.getActiveTime());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java
new file mode 100644
index 00000000000..7e6c74910ed
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorkQueue.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.normalizer;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Tests that {@link RegionNormalizerWorkQueue} implements the contract described in its javadoc.
+ */
+@Category({ MasterTests.class, SmallTests.class})
+public class TestRegionNormalizerWorkQueue {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestRegionNormalizerWorkQueue.class);
+
+ @Rule
+ public TestName testName = new TestName();
+
+ @Test
+ public void testElementUniquenessAndFIFO() throws Exception {
+ final RegionNormalizerWorkQueue<Integer> queue = new RegionNormalizerWorkQueue<>();
+ final List<Integer> content = new LinkedList<>();
+ IntStream.of(4, 3, 2, 1, 4, 3, 2, 1)
+ .boxed()
+ .forEach(queue::put);
+ assertEquals(4, queue.size());
+ while (queue.size() > 0) {
+ content.add(queue.take());
+ }
+ assertThat(content, contains(4, 3, 2, 1));
+
+ queue.clear();
+ queue.putAll(Arrays.asList(4, 3, 2, 1));
+ queue.putAll(Arrays.asList(4, 5));
+ assertEquals(5, queue.size());
+ content.clear();
+ while (queue.size() > 0) {
+ content.add(queue.take());
+ }
+ assertThat(content, contains(4, 3, 2, 1, 5));
+ }
+
+ @Test
+ public void testPriorityAndFIFO() throws Exception {
+ final RegionNormalizerWorkQueue<Integer> queue = new RegionNormalizerWorkQueue<>();
+ final List<Integer> content = new LinkedList<>();
+ queue.putAll(Arrays.asList(4, 3, 2, 1));
+ assertEquals(4, queue.size());
+ queue.putFirst(0);
+ assertEquals(5, queue.size());
+ drainTo(queue, content);
+ assertThat("putFirst items should jump the queue, preserving existing order",
+ content, contains(0, 4, 3, 2, 1));
+
+ queue.clear();
+ content.clear();
+ queue.putAll(Arrays.asList(4, 3, 2, 1));
+ queue.putFirst(1);
+ assertEquals(4, queue.size());
+ drainTo(queue, content);
+ assertThat("existing items re-added with putFirst should jump the queue",
+ content, contains(1, 4, 3, 2));
+
+ queue.clear();
+ content.clear();
+ queue.putAll(Arrays.asList(4, 3, 2, 1));
+ queue.putAllFirst(Arrays.asList(2, 3));
+ assertEquals(4, queue.size());
+ drainTo(queue, content);
+ assertThat(
+ "existing items re-added with putAllFirst jump the queue AND honor changes in priority",
+ content, contains(2, 3, 4, 1));
+ }
+
+ private enum Action {
+ PUT,
+ PUT_FIRST,
+ PUT_ALL,
+ PUT_ALL_FIRST,
+ }
+
+ /**
+ * Test that the uniqueness constraint is honored in the face of concurrent modification.
+ */
+ @Test
+ public void testConcurrentPut() throws Exception {
+ final RegionNormalizerWorkQueue<Integer> queue = new RegionNormalizerWorkQueue<>();
+ final int maxValue = 100;
+ final Runnable producer = () -> {
+ final Random rand = ThreadLocalRandom.current();
+ for (int i = 0; i < 1_000; i++) {
+ final Action action = Action.values()[rand.nextInt(Action.values().length)];
+ switch (action) {
+ case PUT: {
+ final int val = rand.nextInt(maxValue);
+ queue.put(val);
+ break;
+ }
+ case PUT_FIRST: {
+ final int val = rand.nextInt(maxValue);
+ queue.putFirst(val);
+ break;
+ }
+ case PUT_ALL: {
+ final List<Integer> vals = rand.ints(5, 0, maxValue)
+ .boxed()
+ .collect(Collectors.toList());
+ queue.putAll(vals);
+ break;
+ }
+ case PUT_ALL_FIRST: {
+ final List<Integer> vals = rand.ints(5, 0, maxValue)
+ .boxed()
+ .collect(Collectors.toList());
+ queue.putAllFirst(vals);
+ break;
+ }
+ default:
+ fail("Unrecognized action " + action);
+ }
+ }
+ };
+
+ final int numThreads = 5;
+ final CompletableFuture<?>[] futures = IntStream.range(0, numThreads)
+ .mapToObj(val -> CompletableFuture.runAsync(producer))
+ .toArray(CompletableFuture<?>[]::new);
+ CompletableFuture.allOf(futures).join();
+
+ final List<Integer> content = new ArrayList<>(queue.size());
+ drainTo(queue, content);
+ assertThat("at most `maxValue` items should be present.",
+ content.size(), lessThanOrEqualTo(maxValue));
+ assertEquals("all items should be unique.", content.size(), new HashSet<>(content).size());
+ }
+
+ /**
+ * Test that calls to {@link RegionNormalizerWorkQueue#take()} block the requesting thread. The
+ * producing thread places new entries onto the queue following a known schedule. The consuming
+ * thread collects a time measurement between calls to {@code take}. Finally, the test makes
+ * coarse-grained assertions of the consumer's observations based on the producer's schedule.
+ */
+ @Test
+ public void testTake() throws Exception {
+ final RegionNormalizerWorkQueue<Integer> queue = new RegionNormalizerWorkQueue<>();
+ final ConcurrentLinkedQueue<Long> takeTimes = new ConcurrentLinkedQueue<>();
+ final AtomicBoolean finished = new AtomicBoolean(false);
+ final Runnable consumer = () -> {
+ try {
+ while (!finished.get()) {
+ queue.take();
+ takeTimes.add(System.nanoTime());
+ }
+ } catch (InterruptedException e) {
+ fail("interrupted.");
+ }
+ };
+
+ CompletableFuture<Void> worker = CompletableFuture.runAsync(consumer);
+ final long testStart = System.nanoTime();
+ for (int i = 0; i < 5; i++) {
+ Thread.sleep(10);
+ queue.put(i);
+ }
+
+ // set finished = true and pipe one more value in case the thread needs an extra pass through
+ // the loop.
+ finished.set(true);
+ queue.put(1);
+ worker.get(1, TimeUnit.SECONDS);
+
+ final Iterator<Long> times = takeTimes.iterator();
+ assertTrue("should have timing information for at least 5 calls to take.",
+ takeTimes.size() >= 5);
+ for (int i = 0; i < 5; i++) {
+ assertThat(
+ "Observations collected in takeTimes should increase by roughly 10ms every interval",
+ times.next(), greaterThan(testStart + TimeUnit.MILLISECONDS.toNanos(i * 10)));
+ }
+ }
+
+ private static <E> void drainTo(final RegionNormalizerWorkQueue<E> queue, Collection<E> dest)
+ throws InterruptedException {
+ assertThat(queue.size(), greaterThan(0));
+ while (queue.size() > 0) {
+ dest.add(queue.take());
+ }
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java
new file mode 100644
index 00000000000..e3a29b85406
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestRegionNormalizerWorker.java
@@ -0,0 +1,252 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.normalizer;
+
+import static java.util.Collections.singletonList;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.comparesEqualTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.nullValue;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.Mockito.when;
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNameTestRule;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.hamcrest.Description;
+import org.hamcrest.Matcher;
+import org.hamcrest.StringDescription;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.mockito.Answers;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.junit.MockitoJUnit;
+import org.mockito.junit.MockitoRule;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A test over {@link RegionNormalizerWorker}. Being a background thread, the only points of
+ * interaction we have with this class are its input source ({@link RegionNormalizerWorkQueue})
+ * and its callbacks invoked against {@link RegionNormalizer} and {@link MasterServices}. The
+ * work queue is simple enough to use directly; for {@link MasterServices}, use a mock because,
+ * as of now, the worker only invokes 4 methods.
+ */
+@Category({ MasterTests.class, SmallTests.class})
+public class TestRegionNormalizerWorker {
+
+ @ClassRule
+ public static final HBaseClassTestRule CLASS_RULE =
+ HBaseClassTestRule.forClass(TestRegionNormalizerWorker.class);
+
+ @Rule
+ public TestName testName = new TestName();
+ @Rule
+ public TableNameTestRule tableName = new TableNameTestRule();
+
+ @Rule
+ public MockitoRule mockitoRule = MockitoJUnit.rule();
+
+ @Mock(answer = Answers.RETURNS_DEEP_STUBS)
+ private MasterServices masterServices;
+ @Mock
+ private RegionNormalizer regionNormalizer;
+
+ private HBaseCommonTestingUtility testingUtility;
+ private RegionNormalizerWorkQueue<TableName> queue;
+ private ExecutorService workerPool;
+
+ private final AtomicReference<Throwable> workerThreadThrowable = new AtomicReference<>();
+
+ @Before
+ public void before() throws Exception {
+ MockitoAnnotations.initMocks(this);
+ when(masterServices.skipRegionManagementAction(any())).thenReturn(false);
+ testingUtility = new HBaseCommonTestingUtility();
+ queue = new RegionNormalizerWorkQueue<>();
+ workerThreadThrowable.set(null);
+
+ final String threadNameFmt =
+ TestRegionNormalizerWorker.class.getSimpleName() + "-" + testName.getMethodName() + "-%d";
+ final ThreadFactory threadFactory = new ThreadFactoryBuilder()
+ .setNameFormat(threadNameFmt)
+ .setDaemon(true)
+ .setUncaughtExceptionHandler((t, e) -> workerThreadThrowable.set(e))
+ .build();
+ workerPool = Executors.newSingleThreadExecutor(threadFactory);
+ }
+
+ @After
+ public void after() throws Exception {
+ workerPool.shutdownNow(); // shutdownNow to interrupt the worker thread sitting on `take()`
+ assertTrue("timeout waiting for worker thread to terminate",
+ workerPool.awaitTermination(30, TimeUnit.SECONDS));
+ final Throwable workerThrowable = workerThreadThrowable.get();
+ assertThat("worker thread threw unexpected exception", workerThrowable, nullValue());
+ }
+
+ @Test
+ public void testMergeCounter() throws Exception {
+ final TableName tn = tableName.getTableName();
+ final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn)
+ .setNormalizationEnabled(true)
+ .build();
+ when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor);
+ when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong()))
+ .thenReturn(1L);
+ when(regionNormalizer.computePlansForTable(tn))
+ .thenReturn(singletonList(new MergeNormalizationPlan.Builder()
+ .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 10)
+ .addTarget(RegionInfoBuilder.newBuilder(tn).build(), 20)
+ .build()));
+
+ final RegionNormalizerWorker worker = new RegionNormalizerWorker(
+ testingUtility.getConfiguration(), masterServices, regionNormalizer, queue);
+ final long beforeMergePlanCount = worker.getMergePlanCount();
+ workerPool.submit(worker);
+ queue.put(tn);
+
+ assertThatEventually("executing work should see plan count increase",
+ worker::getMergePlanCount, greaterThan(beforeMergePlanCount));
+ }
+
+ @Test
+ public void testSplitCounter() throws Exception {
+ final TableName tn = tableName.getTableName();
+ final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn)
+ .setNormalizationEnabled(true)
+ .build();
+ when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor);
+ when(masterServices.splitRegion(any(), any(), anyLong(), anyLong()))
+ .thenReturn(1L);
+ when(regionNormalizer.computePlansForTable(tn))
+ .thenReturn(singletonList(
+ new SplitNormalizationPlan(RegionInfoBuilder.newBuilder(tn).build(), 10)));
+
+ final RegionNormalizerWorker worker = new RegionNormalizerWorker(
+ testingUtility.getConfiguration(), masterServices, regionNormalizer, queue);
+ final long beforeSplitPlanCount = worker.getSplitPlanCount();
+ workerPool.submit(worker);
+ queue.put(tn);
+
+ assertThatEventually("executing work should see plan count increase",
+ worker::getSplitPlanCount, greaterThan(beforeSplitPlanCount));
+ }
+
+ /**
+ * Assert that a rate limit is honored, at least in a rough way. Maintainers should manually
+ * inspect the log messages emitted by the worker thread to confirm the expected behavior.
+ */
+ @Test
+ public void testRateLimit() throws Exception {
+ final TableName tn = tableName.getTableName();
+ final TableDescriptor tnDescriptor = TableDescriptorBuilder.newBuilder(tn)
+ .setNormalizationEnabled(true)
+ .build();
+ final RegionInfo splitRegionInfo = RegionInfoBuilder.newBuilder(tn).build();
+ final RegionInfo mergeRegionInfo1 = RegionInfoBuilder.newBuilder(tn).build();
+ final RegionInfo mergeRegionInfo2 = RegionInfoBuilder.newBuilder(tn).build();
+ when(masterServices.getTableDescriptors().get(tn)).thenReturn(tnDescriptor);
+ when(masterServices.splitRegion(any(), any(), anyLong(), anyLong()))
+ .thenReturn(1L);
+ when(masterServices.mergeRegions(any(), anyBoolean(), anyLong(), anyLong()))
+ .thenReturn(1L);
+ when(regionNormalizer.computePlansForTable(tn))
+ .thenReturn(Arrays.asList(
+ new SplitNormalizationPlan(splitRegionInfo, 2),
+ new MergeNormalizationPlan.Builder()
+ .addTarget(mergeRegionInfo1, 1)
+ .addTarget(mergeRegionInfo2, 2)
+ .build(),
+ new SplitNormalizationPlan(splitRegionInfo, 1)));
+
+ final Configuration conf = testingUtility.getConfiguration();
+ conf.set("hbase.normalizer.throughput.max_bytes_per_sec", "1m");
+ final RegionNormalizerWorker worker = new RegionNormalizerWorker(
+ conf, masterServices, regionNormalizer, queue);
+ workerPool.submit(worker);
+ final long startTime = System.nanoTime();
+ queue.put(tn);
+
+ assertThatEventually("executing work should see split plan count increase",
+ worker::getSplitPlanCount, comparesEqualTo(2L));
+ assertThatEventually("executing work should see merge plan count increase",
+ worker::getMergePlanCount, comparesEqualTo(1L));
+
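+ // The three plans cover 6 MB of regions in total (2 + (1 + 2) + 1). At the configured limit
+ // of 1 MB/sec, and with the first acquisition granted immediately, the worker should stall
+ // roughly 5 seconds in aggregate, hence the coarse lower bound asserted below.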
+ final long endTime = System.nanoTime();
+ assertThat("rate limited normalizer should have taken at least 5 seconds",
+ Duration.ofNanos(endTime - startTime), greaterThanOrEqualTo(Duration.ofSeconds(5)));
+ }
+
+ /**
+ * Repeatedly evaluates {@code matcher} against the result of calling {@code actualSupplier}
+ * until the matcher succeeds or the timeout period of 30 seconds is exhausted.
+ */
+ private <T> void assertThatEventually(
+ final String reason,
+ final Supplier<? extends T> actualSupplier,
+ final Matcher<? super T> matcher
+ ) throws Exception {
+ testingUtility.waitFor(TimeUnit.SECONDS.toMillis(30),
+ new Waiter.ExplainingPredicate<Exception>() {
+ private T lastValue = null;
+
+ @Override
+ public String explainFailure() {
+ final Description description = new StringDescription()
+ .appendText(reason)
+ .appendText("\nExpected: ")
+ .appendDescriptionOf(matcher)
+ .appendText("\n but: ");
+ matcher.describeMismatch(lastValue, description);
+ return description.toString();
+ }
+
+ @Override public boolean evaluate() {
+ lastValue = actualSupplier.get();
+ return matcher.matches(lastValue);
+ }
+ });
+ }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
index 89da907eeb0..f263cbc4fdf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizer.java
@@ -175,8 +175,12 @@ public class TestSimpleRegionNormalizer {
createRegionSizesMap(regionInfos, 15, 5, 5, 15, 16);
setupMocksForNormalizer(regionSizes, regionInfos);
- assertThat(normalizer.computePlansForTable(tableName), contains(
- new MergeNormalizationPlan(regionInfos.get(1), regionInfos.get(2))));
+ assertThat(
+ normalizer.computePlansForTable(tableName),
+ contains(new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(1), 5)
+ .addTarget(regionInfos.get(2), 5)
+ .build()));
}
// Test for situation illustrated in HBASE-14867
@@ -188,9 +192,12 @@ public class TestSimpleRegionNormalizer {
createRegionSizesMap(regionInfos, 1, 10000, 10000, 10000, 2700, 2700);
setupMocksForNormalizer(regionSizes, regionInfos);
- assertThat(normalizer.computePlansForTable(tableName), contains(
- new MergeNormalizationPlan(regionInfos.get(4), regionInfos.get(5))
- ));
+ assertThat(
+ normalizer.computePlansForTable(tableName),
+ contains(new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(4), 2700)
+ .addTarget(regionInfos.get(5), 2700)
+ .build()));
}
@Test
@@ -214,7 +221,7 @@ public class TestSimpleRegionNormalizer {
setupMocksForNormalizer(regionSizes, regionInfos);
assertThat(normalizer.computePlansForTable(tableName), contains(
- new SplitNormalizationPlan(regionInfos.get(3))));
+ new SplitNormalizationPlan(regionInfos.get(3), 30)));
}
@Test
@@ -229,18 +236,26 @@ public class TestSimpleRegionNormalizer {
when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize())
.thenReturn(20L);
assertThat(normalizer.computePlansForTable(tableName), contains(
- new SplitNormalizationPlan(regionInfos.get(2)),
- new SplitNormalizationPlan(regionInfos.get(3)),
- new SplitNormalizationPlan(regionInfos.get(4)),
- new SplitNormalizationPlan(regionInfos.get(5))
+ new SplitNormalizationPlan(regionInfos.get(2), 60),
+ new SplitNormalizationPlan(regionInfos.get(3), 80),
+ new SplitNormalizationPlan(regionInfos.get(4), 100),
+ new SplitNormalizationPlan(regionInfos.get(5), 120)
));
// test when target region size is 200
when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionSize())
.thenReturn(200L);
- assertThat(normalizer.computePlansForTable(tableName), contains(
- new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)),
- new MergeNormalizationPlan(regionInfos.get(2), regionInfos.get(3))));
+ assertThat(
+ normalizer.computePlansForTable(tableName),
+ contains(
+ new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(0), 20)
+ .addTarget(regionInfos.get(1), 40)
+ .build(),
+ new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(2), 60)
+ .addTarget(regionInfos.get(3), 80)
+ .build()));
}
@Test
@@ -255,14 +270,18 @@ public class TestSimpleRegionNormalizer {
when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount())
.thenReturn(8);
assertThat(normalizer.computePlansForTable(tableName), contains(
- new SplitNormalizationPlan(regionInfos.get(2)),
- new SplitNormalizationPlan(regionInfos.get(3))));
+ new SplitNormalizationPlan(regionInfos.get(2), 60),
+ new SplitNormalizationPlan(regionInfos.get(3), 80)));
// test when target region count is 3
when(masterServices.getTableDescriptors().get(any()).getNormalizerTargetRegionCount())
.thenReturn(3);
- assertThat(normalizer.computePlansForTable(tableName), contains(
- new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1))));
+ assertThat(
+ normalizer.computePlansForTable(tableName),
+ contains(new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(0), 20)
+ .addTarget(regionInfos.get(1), 40)
+ .build()));
}
@Test
@@ -312,14 +331,17 @@ public class TestSimpleRegionNormalizer {
List<NormalizationPlan> plans = normalizer.computePlansForTable(tableName);
assertThat(plans, contains(
- new SplitNormalizationPlan(regionInfos.get(2)),
- new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1))));
+ new SplitNormalizationPlan(regionInfos.get(2), 10),
+ new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(0), 1)
+ .addTarget(regionInfos.get(1), 1)
+ .build()));
// have to call setupMocks again because we don't have dynamic config update on normalizer.
conf.setInt(MIN_REGION_COUNT_KEY, 4);
setupMocksForNormalizer(regionSizes, regionInfos);
assertThat(normalizer.computePlansForTable(tableName), contains(
- new SplitNormalizationPlan(regionInfos.get(2))));
+ new SplitNormalizationPlan(regionInfos.get(2), 10)));
}
@Test
@@ -356,8 +378,12 @@ public class TestSimpleRegionNormalizer {
assertFalse(normalizer.isSplitEnabled());
assertEquals(1, normalizer.getMergeMinRegionSizeMb());
- assertThat(normalizer.computePlansForTable(tableName), contains(
- new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1))));
+ assertThat(
+ normalizer.computePlansForTable(tableName),
+ contains(new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(0), 1)
+ .addTarget(regionInfos.get(1), 2)
+ .build()));
conf.setInt(MERGE_MIN_REGION_SIZE_MB_KEY, 3);
setupMocksForNormalizer(regionSizes, regionInfos);
@@ -378,9 +404,18 @@ public class TestSimpleRegionNormalizer {
assertFalse(normalizer.isSplitEnabled());
assertEquals(0, normalizer.getMergeMinRegionSizeMb());
assertThat(normalizer.computePlansForTable(tableName), contains(
- new MergeNormalizationPlan(regionInfos.get(0), regionInfos.get(1)),
- new MergeNormalizationPlan(regionInfos.get(2), regionInfos.get(3)),
- new MergeNormalizationPlan(regionInfos.get(5), regionInfos.get(6))));
+ new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(0), 0)
+ .addTarget(regionInfos.get(1), 1)
+ .build(),
+ new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(2), 10)
+ .addTarget(regionInfos.get(3), 0)
+ .build(),
+ new MergeNormalizationPlan.Builder()
+ .addTarget(regionInfos.get(5), 10)
+ .addTarget(regionInfos.get(6), 0)
+ .build()));
}
// This test is to make sure that normalizer is only going to merge adjacent regions.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
index 173adf49db2..f5feb59ca32 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
@@ -21,7 +21,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
@@ -161,6 +160,7 @@ public class TestSimpleRegionNormalizerOnCluster {
tn2 + " should not have split.",
tn2RegionCount,
getRegionCount(tn2));
+ LOG.debug("waiting for t3 to settle...");
waitForTableRegionCount(tn3, tn3RegionCount);
} finally {
dropIfExists(tn1);
@@ -187,7 +187,7 @@ public class TestSimpleRegionNormalizerOnCluster {
: TableName.valueOf(name.getMethodName());
final int currentRegionCount = createTableBegsSplit(tableName, true, false);
- final long existingSkippedSplitCount = master.getRegionNormalizer()
+ final long existingSkippedSplitCount = master.getRegionNormalizerManager()
.getSkippedCount(PlanType.SPLIT);
assertFalse(admin.normalizerSwitch(true).get());
assertTrue(admin.normalize().get());
@@ -332,7 +332,8 @@ public class TestSimpleRegionNormalizerOnCluster {
return "waiting to observe split attempt and skipped.";
}
@Override public boolean evaluate() {
- final long skippedSplitCount = master.getRegionNormalizer().getSkippedCount(PlanType.SPLIT);
+ final long skippedSplitCount = master.getRegionNormalizerManager()
+ .getSkippedCount(PlanType.SPLIT);
return skippedSplitCount > existingSkippedSplitCount;
}
});