HBASE-24627 Normalize one table at a time

Introduce an additional method to our Admin interface that allows an
operator to selectively run the normalizer. The IPC protocol supports
general table name selection via a compound filter.

Signed-off-by: Sean Busbey <busbey@apache.org>
Signed-off-by: Viraj Jasani <vjasani@apache.org>
This commit is contained in:
Nick Dimiduk 2020-08-06 19:07:46 -07:00 committed by Nick Dimiduk
parent 998ee77133
commit ecc0c67077
17 changed files with 407 additions and 81 deletions

View File

@ -856,10 +856,24 @@ public interface Admin extends Abortable, Closeable {
* the request was submitted successfully. We need to check logs for the details of which regions
* were split/merged.
*
* @return <code>true</code> if region normalizer ran, <code>false</code> otherwise.
* @return {@code true} if region normalizer ran, {@code false} otherwise.
* @throws IOException if a remote or network exception occurs
*/
boolean normalize() throws IOException;
default boolean normalize() throws IOException {
return normalize(new NormalizeTableFilterParams.Builder().build());
}
/**
* Invoke region normalizer. Can NOT run for various reasons. Check logs.
* This is a non-blocking invocation to region normalizer. If return value is true, it means
* the request was submitted successfully. We need to check logs for the details of which regions
* were split/merged.
*
* @param ntfp limit to tables matching the specified filter.
* @return {@code true} if region normalizer ran, {@code false} otherwise.
* @throws IOException if a remote or network exception occurs
*/
boolean normalize(NormalizeTableFilterParams ntfp) throws IOException;
/**
* Query the current state of the region normalizer.

View File

@ -395,8 +395,8 @@ class AdminOverAsyncAdmin implements Admin {
}
@Override
public boolean normalize() throws IOException {
return get(admin.normalize());
public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException {
return get(admin.normalize(ntfp));
}
@Override

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -1279,7 +1279,17 @@ public interface AsyncAdmin {
* @return true if region normalizer ran, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<Boolean> normalize();
default CompletableFuture<Boolean> normalize() {
return normalize(new NormalizeTableFilterParams.Builder().build());
}
/**
* Invoke region normalizer. Can NOT run for various reasons. Check logs.
* @param ntfp limit to tables matching the specified filter.
* @return true if region normalizer ran, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}
*/
CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp);
/**
* Turn the cleaner chore on/off.

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -709,8 +709,8 @@ class AsyncHBaseAdmin implements AsyncAdmin {
}
@Override
public CompletableFuture<Boolean> normalize() {
return wrap(rawAdmin.normalize());
public CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp) {
return wrap(rawAdmin.normalize(ntfp));
}
@Override

View File

@ -0,0 +1,107 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;
/**
* A collection of criteria used for table selection. The logic of table selection is as follows:
* <ul>
* <li>
* When no parameter values are provided, an unfiltered list of all user tables is returned.
* </li>
* <li>
* When a list of {@link TableName TableNames} are provided, the filter starts with any of
* these tables that exist.
* </li>
* <li>
* When a {@code namespace} name is provided, the filter starts with all the tables present in
* that namespace.
* </li>
* <li>
* If both a list of {@link TableName TableNames} and a {@code namespace} name are provided,
* the {@link TableName} list is honored and the {@code namespace} name is ignored.
* </li>
* <li>
* If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
* reduced to those that match the provided regular expression.
* </li>
* </ul>
*/
@InterfaceAudience.Public
public final class NormalizeTableFilterParams {
private final List<TableName> tableNames;
private final String regex;
private final String namespace;
private NormalizeTableFilterParams(final List<TableName> tableNames, final String regex,
final String namespace) {
this.tableNames = tableNames;
this.regex = regex;
this.namespace = namespace;
}
public List<TableName> getTableNames() {
return tableNames;
}
public String getRegex() {
return regex;
}
public String getNamespace() {
return namespace;
}
/**
* Used to instantiate an instance of {@link NormalizeTableFilterParams}.
*/
public static class Builder {
private List<TableName> tableNames;
private String regex;
private String namespace;
public Builder tableFilterParams(final NormalizeTableFilterParams ntfp) {
this.tableNames = ntfp.getTableNames();
this.regex = ntfp.getRegex();
this.namespace = ntfp.getNamespace();
return this;
}
public Builder tableNames(final List<TableName> tableNames) {
this.tableNames = tableNames;
return this;
}
public Builder regex(final String regex) {
this.regex = regex;
return this;
}
public Builder namespace(final String namespace) {
this.namespace = namespace;
return this;
}
public NormalizeTableFilterParams build() {
return new NormalizeTableFilterParams(tableNames, regex, namespace);
}
}
}

View File

@ -3286,14 +3286,18 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
}
@Override
public CompletableFuture<Boolean> normalize() {
public CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp) {
return normalize(RequestConverter.buildNormalizeRequest(ntfp));
}
private CompletableFuture<Boolean> normalize(NormalizeRequest request) {
return this
.<Boolean> newMasterCaller()
.action(
(controller, stub) -> this.<NormalizeRequest, NormalizeResponse, Boolean> call(
controller, stub, RequestConverter.buildNormalizeRequest(),
(s, c, req, done) -> s.normalize(c, req, done), (resp) -> resp.getNormalizerRan()))
.call();
.<Boolean> newMasterCaller()
.action(
(controller, stub) -> this.call(
controller, stub, request, MasterService.Interface::normalize,
NormalizeResponse::getNormalizerRan))
.call();
}
@Override

View File

@ -2293,6 +2293,13 @@ public final class ProtobufUtil {
.setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build();
}
/**
 * Converts a list of client-side {@link TableName}s into their protobuf representations.
 * A {@code null} input yields an empty (mutable) list rather than {@code null}, so callers
 * never need a null check.
 */
public static List<HBaseProtos.TableName> toProtoTableNameList(List<TableName> tableNameList) {
  final List<HBaseProtos.TableName> protos = new ArrayList<>();
  if (tableNameList != null) {
    for (TableName tableName : tableNameList) {
      protos.add(toProtoTableName(tableName));
    }
  }
  return protos;
}
public static List<TableName> toTableNameList(List<HBaseProtos.TableName> tableNamesList) {
if (tableNamesList == null) {
return new ArrayList<>();

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.LogQueryFilter;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
import org.apache.hadoop.hbase.client.RegionInfo;
@ -1476,8 +1477,18 @@ public final class RequestConverter {
*
* @return a NormalizeRequest
*/
public static NormalizeRequest buildNormalizeRequest() {
return NormalizeRequest.newBuilder().build();
/**
 * Creates a protocol buffer NormalizeRequest from the provided filter parameters.
 * Only fields the caller actually set are populated; an absent protobuf field signals
 * "no filter on this dimension" to the server.
 */
public static NormalizeRequest buildNormalizeRequest(NormalizeTableFilterParams ntfp) {
  final NormalizeRequest.Builder request = NormalizeRequest.newBuilder();
  final List<TableName> names = ntfp.getTableNames();
  if (names != null) {
    request.addAllTableNames(ProtobufUtil.toProtoTableNameList(names));
  }
  final String regex = ntfp.getRegex();
  if (regex != null) {
    request.setRegex(regex);
  }
  final String namespace = ntfp.getNamespace();
  if (namespace != null) {
    request.setNamespace(namespace);
  }
  return request.build();
}
/**

View File

@ -354,6 +354,9 @@ message IsSplitOrMergeEnabledResponse {
}
message NormalizeRequest {
repeated TableName table_names = 1;
optional string regex = 2;
optional string namespace = 3;
}
message NormalizeResponse {

View File

@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED
import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS;
import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK;
import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
@ -38,6 +37,7 @@ import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@ -80,9 +80,9 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionStatesCount;
@ -226,14 +226,13 @@ import org.eclipse.jetty.servlet.ServletHolder;
import org.eclipse.jetty.webapp.WebAppContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors;
import org.apache.hbase.thirdparty.com.google.protobuf.Service;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
@ -1902,14 +1901,18 @@ public class HMaster extends HRegionServer implements MasterServices {
return this.normalizer;
}
public boolean normalizeRegions() throws IOException {
return normalizeRegions(new NormalizeTableFilterParams.Builder().build());
}
/**
* Perform normalization of cluster (invoked by {@link RegionNormalizerChore}).
* Perform normalization of cluster.
*
* @return true if an existing normalization was already in progress, or if a new normalization
* was performed successfully; false otherwise (specifically, if HMaster finished initializing
* or normalization is globally disabled).
*/
public boolean normalizeRegions() throws IOException {
public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IOException {
final long startTime = EnvironmentEdgeManager.currentTime();
if (regionNormalizerTracker == null || !regionNormalizerTracker.isNormalizerOn()) {
LOG.debug("Region normalization is disabled, don't run region normalizer.");
@ -1930,12 +1933,19 @@ public class HMaster extends HRegionServer implements MasterServices {
int affectedTables = 0;
try {
final List<TableName> allEnabledTables =
new ArrayList<>(tableStateManager.getTablesInStates(TableState.State.ENABLED));
Collections.shuffle(allEnabledTables);
final Set<TableName> matchingTables = getTableDescriptors(new LinkedList<>(),
ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false)
.stream()
.map(TableDescriptor::getTableName)
.collect(Collectors.toSet());
final Set<TableName> allEnabledTables =
tableStateManager.getTablesInStates(TableState.State.ENABLED);
final List<TableName> targetTables =
new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables));
Collections.shuffle(targetTables);
final List<Long> submittedPlanProcIds = new ArrayList<>();
for (TableName table : allEnabledTables) {
for (TableName table : targetTables) {
if (table.isSystemTable()) {
continue;
}
@ -3399,9 +3409,9 @@ public class HMaster extends HRegionServer implements MasterServices {
}
/**
* @return list of table table descriptors after filtering by regex and whether to include system
* tables, etc.
* @throws IOException
* Return a list of table table descriptors after applying any provided filter parameters. Note
* that the user-facing description of this filter logic is presented on the class-level javadoc
* of {@link NormalizeTableFilterParams}.
*/
private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
final String namespace, final String regex, final List<TableName> tableNameList,

View File

@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@ -1920,7 +1921,14 @@ public class MasterRpcServices extends RSRpcServices implements
NormalizeRequest request) throws ServiceException {
rpcPreCheck("normalize");
try {
return NormalizeResponse.newBuilder().setNormalizerRan(master.normalizeRegions()).build();
final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
.tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
.regex(request.hasRegex() ? request.getRegex() : null)
.namespace(request.hasNamespace() ? request.getNamespace() : null)
.build();
return NormalizeResponse.newBuilder()
.setNormalizerRan(master.normalizeRegions(ntfp))
.build();
} catch (IOException ex) {
throw new ServiceException(ex);
}

View File

@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.TimeUnit;
@ -35,7 +36,8 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
@ -79,7 +81,7 @@ public class TestSimpleRegionNormalizerOnCluster {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static final byte[] FAMILY_NAME = Bytes.toBytes("fam");
private static Admin admin;
private static AsyncAdmin admin;
private static HMaster master;
@Rule
@ -94,9 +96,12 @@ public class TestSimpleRegionNormalizerOnCluster {
// no way for the test to set the regionId on a created region, so disable this feature.
TEST_UTIL.getConfiguration().setInt("hbase.normalizer.merge.min_region_age.days", 0);
// disable the normalizer coming along and running via Chore
TEST_UTIL.getConfiguration().setInt("hbase.normalizer.period", Integer.MAX_VALUE);
TEST_UTIL.startMiniCluster(1);
TestNamespaceAuditor.waitForQuotaInitialize(TEST_UTIL);
admin = TEST_UTIL.getAdmin();
admin = TEST_UTIL.getAsyncConnection().getAdmin();
master = TEST_UTIL.getHBaseCluster().getMaster();
assertNotNull(master);
}
@ -107,17 +112,17 @@ public class TestSimpleRegionNormalizerOnCluster {
}
@Before
public void before() throws IOException {
public void before() throws Exception {
// disable the normalizer ahead of time, let the test enable it when its ready.
admin.normalizerSwitch(false);
admin.normalizerSwitch(false).get();
}
@Test
public void testHonorsNormalizerSwitch() throws IOException {
assertFalse(admin.isNormalizerEnabled());
assertFalse(admin.normalize());
assertFalse(admin.normalizerSwitch(true));
assertTrue(admin.normalize());
public void testHonorsNormalizerSwitch() throws Exception {
assertFalse(admin.isNormalizerEnabled().get());
assertFalse(admin.normalize().get());
assertFalse(admin.normalizerSwitch(true).get());
assertTrue(admin.normalize().get());
}
/**
@ -137,8 +142,8 @@ public class TestSimpleRegionNormalizerOnCluster {
final int tn2RegionCount = createTableBegsSplit(tn2, false, false);
final int tn3RegionCount = createTableBegsSplit(tn3, true, true);
assertFalse(admin.normalizerSwitch(true));
assertTrue(admin.normalize());
assertFalse(admin.normalizerSwitch(true).get());
assertTrue(admin.normalize().get());
waitForTableSplit(tn1, tn1RegionCount + 1);
// confirm that tn1 has (tn1RegionCount + 1) number of regions.
@ -183,8 +188,8 @@ public class TestSimpleRegionNormalizerOnCluster {
final int currentRegionCount = createTableBegsSplit(tableName, true, false);
final long existingSkippedSplitCount = master.getRegionNormalizer()
.getSkippedCount(PlanType.SPLIT);
assertFalse(admin.normalizerSwitch(true));
assertTrue(admin.normalize());
assertFalse(admin.normalizerSwitch(true).get());
assertTrue(admin.normalize().get());
if (limitedByQuota) {
waitForSkippedSplits(master, existingSkippedSplitCount);
assertEquals(
@ -208,8 +213,8 @@ public class TestSimpleRegionNormalizerOnCluster {
final TableName tableName = TableName.valueOf(name.getMethodName());
try {
final int currentRegionCount = createTableBegsMerge(tableName);
assertFalse(admin.normalizerSwitch(true));
assertTrue(admin.normalize());
assertFalse(admin.normalizerSwitch(true).get());
assertTrue(admin.normalize().get());
waitForTableMerge(tableName, currentRegionCount - 1);
assertEquals(
tableName + " should have merged.",
@ -220,14 +225,103 @@ public class TestSimpleRegionNormalizerOnCluster {
}
}
private static TableName buildTableNameForQuotaTest(final String methodName) throws IOException {
@Test
public void testHonorsNamespaceFilter() throws Exception {
  final NamespaceDescriptor nsDescriptor = NamespaceDescriptor.create("ns").build();
  final TableName insideNs = TableName.valueOf("ns", name.getMethodName());
  final TableName outsideNs = TableName.valueOf(name.getMethodName());
  try {
    admin.createNamespace(nsDescriptor).get();
    final int insideRegionCount = createTableBegsSplit(insideNs, true, false);
    final int outsideRegionCount = createTableBegsSplit(outsideNs, true, false);
    // Restrict normalization to the "ns" namespace only.
    final NormalizeTableFilterParams filter = new NormalizeTableFilterParams.Builder()
      .namespace("ns")
      .build();
    assertFalse(admin.normalizerSwitch(true).get());
    assertTrue(admin.normalize(filter).get());
    waitForTableSplit(insideNs, insideRegionCount + 1);
    // The in-namespace table gains one region from the split; the other table stays at its
    // original region count because it is not a member of the target namespace.
    assertEquals(
      insideNs + " should have split.",
      insideRegionCount + 1,
      MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), insideNs));
    waitForTableRegionCount(outsideNs, outsideRegionCount);
  } finally {
    dropIfExists(insideNs);
    dropIfExists(outsideNs);
  }
}
@Test
public void testHonorsPatternFilter() throws Exception {
  final TableName matching = TableName.valueOf(name.getMethodName() + "1");
  final TableName nonMatching = TableName.valueOf(name.getMethodName() + "2");
  try {
    final int matchingRegionCount = createTableBegsSplit(matching, true, false);
    final int nonMatchingRegionCount = createTableBegsSplit(nonMatching, true, false);
    // Only table names ending in "1" are eligible for normalization.
    final NormalizeTableFilterParams filter = new NormalizeTableFilterParams.Builder()
      .regex(".*[1]")
      .build();
    assertFalse(admin.normalizerSwitch(true).get());
    assertTrue(admin.normalize(filter).get());
    waitForTableSplit(matching, matchingRegionCount + 1);
    // The matching table gains one region from the split; the other table stays at its
    // original region count because its name fails the regex filter.
    assertEquals(
      matching + " should have split.",
      matchingRegionCount + 1,
      MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), matching));
    waitForTableRegionCount(nonMatching, nonMatchingRegionCount);
  } finally {
    dropIfExists(matching);
    dropIfExists(nonMatching);
  }
}
@Test
public void testHonorsNameFilter() throws Exception {
// Normalization limited to an explicit table-name list must leave unlisted tables alone.
final TableName tn1 = TableName.valueOf(name.getMethodName() + "1");
final TableName tn2 = TableName.valueOf(name.getMethodName() + "2");
try {
final int tn1RegionCount = createTableBegsSplit(tn1, true, false);
final int tn2RegionCount = createTableBegsSplit(tn2, true, false);
final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
.tableNames(Collections.singletonList(tn1))
.build();
assertFalse(admin.normalizerSwitch(true).get());
assertTrue(admin.normalize(ntfp).get());
waitForTableSplit(tn1, tn1RegionCount + 1);
// confirm that tn1 has (tn1RegionCount + 1) number of regions.
// tn2 has tn2RegionCount number of regions because it fails filter:
assertEquals(
tn1 + " should have split.",
tn1RegionCount + 1,
MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tn1));
waitForTableRegionCount(tn2, tn2RegionCount);
} finally {
dropIfExists(tn1);
dropIfExists(tn2);
}
}
private static TableName buildTableNameForQuotaTest(final String methodName) throws Exception {
String nsp = "np2";
NamespaceDescriptor nspDesc =
NamespaceDescriptor.create(nsp)
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
admin.createNamespace(nspDesc);
return TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + methodName);
admin.createNamespace(nspDesc).get();
return TableName.valueOf(nsp, methodName);
}
private static void waitForSkippedSplits(final HMaster master,
@ -347,16 +441,17 @@ public class TestSimpleRegionNormalizerOnCluster {
*/
private static int createTableBegsSplit(final TableName tableName,
final boolean normalizerEnabled, final boolean isMergeEnabled)
throws IOException {
throws Exception {
final List<HRegion> generatedRegions = generateTestData(tableName, 1, 1, 2, 3, 5);
assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName));
admin.flush(tableName);
admin.flush(tableName).get();
final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))
final TableDescriptor td = TableDescriptorBuilder
.newBuilder(admin.getDescriptor(tableName).get())
.setNormalizationEnabled(normalizerEnabled)
.setMergeEnabled(isMergeEnabled)
.build();
admin.modifyTable(td);
admin.modifyTable(td).get();
// make sure relatively accurate region statistics are available for the test table. use
// the last/largest region as clue.
@ -383,16 +478,17 @@ public class TestSimpleRegionNormalizerOnCluster {
* <li>sum of sizes of first two regions < average</li>
* </ul>
*/
private static int createTableBegsMerge(final TableName tableName) throws IOException {
private static int createTableBegsMerge(final TableName tableName) throws Exception {
// create 5 regions with sizes to trigger merge of small regions
final List<HRegion> generatedRegions = generateTestData(tableName, 1, 1, 3, 3, 5);
assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName));
admin.flush(tableName);
admin.flush(tableName).get();
final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))
final TableDescriptor td = TableDescriptorBuilder
.newBuilder(admin.getDescriptor(tableName).get())
.setNormalizationEnabled(true)
.build();
admin.modifyTable(td);
admin.modifyTable(td).get();
// make sure relatively accurate region statistics are available for the test table. use
// the last/largest region as clue.
@ -411,12 +507,12 @@ public class TestSimpleRegionNormalizerOnCluster {
return 5;
}
private static void dropIfExists(final TableName tableName) throws IOException {
if (tableName != null && admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
private static void dropIfExists(final TableName tableName) throws Exception {
if (tableName != null && admin.tableExists(tableName).get()) {
if (admin.isTableEnabled(tableName).get()) {
admin.disableTable(tableName).get();
}
admin.deleteTable(tableName);
admin.deleteTable(tableName).get();
}
}
}

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.OnlineLogRecord;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
@ -341,8 +342,9 @@ public class VerifyingRSGroupAdmin implements Admin, Closeable {
return admin.clearBlockCache(tableName);
}
public boolean normalize() throws IOException {
return admin.normalize();
@Override
public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException {
return admin.normalize(ntfp);
}
public boolean isNormalizerEnabled() throws IOException {

View File

@ -258,9 +258,54 @@ module Hbase
#----------------------------------------------------------------------------------------------
# Requests region normalization for all configured tables in the cluster
# Returns true if normalizer ran successfully
def normalize
@admin.normalize
# Returns true if normalize request was successfully submitted
# Requests region normalization, optionally limited by table-filter criteria.
# Accepts zero or more Hash arguments carrying TABLE_NAME, TABLE_NAMES, REGEX,
# or NAMESPACE keys; returns true if the normalize request was submitted.
def normalize(*args)
  builder = org.apache.hadoop.hbase.client.NormalizeTableFilterParams::Builder.new
  args.each do |arg|
    # Every argument must be a Hash of recognized filter keys. The previous guard
    # also admitted bare Strings, but the arg.key? call below then crashed with
    # NoMethodError; reject non-Hash arguments with a clear error instead.
    unless arg.is_a?(Hash)
      raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash type")
    end
    if arg.key?(TABLE_NAME)
      # Single-table shorthand: wrap the one name in a singleton list.
      table_name = arg.delete(TABLE_NAME)
      unless table_name.is_a?(String)
        raise(ArgumentError, "#{TABLE_NAME} must be of type String")
      end
      builder.tableNames(java.util.Collections.singletonList(TableName.valueOf(table_name)))
    elsif arg.key?(TABLE_NAMES)
      table_names = arg.delete(TABLE_NAMES)
      unless table_names.is_a?(Array)
        raise(ArgumentError, "#{TABLE_NAMES} must be of type Array")
      end
      table_name_list = java.util.LinkedList.new
      table_names.each do |tn|
        unless tn.is_a?(String)
          raise(ArgumentError, "#{TABLE_NAMES} value #{tn} must be of type String")
        end
        table_name_list.add(TableName.valueOf(tn))
      end
      builder.tableNames(table_name_list)
    elsif arg.key?(REGEX)
      regex = arg.delete(REGEX)
      raise(ArgumentError, "#{REGEX} must be of type String") unless regex.is_a?(String)
      builder.regex(regex)
    elsif arg.key?(NAMESPACE)
      namespace = arg.delete(NAMESPACE)
      unless namespace.is_a?(String)
        raise(ArgumentError, "#{NAMESPACE} must be of type String")
      end
      builder.namespace(namespace)
    else
      raise(ArgumentError, "Unrecognized argument #{arg}")
    end
  end
  ntfp = builder.build
  @admin.normalize(ntfp)
end
#----------------------------------------------------------------------------------------------

View File

@ -71,6 +71,7 @@ module HBaseConstants
POLICY = 'POLICY'.freeze
RAW = 'RAW'.freeze
READ_TYPE = 'READ_TYPE'.freeze
REGEX = 'REGEX'.freeze
REGIONSERVER = 'REGIONSERVER'.freeze
REGION_REPLICATION = 'REGION_REPLICATION'.freeze
REGION_REPLICA_ID = 'REGION_REPLICA_ID'.freeze
@ -91,6 +92,8 @@ module HBaseConstants
STOPROW = 'STOPROW'.freeze
TABLE = 'TABLE'.freeze
TABLE_CFS = 'TABLE_CFS'.freeze
TABLE_NAME = 'TABLE_NAME'.freeze
TABLE_NAMES = 'TABLE_NAMES'.freeze
TIMERANGE = 'TIMERANGE'.freeze
TIMESTAMP = 'TIMESTAMP'.freeze
TYPE = 'TYPE'.freeze

View File

@ -22,18 +22,24 @@ module Shell
class Normalize < Command
def help
<<-EOF
Trigger region normalizer for all tables which have NORMALIZATION_ENABLED flag set. Returns true
if normalizer ran successfully, false otherwise. Note that this command has no effect
if region normalizer is disabled (make sure it's turned on using 'normalizer_switch' command).
Trigger the region normalizer. Without arguments, invokes the normalizer without a table filter.
Any arguments are used to limit table selection. Returns true if the normalize request was
submitted successfully, false otherwise. Note that this command has no effect if region normalizer
is disabled (make sure it's turned on using 'normalizer_switch' command).
Examples:
Examples:
hbase> normalize
hbase> normalize
hbase> normalize TABLE_NAME => 'my_table'
hbase> normalize TABLE_NAMES => ['foo', 'bar', 'baz']
hbase> normalize REGEX => 'my_.*'
hbase> normalize NAMESPACE => 'ns1'
hbase> normalize NAMESPACE => 'ns', REGEX => '*._BIG_.*'
EOF
end
def command
did_normalize_run = !!admin.normalize
def command(*args)
did_normalize_run = !!admin.normalize(*args)
formatter.row([did_normalize_run.to_s])
did_normalize_run
end

View File

@ -1,5 +1,4 @@
/**
*
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.LogQueryFilter;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.OnlineLogRecord;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.SnapshotDescription;
@ -639,7 +639,7 @@ public class ThriftAdmin implements Admin {
}
@Override
public boolean normalize() {
public boolean normalize(NormalizeTableFilterParams ntfp) {
throw new NotImplementedException("normalize not supported in ThriftAdmin");
}