From ecc0c6707784da442d22b92569308240c6cc1723 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 6 Aug 2020 19:07:46 -0700 Subject: [PATCH] HBASE-24627 Normalize one table at a time Introduce an additional method to our Admin interface that allows an operator to selectively run the normalizer. The IPC protocol supports general table name selection via compound filter. Signed-off-by: Sean Busbey Signed-off-by: Viraj Jasani --- .../org/apache/hadoop/hbase/client/Admin.java | 18 +- .../hbase/client/AdminOverAsyncAdmin.java | 4 +- .../hadoop/hbase/client/AsyncAdmin.java | 14 +- .../hadoop/hbase/client/AsyncHBaseAdmin.java | 6 +- .../client/NormalizeTableFilterParams.java | 107 ++++++++++++ .../hbase/client/RawAsyncHBaseAdmin.java | 18 +- .../hbase/shaded/protobuf/ProtobufUtil.java | 7 + .../shaded/protobuf/RequestConverter.java | 17 +- .../main/protobuf/server/master/Master.proto | 3 + .../apache/hadoop/hbase/master/HMaster.java | 36 ++-- .../hbase/master/MasterRpcServices.java | 10 +- .../TestSimpleRegionNormalizerOnCluster.java | 160 ++++++++++++++---- .../hbase/rsgroup/VerifyingRSGroupAdmin.java | 8 +- hbase-shell/src/main/ruby/hbase/admin.rb | 51 +++++- hbase-shell/src/main/ruby/hbase_constants.rb | 3 + .../src/main/ruby/shell/commands/normalize.rb | 20 ++- .../hbase/thrift2/client/ThriftAdmin.java | 6 +- 17 files changed, 407 insertions(+), 81 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 155f0249daa..40db1c1ac04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -856,10 +856,24 @@ public interface Admin extends Abortable, Closeable { * the request was submitted successfully. 
We need to check logs for the details of which regions * were split/merged. * - * @return true if region normalizer ran, false otherwise. + * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs */ - boolean normalize() throws IOException; + default boolean normalize() throws IOException { + return normalize(new NormalizeTableFilterParams.Builder().build()); + } + + /** + * Invoke region normalizer. Can NOT run for various reasons. Check logs. + * This is a non-blocking invocation to region normalizer. If return value is true, it means + * the request was submitted successfully. We need to check logs for the details of which regions + * were split/merged. + * + * @param ntfp limit to tables matching the specified filter. + * @return {@code true} if region normalizer ran, {@code false} otherwise. + * @throws IOException if a remote or network exception occurs + */ + boolean normalize(NormalizeTableFilterParams ntfp) throws IOException; /** * Query the current state of the region normalizer. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java index 1255753f05e..1b7a24bb36a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java @@ -395,8 +395,8 @@ class AdminOverAsyncAdmin implements Admin { } @Override - public boolean normalize() throws IOException { - return get(admin.normalize()); + public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException { + return get(admin.normalize(ntfp)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 336903d42e3..8c877e9c943 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -1279,7 +1279,17 @@ public interface AsyncAdmin { * @return true if region normalizer ran, false otherwise. The return value will be wrapped by a * {@link CompletableFuture} */ - CompletableFuture normalize(); + default CompletableFuture normalize() { + return normalize(new NormalizeTableFilterParams.Builder().build()); + } + + /** + * Invoke region normalizer. Can NOT run for various reasons. Check logs. + * @param ntfp limit to tables matching the specified filter. + * @return true if region normalizer ran, false otherwise. The return value will be wrapped by a + * {@link CompletableFuture} + */ + CompletableFuture normalize(NormalizeTableFilterParams ntfp); /** * Turn the cleaner chore on/off. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index c29fe7118dd..2301d4a811c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -709,8 +709,8 @@ class AsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture normalize() { - return wrap(rawAdmin.normalize()); + public CompletableFuture normalize(NormalizeTableFilterParams ntfp) { + return wrap(rawAdmin.normalize(ntfp)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java new file mode 100644 index 00000000000..982ec5b0065 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import java.util.List; +import org.apache.hadoop.hbase.TableName; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A collection of criteria used for table selection. The logic of table selection is as follows: + *
    + *
  • + * When no parameter values are provided, an unfiltered list of all user tables is returned. + *
  • + *
  • + * When a list of {@link TableName TableNames} are provided, the filter starts with any of + * these tables that exist. + *
  • + *
  • + * When a {@code namespace} name is provided, the filter starts with all the tables present in + * that namespace. + *
  • + *
  • + * If both a list of {@link TableName TableNames} and a {@code namespace} name are provided, + * the {@link TableName} list is honored and the {@code namespace} name is ignored. + *
  • + *
  • + * If a {@code regex} is provided, this subset of {@link TableName TableNames} is further + * reduced to those that match the provided regular expression. + *
  • + *
+ */ +@InterfaceAudience.Public +public final class NormalizeTableFilterParams { + private final List tableNames; + private final String regex; + private final String namespace; + + private NormalizeTableFilterParams(final List tableNames, final String regex, + final String namespace) { + this.tableNames = tableNames; + this.regex = regex; + this.namespace = namespace; + } + + public List getTableNames() { + return tableNames; + } + + public String getRegex() { + return regex; + } + + public String getNamespace() { + return namespace; + } + + /** + * Used to instantiate an instance of {@link NormalizeTableFilterParams}. + */ + public static class Builder { + private List tableNames; + private String regex; + private String namespace; + + public Builder tableFilterParams(final NormalizeTableFilterParams ntfp) { + this.tableNames = ntfp.getTableNames(); + this.regex = ntfp.getRegex(); + this.namespace = ntfp.getNamespace(); + return this; + } + + public Builder tableNames(final List tableNames) { + this.tableNames = tableNames; + return this; + } + + public Builder regex(final String regex) { + this.regex = regex; + return this; + } + + public Builder namespace(final String namespace) { + this.namespace = namespace; + return this; + } + + public NormalizeTableFilterParams build() { + return new NormalizeTableFilterParams(tableNames, regex, namespace); + } + } +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 1330b2a1756..d740a3a26b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -3286,14 +3286,18 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { } @Override - public CompletableFuture normalize() { + public CompletableFuture normalize(NormalizeTableFilterParams ntfp) { + return 
normalize(RequestConverter.buildNormalizeRequest(ntfp)); + } + + private CompletableFuture normalize(NormalizeRequest request) { return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, stub, RequestConverter.buildNormalizeRequest(), - (s, c, req, done) -> s.normalize(c, req, done), (resp) -> resp.getNormalizerRan())) - .call(); + . newMasterCaller() + .action( + (controller, stub) -> this.call( + controller, stub, request, MasterService.Interface::normalize, + NormalizeResponse::getNormalizerRan)) + .call(); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 4a6adb180bc..ff202913b04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -2293,6 +2293,13 @@ public final class ProtobufUtil { .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build(); } + public static List toProtoTableNameList(List tableNameList) { + if (tableNameList == null) { + return new ArrayList<>(); + } + return tableNameList.stream().map(ProtobufUtil::toProtoTableName).collect(Collectors.toList()); + } + public static List toTableNameList(List tableNamesList) { if (tableNamesList == null) { return new ArrayList<>(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index 7b0282afc08..1352b7714bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license 
agreements. See the NOTICE file * distributed with this work for additional information @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.LogQueryFilter; import org.apache.hadoop.hbase.client.MasterSwitchType; import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec; import org.apache.hadoop.hbase.client.RegionInfo; @@ -1476,8 +1477,18 @@ public final class RequestConverter { * * @return a NormalizeRequest */ - public static NormalizeRequest buildNormalizeRequest() { - return NormalizeRequest.newBuilder().build(); + public static NormalizeRequest buildNormalizeRequest(NormalizeTableFilterParams ntfp) { + final NormalizeRequest.Builder builder = NormalizeRequest.newBuilder(); + if (ntfp.getTableNames() != null) { + builder.addAllTableNames(ProtobufUtil.toProtoTableNameList(ntfp.getTableNames())); + } + if (ntfp.getRegex() != null) { + builder.setRegex(ntfp.getRegex()); + } + if (ntfp.getNamespace() != null) { + builder.setNamespace(ntfp.getNamespace()); + } + return builder.build(); } /** diff --git a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto index 286c96f688f..b70ddef034a 100644 --- a/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto +++ b/hbase-protocol-shaded/src/main/protobuf/server/master/Master.proto @@ -354,6 +354,9 @@ message IsSplitOrMergeEnabledResponse { } message NormalizeRequest { + repeated TableName table_names = 1; + optional string regex = 2; + optional string namespace = 3; } message NormalizeResponse { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index af99aab9dbf..1eb6525995d 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_SPLIT_COORDINATED import static org.apache.hadoop.hbase.HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS; import static org.apache.hadoop.hbase.HConstants.HBASE_SPLIT_WAL_COORDINATED_BY_ZK; import static org.apache.hadoop.hbase.util.DNS.MASTER_HOSTNAME_KEY; - import java.io.IOException; import java.io.InterruptedIOException; import java.lang.reflect.Constructor; @@ -38,6 +37,7 @@ import java.util.Comparator; import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; @@ -80,9 +80,9 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.UnknownRegionException; -import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.RegionStatesCount; @@ -226,14 +226,13 @@ import org.eclipse.jetty.servlet.ServletHolder; import org.eclipse.jetty.webapp.WebAppContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; +import org.apache.hbase.thirdparty.com.google.common.collect.Sets; import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors; import 
org.apache.hbase.thirdparty.com.google.protobuf.Service; import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; - import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; @@ -1902,14 +1901,18 @@ public class HMaster extends HRegionServer implements MasterServices { return this.normalizer; } + public boolean normalizeRegions() throws IOException { + return normalizeRegions(new NormalizeTableFilterParams.Builder().build()); + } + /** - * Perform normalization of cluster (invoked by {@link RegionNormalizerChore}). + * Perform normalization of cluster. * * @return true if an existing normalization was already in progress, or if a new normalization * was performed successfully; false otherwise (specifically, if HMaster finished initializing * or normalization is globally disabled). */ - public boolean normalizeRegions() throws IOException { + public boolean normalizeRegions(final NormalizeTableFilterParams ntfp) throws IOException { final long startTime = EnvironmentEdgeManager.currentTime(); if (regionNormalizerTracker == null || !regionNormalizerTracker.isNormalizerOn()) { LOG.debug("Region normalization is disabled, don't run region normalizer."); @@ -1930,12 +1933,19 @@ public class HMaster extends HRegionServer implements MasterServices { int affectedTables = 0; try { - final List allEnabledTables = - new ArrayList<>(tableStateManager.getTablesInStates(TableState.State.ENABLED)); - Collections.shuffle(allEnabledTables); + final Set matchingTables = getTableDescriptors(new LinkedList<>(), + ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false) + .stream() + .map(TableDescriptor::getTableName) + .collect(Collectors.toSet()); + final Set allEnabledTables = + tableStateManager.getTablesInStates(TableState.State.ENABLED); + final List targetTables = + new 
ArrayList<>(Sets.intersection(matchingTables, allEnabledTables)); + Collections.shuffle(targetTables); final List submittedPlanProcIds = new ArrayList<>(); - for (TableName table : allEnabledTables) { + for (TableName table : targetTables) { if (table.isSystemTable()) { continue; } @@ -3399,9 +3409,9 @@ public class HMaster extends HRegionServer implements MasterServices { } /** - * @return list of table table descriptors after filtering by regex and whether to include system - * tables, etc. - * @throws IOException + * Return a list of table table descriptors after applying any provided filter parameters. Note + * that the user-facing description of this filter logic is presented on the class-level javadoc + * of {@link NormalizeTableFilterParams}. */ private List getTableDescriptors(final List htds, final String namespace, final String regex, final List tableNameList, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java index c470acd6035..72040ae0b95 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.MasterSwitchType; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionInfoBuilder; @@ -1920,7 +1921,14 @@ public class MasterRpcServices extends RSRpcServices implements NormalizeRequest request) throws ServiceException { rpcPreCheck("normalize"); try { - return NormalizeResponse.newBuilder().setNormalizerRan(master.normalizeRegions()).build(); + final 
NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() + .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList())) + .regex(request.hasRegex() ? request.getRegex() : null) + .namespace(request.hasNamespace() ? request.getNamespace() : null) + .build(); + return NormalizeResponse.newBuilder() + .setNormalizerRan(master.normalizeRegions(ntfp)) + .build(); } catch (IOException ex) { throw new ServiceException(ex); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java index da4c52ea8bd..ee9a160182d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.concurrent.TimeUnit; @@ -35,7 +36,8 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Size; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter.ExplainingPredicate; -import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.AsyncAdmin; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Table; @@ -79,7 +81,7 @@ public class TestSimpleRegionNormalizerOnCluster { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final byte[] FAMILY_NAME = Bytes.toBytes("fam"); - private static Admin admin; 
+ private static AsyncAdmin admin; private static HMaster master; @Rule @@ -94,9 +96,12 @@ public class TestSimpleRegionNormalizerOnCluster { // no way for the test to set the regionId on a created region, so disable this feature. TEST_UTIL.getConfiguration().setInt("hbase.normalizer.merge.min_region_age.days", 0); + // disable the normalizer coming along and running via Chore + TEST_UTIL.getConfiguration().setInt("hbase.normalizer.period", Integer.MAX_VALUE); + TEST_UTIL.startMiniCluster(1); TestNamespaceAuditor.waitForQuotaInitialize(TEST_UTIL); - admin = TEST_UTIL.getAdmin(); + admin = TEST_UTIL.getAsyncConnection().getAdmin(); master = TEST_UTIL.getHBaseCluster().getMaster(); assertNotNull(master); } @@ -107,17 +112,17 @@ public class TestSimpleRegionNormalizerOnCluster { } @Before - public void before() throws IOException { + public void before() throws Exception { // disable the normalizer ahead of time, let the test enable it when its ready. - admin.normalizerSwitch(false); + admin.normalizerSwitch(false).get(); } @Test - public void testHonorsNormalizerSwitch() throws IOException { - assertFalse(admin.isNormalizerEnabled()); - assertFalse(admin.normalize()); - assertFalse(admin.normalizerSwitch(true)); - assertTrue(admin.normalize()); + public void testHonorsNormalizerSwitch() throws Exception { + assertFalse(admin.isNormalizerEnabled().get()); + assertFalse(admin.normalize().get()); + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize().get()); } /** @@ -137,8 +142,8 @@ public class TestSimpleRegionNormalizerOnCluster { final int tn2RegionCount = createTableBegsSplit(tn2, false, false); final int tn3RegionCount = createTableBegsSplit(tn3, true, true); - assertFalse(admin.normalizerSwitch(true)); - assertTrue(admin.normalize()); + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize().get()); waitForTableSplit(tn1, tn1RegionCount + 1); // confirm that tn1 has (tn1RegionCount + 1) number of regions. 
@@ -183,8 +188,8 @@ public class TestSimpleRegionNormalizerOnCluster { final int currentRegionCount = createTableBegsSplit(tableName, true, false); final long existingSkippedSplitCount = master.getRegionNormalizer() .getSkippedCount(PlanType.SPLIT); - assertFalse(admin.normalizerSwitch(true)); - assertTrue(admin.normalize()); + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize().get()); if (limitedByQuota) { waitForSkippedSplits(master, existingSkippedSplitCount); assertEquals( @@ -208,8 +213,8 @@ public class TestSimpleRegionNormalizerOnCluster { final TableName tableName = TableName.valueOf(name.getMethodName()); try { final int currentRegionCount = createTableBegsMerge(tableName); - assertFalse(admin.normalizerSwitch(true)); - assertTrue(admin.normalize()); + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize().get()); waitForTableMerge(tableName, currentRegionCount - 1); assertEquals( tableName + " should have merged.", @@ -220,14 +225,103 @@ public class TestSimpleRegionNormalizerOnCluster { } } - private static TableName buildTableNameForQuotaTest(final String methodName) throws IOException { + @Test + public void testHonorsNamespaceFilter() throws Exception { + final NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create("ns").build(); + final TableName tn1 = TableName.valueOf("ns", name.getMethodName()); + final TableName tn2 = TableName.valueOf(name.getMethodName()); + + try { + admin.createNamespace(namespaceDescriptor).get(); + final int tn1RegionCount = createTableBegsSplit(tn1, true, false); + final int tn2RegionCount = createTableBegsSplit(tn2, true, false); + final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() + .namespace("ns") + .build(); + + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize(ntfp).get()); + waitForTableSplit(tn1, tn1RegionCount + 1); + + // confirm that tn1 has (tn1RegionCount + 1) number of regions. 
+ // tn2 has tn2RegionCount number of regions because it's not a member of the target namespace. + assertEquals( + tn1 + " should have split.", + tn1RegionCount + 1, + MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tn1)); + waitForTableRegionCount(tn2, tn2RegionCount); + } finally { + dropIfExists(tn1); + dropIfExists(tn2); + } + } + + @Test + public void testHonorsPatternFilter() throws Exception { + final TableName tn1 = TableName.valueOf(name.getMethodName() + "1"); + final TableName tn2 = TableName.valueOf(name.getMethodName() + "2"); + + try { + final int tn1RegionCount = createTableBegsSplit(tn1, true, false); + final int tn2RegionCount = createTableBegsSplit(tn2, true, false); + final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() + .regex(".*[1]") + .build(); + + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize(ntfp).get()); + waitForTableSplit(tn1, tn1RegionCount + 1); + + // confirm that tn1 has (tn1RegionCount + 1) number of regions. + // tn2 has tn2RegionCount number of regions because it fails filter. 
+ assertEquals( + tn1 + " should have split.", + tn1RegionCount + 1, + MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tn1)); + waitForTableRegionCount(tn2, tn2RegionCount); + } finally { + dropIfExists(tn1); + dropIfExists(tn2); + } + } + + @Test + public void testHonorsNameFilter() throws Exception { + final TableName tn1 = TableName.valueOf(name.getMethodName() + "1"); + final TableName tn2 = TableName.valueOf(name.getMethodName() + "2"); + + try { + final int tn1RegionCount = createTableBegsSplit(tn1, true, false); + final int tn2RegionCount = createTableBegsSplit(tn2, true, false); + final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder() + .tableNames(Collections.singletonList(tn1)) + .build(); + + assertFalse(admin.normalizerSwitch(true).get()); + assertTrue(admin.normalize(ntfp).get()); + waitForTableSplit(tn1, tn1RegionCount + 1); + + // confirm that tn1 has (tn1RegionCount + 1) number of regions. + // tn2 has tn3RegionCount number of regions because it fails filter: + assertEquals( + tn1 + " should have split.", + tn1RegionCount + 1, + MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tn1)); + waitForTableRegionCount(tn2, tn2RegionCount); + } finally { + dropIfExists(tn1); + dropIfExists(tn2); + } + } + + private static TableName buildTableNameForQuotaTest(final String methodName) throws Exception { String nsp = "np2"; NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp) .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); - admin.createNamespace(nspDesc); - return TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + methodName); + admin.createNamespace(nspDesc).get(); + return TableName.valueOf(nsp, methodName); } private static void waitForSkippedSplits(final HMaster master, @@ -347,16 +441,17 @@ public class TestSimpleRegionNormalizerOnCluster { */ private static int createTableBegsSplit(final TableName tableName, final 
boolean normalizerEnabled, final boolean isMergeEnabled) - throws IOException { + throws Exception { final List generatedRegions = generateTestData(tableName, 1, 1, 2, 3, 5); assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName)); - admin.flush(tableName); + admin.flush(tableName).get(); - final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName)) + final TableDescriptor td = TableDescriptorBuilder + .newBuilder(admin.getDescriptor(tableName).get()) .setNormalizationEnabled(normalizerEnabled) .setMergeEnabled(isMergeEnabled) .build(); - admin.modifyTable(td); + admin.modifyTable(td).get(); // make sure relatively accurate region statistics are available for the test table. use // the last/largest region as clue. @@ -383,16 +478,17 @@ public class TestSimpleRegionNormalizerOnCluster { *
  • sum of sizes of first two regions < average
  • * */ - private static int createTableBegsMerge(final TableName tableName) throws IOException { + private static int createTableBegsMerge(final TableName tableName) throws Exception { // create 5 regions with sizes to trigger merge of small regions final List generatedRegions = generateTestData(tableName, 1, 1, 3, 3, 5); assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName)); - admin.flush(tableName); + admin.flush(tableName).get(); - final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName)) + final TableDescriptor td = TableDescriptorBuilder + .newBuilder(admin.getDescriptor(tableName).get()) .setNormalizationEnabled(true) .build(); - admin.modifyTable(td); + admin.modifyTable(td).get(); // make sure relatively accurate region statistics are available for the test table. use // the last/largest region as clue. @@ -411,12 +507,12 @@ public class TestSimpleRegionNormalizerOnCluster { return 5; } - private static void dropIfExists(final TableName tableName) throws IOException { - if (tableName != null && admin.tableExists(tableName)) { - if (admin.isTableEnabled(tableName)) { - admin.disableTable(tableName); + private static void dropIfExists(final TableName tableName) throws Exception { + if (tableName != null && admin.tableExists(tableName).get()) { + if (admin.isTableEnabled(tableName).get()) { + admin.disableTable(tableName).get(); } - admin.deleteTable(tableName); + admin.deleteTable(tableName).get(); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java index aad0d416d26..16aa12f601a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one 
* or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.CompactType; import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.OnlineLogRecord; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Result; @@ -341,8 +342,9 @@ public class VerifyingRSGroupAdmin implements Admin, Closeable { return admin.clearBlockCache(tableName); } - public boolean normalize() throws IOException { - return admin.normalize(); + @Override + public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException { + return admin.normalize(ntfp); } public boolean isNormalizerEnabled() throws IOException { diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index f298a123dbc..5392cdf8b8e 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -258,9 +258,54 @@ module Hbase #---------------------------------------------------------------------------------------------- # Requests region normalization for all configured tables in the cluster - # Returns true if normalizer ran successfully - def normalize - @admin.normalize + # Returns true if normalize request was successfully submitted + def normalize(*args) + builder = org.apache.hadoop.hbase.client.NormalizeTableFilterParams::Builder.new + args.each do |arg| + unless arg.is_a?(String) || arg.is_a?(Hash) + raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type") + end + + if arg.key?(TABLE_NAME) + table_name = arg.delete(TABLE_NAME) + unless table_name.is_a?(String) + raise(ArgumentError, "#{TABLE_NAME} must be of type String") + end + + 
builder.tableNames(java.util.Collections.singletonList(TableName.valueOf(table_name))) + elsif arg.key?(TABLE_NAMES) + table_names = arg.delete(TABLE_NAMES) + unless table_names.is_a?(Array) + raise(ArgumentError, "#{TABLE_NAMES} must be of type Array") + end + + table_name_list = java.util.LinkedList.new + table_names.each do |tn| + unless tn.is_a?(String) + raise(ArgumentError, "#{TABLE_NAMES} value #{tn} must be of type String") + end + + table_name_list.add(TableName.valueOf(tn)) + end + builder.tableNames(table_name_list) + elsif arg.key?(REGEX) + regex = arg.delete(REGEX) + raise(ArgumentError, "#{REGEX} must be of type String") unless regex.is_a?(String) + + builder.regex(regex) + elsif arg.key?(NAMESPACE) + namespace = arg.delete(NAMESPACE) + unless namespace.is_a?(String) + raise(ArgumentError, "#{NAMESPACE} must be of type String") + end + + builder.namespace(namespace) + else + raise(ArgumentError, "Unrecognized argument #{arg}") + end + end + ntfp = builder.build + @admin.normalize(ntfp) end #---------------------------------------------------------------------------------------------- diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb index 6db91c7cd02..b1b0eaee667 100644 --- a/hbase-shell/src/main/ruby/hbase_constants.rb +++ b/hbase-shell/src/main/ruby/hbase_constants.rb @@ -71,6 +71,7 @@ module HBaseConstants POLICY = 'POLICY'.freeze RAW = 'RAW'.freeze READ_TYPE = 'READ_TYPE'.freeze + REGEX = 'REGEX'.freeze REGIONSERVER = 'REGIONSERVER'.freeze REGION_REPLICATION = 'REGION_REPLICATION'.freeze REGION_REPLICA_ID = 'REGION_REPLICA_ID'.freeze @@ -91,6 +92,8 @@ module HBaseConstants STOPROW = 'STOPROW'.freeze TABLE = 'TABLE'.freeze TABLE_CFS = 'TABLE_CFS'.freeze + TABLE_NAME = 'TABLE_NAME'.freeze + TABLE_NAMES = 'TABLE_NAMES'.freeze TIMERANGE = 'TIMERANGE'.freeze TIMESTAMP = 'TIMESTAMP'.freeze TYPE = 'TYPE'.freeze diff --git a/hbase-shell/src/main/ruby/shell/commands/normalize.rb 
b/hbase-shell/src/main/ruby/shell/commands/normalize.rb index 2840e845bd6..70e524ae11c 100644 --- a/hbase-shell/src/main/ruby/shell/commands/normalize.rb +++ b/hbase-shell/src/main/ruby/shell/commands/normalize.rb @@ -22,18 +22,24 @@ module Shell class Normalize < Command def help <<-EOF -Trigger region normalizer for all tables which have NORMALIZATION_ENABLED flag set. Returns true - if normalizer ran successfully, false otherwise. Note that this command has no effect - if region normalizer is disabled (make sure it's turned on using 'normalizer_switch' command). +Trigger the region normalizer. Without arguments, invokes the normalizer without a table filter. +Any arguments are used to limit table selection. Returns true if the normalize request was +submitted successfully, false otherwise. Note that this command has no effect if region normalizer +is disabled (make sure it's turned on using 'normalizer_switch' command). - Examples: +Examples: - hbase> normalize + hbase> normalize + hbase> normalize TABLE_NAME => 'my_table' + hbase> normalize TABLE_NAMES => ['foo', 'bar', 'baz'] + hbase> normalize REGEX => 'my_.*' + hbase> normalize NAMESPACE => 'ns1' + hbase> normalize NAMESPACE => 'ns', REGEX => '.*_BIG_.*' EOF end - def command - did_normalize_run = !!admin.normalize + def command(*args) + did_normalize_run = !!admin.normalize(*args) formatter.row([did_normalize_run.to_s]) did_normalize_run end diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java index 9758d087b70..633238bc870 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,6 +43,7 @@ import org.apache.hadoop.hbase.client.CompactType; import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.LogQueryFilter; +import org.apache.hadoop.hbase.client.NormalizeTableFilterParams; import org.apache.hadoop.hbase.client.OnlineLogRecord; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.SnapshotDescription; @@ -639,7 +639,7 @@ public class ThriftAdmin implements Admin { } @Override - public boolean normalize() { + public boolean normalize(NormalizeTableFilterParams ntfp) { throw new NotImplementedException("normalize not supported in ThriftAdmin"); }