HBASE-17942 Disable region splits and merges per table
Signed-off-by: Guanghao Zhang <zghao@apache.org>
parent 6176471957
commit 593745e8ac
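A minimal client-side usage sketch of the feature this patch adds (not part of the
commit itself; the table name "demo", the column family "cf", and the admin handle
are hypothetical, while the setSplitEnabled/setMergeEnabled calls are the new APIs
introduced below):

  // Create a table with region splits and merges disabled.
  TableDescriptor desc = TableDescriptorBuilder
      .newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
      .setSplitEnabled(false)
      .setMergeEnabled(false)
      .build();
  admin.createTable(desc);

  // Later, re-enable splits by modifying the table descriptor.
  TableDescriptor modified = TableDescriptorBuilder
      .newBuilder(admin.getDescriptor(TableName.valueOf("demo")))
      .setSplitEnabled(true)
      .build();
  admin.modifyTable(modified);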
@@ -57,6 +57,8 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescriptor> {
   public static final Bytes OWNER_KEY = TableDescriptorBuilder.OWNER_KEY;
   public static final String READONLY = TableDescriptorBuilder.READONLY;
   public static final String COMPACTION_ENABLED = TableDescriptorBuilder.COMPACTION_ENABLED;
+  public static final String SPLIT_ENABLED = TableDescriptorBuilder.SPLIT_ENABLED;
+  public static final String MERGE_ENABLED = TableDescriptorBuilder.MERGE_ENABLED;
   public static final String MEMSTORE_FLUSHSIZE = TableDescriptorBuilder.MEMSTORE_FLUSHSIZE;
   public static final String FLUSH_POLICY = TableDescriptorBuilder.FLUSH_POLICY;
   public static final String IS_ROOT = "IS_ROOT";
@@ -271,6 +273,49 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescriptor> {
     return this;
   }
 
+  /**
+   * Check if the region split enable flag of the table is true. If flag is
+   * false then no split will be done.
+   *
+   * @return true if table region split enabled
+   */
+  @Override
+  public boolean isSplitEnabled() {
+    return delegatee.isSplitEnabled();
+  }
+
+  /**
+   * Setting the table region split enable flag.
+   *
+   * @param isEnable True if enable split.
+   */
+  public HTableDescriptor setSplitEnabled(final boolean isEnable) {
+    getDelegateeForModification().setSplitEnabled(isEnable);
+    return this;
+  }
+
+
+  /**
+   * Check if the region merge enable flag of the table is true. If flag is
+   * false then no merge will be done.
+   *
+   * @return true if table region merge enabled
+   */
+  @Override
+  public boolean isMergeEnabled() {
+    return delegatee.isMergeEnabled();
+  }
+
+  /**
+   * Setting the table region merge enable flag.
+   *
+   * @param isEnable True if enable merge.
+   */
+  public HTableDescriptor setMergeEnabled(final boolean isEnable) {
+    getDelegateeForModification().setMergeEnabled(isEnable);
+    return this;
+  }
+
   /**
    * Check if normalization enable flag of the table is true. If flag is
    * false then no region normalizer won't attempt to normalize this table.
@@ -1323,8 +1323,8 @@ public interface Admin extends Abortable, Closeable {
     boolean forcible) throws IOException;
 
   /**
-   + Split a table. The method will execute split action for each region in table.
-   + Asynchronous operation.
+   * Split a table. The method will execute split action for each region in table.
+   * Asynchronous operation.
    * @param tableName table to split
    * @throws IOException if a remote or network exception occurs
    */
@@ -238,6 +238,22 @@ public interface TableDescriptor {
    */
   boolean isCompactionEnabled();
 
+  /**
+   * Check if the split enable flag of the table is true. If flag is false
+   * then no region split will be done.
+   *
+   * @return true if table region split enabled
+   */
+  boolean isSplitEnabled();
+
+  /**
+   * Check if the merge enable flag of the table is true. If flag is false
+   * then no region merge will be done.
+   *
+   * @return true if table region merge enabled
+   */
+  boolean isMergeEnabled();
 
   /**
    * Checks if this table is <code> hbase:meta </code> region.
    *
@@ -91,6 +91,22 @@ public class TableDescriptorBuilder {
   private static final Bytes COMPACTION_ENABLED_KEY
       = new Bytes(Bytes.toBytes(COMPACTION_ENABLED));
 
+  /**
+   * Used by HBase Shell interface to access this metadata
+   * attribute which denotes if the table is split enabled.
+   */
+  @InterfaceAudience.Private
+  public static final String SPLIT_ENABLED = "SPLIT_ENABLED";
+  private static final Bytes SPLIT_ENABLED_KEY = new Bytes(Bytes.toBytes(SPLIT_ENABLED));
+
+  /**
+   * Used by HBase Shell interface to access this metadata
+   * attribute which denotes if the table is merge enabled.
+   */
+  @InterfaceAudience.Private
+  public static final String MERGE_ENABLED = "MERGE_ENABLED";
+  private static final Bytes MERGE_ENABLED_KEY = new Bytes(Bytes.toBytes(MERGE_ENABLED));
+
   /**
    * Used by HBase Shell interface to access this metadata
    * attribute which represents the maximum size of the memstore after which its
@@ -187,6 +203,16 @@ public class TableDescriptorBuilder {
    */
   public static final boolean DEFAULT_COMPACTION_ENABLED = true;
 
+  /**
+   * Constant that denotes whether the table is split enabled by default
+   */
+  public static final boolean DEFAULT_SPLIT_ENABLED = true;
+
+  /**
+   * Constant that denotes whether the table is merge enabled by default
+   */
+  public static final boolean DEFAULT_MERGE_ENABLED = true;
+
   /**
    * Constant that denotes whether the table is normalized by default.
    */
@@ -387,6 +413,16 @@ public class TableDescriptorBuilder {
     return this;
   }
 
+  public TableDescriptorBuilder setSplitEnabled(final boolean isEnable) {
+    desc.setSplitEnabled(isEnable);
+    return this;
+  }
+
+  public TableDescriptorBuilder setMergeEnabled(final boolean isEnable) {
+    desc.setMergeEnabled(isEnable);
+    return this;
+  }
 
   public TableDescriptorBuilder setDurability(Durability durability) {
     desc.setDurability(durability);
     return this;
@@ -735,6 +771,48 @@ public class TableDescriptorBuilder {
     return setValue(COMPACTION_ENABLED_KEY, Boolean.toString(isEnable));
   }
 
+  /**
+   * Check if the split enable flag of the table is true. If flag is false then no split will be
+   * done.
+   *
+   * @return true if table region split enabled
+   */
+  @Override
+  public boolean isSplitEnabled() {
+    return getOrDefault(SPLIT_ENABLED_KEY, Boolean::valueOf, DEFAULT_SPLIT_ENABLED);
+  }
+
+  /**
+   * Setting the table region split enable flag.
+   * @param isEnable True if enable region split.
+   *
+   * @return the modifyable TD
+   */
+  public ModifyableTableDescriptor setSplitEnabled(final boolean isEnable) {
+    return setValue(SPLIT_ENABLED_KEY, Boolean.toString(isEnable));
+  }
+
+  /**
+   * Check if the region merge enable flag of the table is true. If flag is false then no merge
+   * will be done.
+   *
+   * @return true if table region merge enabled
+   */
+  @Override
+  public boolean isMergeEnabled() {
+    return getOrDefault(MERGE_ENABLED_KEY, Boolean::valueOf, DEFAULT_MERGE_ENABLED);
+  }
+
+  /**
+   * Setting the table region merge enable flag.
+   * @param isEnable True if enable region merge.
+   *
+   * @return the modifyable TD
+   */
+  public ModifyableTableDescriptor setMergeEnabled(final boolean isEnable) {
+    return setValue(MERGE_ENABLED_KEY, Boolean.toString(isEnable));
+  }
+
   /**
    * Check if normalization enable flag of the table is true. If flag is false
    * then no region normalizer won't attempt to normalize this table.
@@ -523,6 +523,14 @@ public class MergeTableRegionsProcedure
       return false;
     }
 
+    if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isMergeEnabled()) {
+      String regionsStr = Arrays.deepToString(regionsToMerge);
+      LOG.warn("Merge is disabled for the table! Skipping merge of {}", regionsStr);
+      super.setFailure(getClass().getSimpleName(), new IOException(
+        "Merge of " + regionsStr + " failed as region merge is disabled for the table"));
+      return false;
+    }
+
     // Ask the remote regionserver if regions are mergeable. If we get an IOE, report it
     // along with the failure, so we can see why regions are not mergeable at this time.
     IOException mergeableCheckIOE = null;
@@ -504,6 +504,14 @@ public class SplitTableRegionProcedure
       return false;
     }
 
+    if (!env.getMasterServices().getTableDescriptors().get(getTableName()).isSplitEnabled()) {
+      LOG.warn("pid={}, split is disabled for the table! Skipping split of {}", getProcId(),
+        parentHRI);
+      setFailure(new IOException("Split region " + parentHRI.getRegionNameAsString()
+        + " failed as region split is disabled for the table"));
+      return false;
+    }
+
     // set node state as SPLITTING
     node.setState(State.SPLITTING);
@@ -0,0 +1,283 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+@Category({ MediumTests.class, ClientTests.class })
+public class TestSplitOrMergeAtTableLevel {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestSplitOrMergeAtTableLevel.class);
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static byte[] FAMILY = Bytes.toBytes("testFamily");
+
+  @Rule
+  public TestName name = new TestName();
+  private static Admin admin;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.startMiniCluster(2);
+    admin = TEST_UTIL.getAdmin();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testTableSplitSwitch() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
+      .setSplitEnabled(false).build();
+
+    // create a table with split disabled
+    Table t = TEST_UTIL.createTable(tableDesc, null);
+    TEST_UTIL.waitTableAvailable(tableName);
+
+    // load data into the table
+    TEST_UTIL.loadTable(t, FAMILY, false);
+
+    assertTrue(admin.getRegions(tableName).size() == 1);
+
+    // check that we have split disabled
+    assertFalse(admin.getDescriptor(tableName).isSplitEnabled());
+    trySplitAndEnsureItFails(tableName);
+    enableTableSplit(tableName);
+    trySplitAndEnsureItIsSuccess(tableName);
+  }
+
+  @Test
+  public void testTableSplitSwitchForPreSplittedTable() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+
+    // create a table with split disabled
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
+      .setSplitEnabled(false)
+      .build();
+    Table t = TEST_UTIL.createTable(tableDesc, new byte[][] { Bytes.toBytes(10) });
+    TEST_UTIL.waitTableAvailable(tableName);
+
+    // load data into the table
+    TEST_UTIL.loadTable(t, FAMILY, false);
+
+    assertTrue(admin.getRegions(tableName).size() == 2);
+
+    // check that we have split disabled
+    assertFalse(admin.getDescriptor(tableName).isSplitEnabled());
+    trySplitAndEnsureItFails(tableName);
+    enableTableSplit(tableName);
+    trySplitAndEnsureItIsSuccess(tableName);
+  }
+
+  @Test
+  public void testTableMergeSwitch() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
+      .setMergeEnabled(false)
+      .build();
+
+    Table t = TEST_UTIL.createTable(tableDesc, null);
+    TEST_UTIL.waitTableAvailable(tableName);
+    TEST_UTIL.loadTable(t, FAMILY, false);
+
+    // check merge is disabled for the table
+    assertFalse(admin.getDescriptor(tableName).isMergeEnabled());
+
+    trySplitAndEnsureItIsSuccess(tableName);
+    Threads.sleep(10000);
+    tryMergeAndEnsureItFails(tableName);
+    admin.disableTable(tableName);
+    enableTableMerge(tableName);
+    admin.enableTable(tableName);
+    tryMergeAndEnsureItIsSuccess(tableName);
+  }
+
+  @Test
+  public void testTableMergeSwitchForPreSplittedTable() throws Exception {
+    final TableName tableName = TableName.valueOf(name.getMethodName());
+
+    TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
+      .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY))
+      .setMergeEnabled(false)
+      .build();
+
+    Table t = TEST_UTIL.createTable(tableDesc, new byte[][] { Bytes.toBytes(10) });
+    TEST_UTIL.waitTableAvailable(tableName);
+    TEST_UTIL.loadTable(t, FAMILY, false);
+
+    // check merge is disabled for the table
+    assertFalse(admin.getDescriptor(tableName).isMergeEnabled());
+    assertTrue(admin.getRegions(tableName).size() == 2);
+    tryMergeAndEnsureItFails(tableName);
+    enableTableMerge(tableName);
+    tryMergeAndEnsureItIsSuccess(tableName);
+  }
+
+  private void trySplitAndEnsureItFails(final TableName tableName) throws Exception {
+    // get the original table region count
+    List<RegionInfo> regions = admin.getRegions(tableName);
+    int originalCount = regions.size();
+
+    // split the table and make sure region count does not increase
+    Future<?> f = admin.splitRegionAsync(regions.get(0).getEncodedNameAsBytes(), Bytes.toBytes(2));
+    try {
+      f.get(10, TimeUnit.SECONDS);
+      fail("Should not get here.");
+    } catch (ExecutionException ee) {
+      // expected to reach here
+      // check and ensure that table does not get splitted
+      assertTrue(admin.getRegions(tableName).size() == originalCount);
+    }
+  }
+
+  /**
+   * Method to enable split for the passed table and validate this modification.
+   * @param tableName name of the table
+   */
+  private void enableTableSplit(final TableName tableName) throws Exception {
+    // Get the original table descriptor
+    TableDescriptor originalTableDesc = admin.getDescriptor(tableName);
+    TableDescriptor modifiedTableDesc = TableDescriptorBuilder.newBuilder(originalTableDesc)
+      .setSplitEnabled(true)
+      .build();
+
+    // Now modify the table descriptor and enable split for it
+    admin.modifyTable(modifiedTableDesc);
+
+    // Verify that split is enabled
+    assertTrue(admin.getDescriptor(tableName).isSplitEnabled());
+  }
+
+  private void trySplitAndEnsureItIsSuccess(final TableName tableName)
+      throws Exception {
+    // get the original table region count
+    List<RegionInfo> regions = admin.getRegions(tableName);
+    int originalCount = regions.size();
+
+    // split the table and wait until region count increases
+    admin.split(tableName, Bytes.toBytes(3));
+    TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
+
+      @Override
+      public boolean evaluate() throws Exception {
+        return admin.getRegions(tableName).size() > originalCount;
+      }
+
+      @Override
+      public String explainFailure() throws Exception {
+        return "Split has not finished yet";
+      }
+    });
+  }
+
+  private void tryMergeAndEnsureItFails(final TableName tableName) throws Exception {
+    // assert we have at least 2 regions in the table
+    List<RegionInfo> regions = admin.getRegions(tableName);
+    int originalCount = regions.size();
+    assertTrue(originalCount >= 2);
+
+    byte[] nameOfRegionA = regions.get(0).getEncodedNameAsBytes();
+    byte[] nameOfRegionB = regions.get(1).getEncodedNameAsBytes();
+
+    // check and ensure that region do not get merged
+    Future<?> f = admin.mergeRegionsAsync(nameOfRegionA, nameOfRegionB, true);
+    try {
+      f.get(10, TimeUnit.SECONDS);
+      fail("Should not get here.");
+    } catch (ExecutionException ee) {
+      // expected to reach here
+      // check and ensure that region do not get merged
+      assertTrue(admin.getRegions(tableName).size() == originalCount);
+    }
+  }
+
+
+  /**
+   * Method to enable merge for the passed table and validate this modification.
+   * @param tableName name of the table
+   */
+  private void enableTableMerge(final TableName tableName) throws Exception {
+    // Get the original table descriptor
+    TableDescriptor originalTableDesc = admin.getDescriptor(tableName);
+    TableDescriptor modifiedTableDesc = TableDescriptorBuilder.newBuilder(originalTableDesc)
+      .setMergeEnabled(true)
+      .build();
+
+    // Now modify the table descriptor and enable merge for it
+    admin.modifyTable(modifiedTableDesc);
+
+    // Verify that merge is enabled
+    assertTrue(admin.getDescriptor(tableName).isMergeEnabled());
+  }
+
+  private void tryMergeAndEnsureItIsSuccess(final TableName tableName) throws Exception {
+    // assert we have at least 2 regions in the table
+    List<RegionInfo> regions = admin.getRegions(tableName);
+    int originalCount = regions.size();
+    assertTrue(originalCount >= 2);
+
+    byte[] nameOfRegionA = regions.get(0).getEncodedNameAsBytes();
+    byte[] nameOfRegionB = regions.get(1).getEncodedNameAsBytes();
+
+    // merge the table regions and wait until region count decreases
+    admin.mergeRegionsAsync(nameOfRegionA, nameOfRegionB, true);
+    TEST_UTIL.waitFor(30000, new ExplainingPredicate<Exception>() {
+
+      @Override
+      public boolean evaluate() throws Exception {
+        return admin.getRegions(tableName).size() < originalCount;
+      }
+
+      @Override
+      public String explainFailure() throws Exception {
+        return "Merge has not finished yet";
+      }
+    });
+  }
+}
@@ -1268,6 +1268,8 @@ module Hbase
      htd.setMaxFileSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE)
      htd.setReadOnly(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::READONLY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::READONLY)
      htd.setCompactionEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED)
+     htd.setSplitEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_ENABLED)
+     htd.setMergeEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MERGE_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MERGE_ENABLED)
      htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED)
      htd.setNormalizerTargetRegionCount(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_COUNT))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_COUNT)
      htd.setNormalizerTargetRegionSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_SIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_SIZE)
@@ -86,6 +86,11 @@ You can also set REGION_REPLICATION:
 
   hbase> alter 't1', {REGION_REPLICATION => 2}
 
+You can disable/enable table split and/or merge:
+
+  hbase> alter 't1', {SPLIT_ENABLED => false}
+  hbase> alter 't1', {MERGE_ENABLED => false}
+
 There could be more than one alteration in one command:
 
   hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 },
@@ -51,6 +51,7 @@ Examples:
   hbase> # SPLITALGO ("HexStringSplit", "UniformSplit" or classname)
   hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit'}
   hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit', REGION_REPLICATION => 2, CONFIGURATION => {'hbase.hregion.scan.loadColumnFamiliesOnDemand' => 'true'}}
+  hbase> create 't1', 'f1', {SPLIT_ENABLED => false, MERGE_ENABLED => false}
   hbase> create 't1', {NAME => 'f1', DFS_REPLICATION => 1}
 
 You can also keep around a reference to the created table:
@@ -225,13 +225,17 @@ module Hbase
         FLUSH_POLICY => 'org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy',
         REGION_MEMSTORE_REPLICATION => 'TRUE',
         SPLIT_POLICY => 'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy',
-        COMPACTION_ENABLED => 'false')
+        COMPACTION_ENABLED => 'false',
+        SPLIT_ENABLED => 'false',
+        MERGE_ENABLED => 'false')
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
       assert_match(/12345678/, admin.describe(@create_test_name))
       assert_match(/987654321/, admin.describe(@create_test_name))
       assert_match(/77/, admin.describe(@create_test_name))
-      assert_match(/COMPACTION_ENABLED/, admin.describe(@create_test_name))
-      assert_match(/REGION_MEMSTORE_REPLICATION/, admin.describe(@create_test_name))
+      assert_match(/'COMPACTION_ENABLED' => 'false'/, admin.describe(@create_test_name))
+      assert_match(/'SPLIT_ENABLED' => 'false'/, admin.describe(@create_test_name))
+      assert_match(/'MERGE_ENABLED' => 'false'/, admin.describe(@create_test_name))
+      assert_match(/'REGION_MEMSTORE_REPLICATION' => 'true'/, admin.describe(@create_test_name))
       assert_match(/org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy/,
                    admin.describe(@create_test_name))
       assert_match(/org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy/,
@@ -254,10 +258,15 @@ module Hbase
 
     define_test "create should work when attributes value 'false' is not enclosed in single quotation marks" do
       drop_test_table(@create_test_name)
-      command(:create, @create_test_name,{NAME => 'a', BLOCKCACHE => false}, {COMPACTION_ENABLED => false})
+      command(:create, @create_test_name, {NAME => 'a', BLOCKCACHE => false},
+              COMPACTION_ENABLED => false,
+              SPLIT_ENABLED => false,
+              MERGE_ENABLED => false)
       assert_equal(['a:'], table(@create_test_name).get_all_columns.sort)
-      assert_match(/BLOCKCACHE/, admin.describe(@create_test_name))
-      assert_match(/COMPACTION_ENABLED/, admin.describe(@create_test_name))
+      assert_match(/BLOCKCACHE => 'false'/, admin.describe(@create_test_name))
+      assert_match(/'COMPACTION_ENABLED' => 'false'/, admin.describe(@create_test_name))
+      assert_match(/'SPLIT_ENABLED' => 'false'/, admin.describe(@create_test_name))
+      assert_match(/'MERGE_ENABLED' => 'false'/, admin.describe(@create_test_name))
     end
 
     #-------------------------------------------------------------------------------