HBASE-24970 Backport HBASE-20985 to branch-1

Closes #2334

Signed-off-by: Viraj Jasani <vjasani@apache.org>
This commit is contained in:
mnpoonia 2020-09-03 14:01:56 +05:30 committed by Viraj Jasani
parent 1dc75e74a7
commit a48515ab7d
No known key found for this signature in database
GPG Key ID: B3D6C0B41C8ADFD5
6 changed files with 200 additions and 21 deletions

View File

@@ -209,6 +209,15 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
private static final ImmutableBytesWritable NORMALIZATION_ENABLED_KEY =
new ImmutableBytesWritable(Bytes.toBytes(NORMALIZATION_ENABLED));
public static final String NORMALIZER_TARGET_REGION_COUNT =
"NORMALIZER_TARGET_REGION_COUNT";
private static final ImmutableBytesWritable NORMALIZER_TARGET_REGION_COUNT_KEY =
new ImmutableBytesWritable(Bytes.toBytes(NORMALIZER_TARGET_REGION_COUNT));
public static final String NORMALIZER_TARGET_REGION_SIZE = "NORMALIZER_TARGET_REGION_SIZE";
private static final ImmutableBytesWritable NORMALIZER_TARGET_REGION_SIZE_KEY =
new ImmutableBytesWritable(Bytes.toBytes(NORMALIZER_TARGET_REGION_SIZE));
/** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
@@ -684,6 +693,33 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
return this;
}
/**
 * Sets the target region count the region normalizer should aim for on this table.
 * @param regionCount desired number of regions
 * @return this {@code HTableDescriptor}, for call chaining
 */
public HTableDescriptor setNormalizerTargetRegionCount(final int regionCount) {
  setValue(NORMALIZER_TARGET_REGION_COUNT_KEY, String.valueOf(regionCount));
  return this;
}
/**
 * Returns the normalizer target region count configured on this table,
 * or -1 when the property has not been set.
 */
public int getNormalizerTargetRegionCount() {
  final byte[] raw = getValue(NORMALIZER_TARGET_REGION_COUNT_KEY);
  return raw == null ? -1 : Integer.parseInt(Bytes.toString(raw));
}
/**
 * Sets the target region size (in MB) the region normalizer should aim for on this table.
 * @param regionSize desired region size in megabytes
 * @return this {@code HTableDescriptor}, for call chaining
 */
public HTableDescriptor setNormalizerTargetRegionSize(final long regionSize) {
  setValue(NORMALIZER_TARGET_REGION_SIZE_KEY, String.valueOf(regionSize));
  return this;
}
/**
 * Returns the normalizer target region size (in MB) configured on this table,
 * or -1 when the property has not been set.
 */
public long getNormalizerTargetRegionSize() {
  final byte[] raw = getValue(NORMALIZER_TARGET_REGION_SIZE_KEY);
  return raw == null ? -1 : Long.parseLong(Bytes.toString(raw));
}
/**
* Sets the {@link Durability} setting for the table. This defaults to Durability.USE_DEFAULT.
* @param durability enum value

View File

@@ -18,15 +18,17 @@
*/
package org.apache.hadoop.hbase.master.normalizer;
import com.google.protobuf.ServiceException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@@ -36,8 +38,6 @@ import org.apache.hadoop.hbase.master.MasterRpcServices;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import com.google.protobuf.ServiceException;
/**
* Simple implementation of region normalizer.
*
@@ -152,7 +152,31 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
}
}
double avgRegionSize = acutalRegionCnt == 0 ? 0 : totalSizeMb / (double) acutalRegionCnt;
// Allow the table owner to override normalization targets via table properties;
// both default to -1 (unset) when the descriptor is unavailable or unconfigured.
int targetRegionCount = -1;
long targetRegionSize = -1;
try {
  HTableDescriptor tableDescriptor = masterServices.getTableDescriptors().get(table);
  if (tableDescriptor != null) {
    targetRegionCount = tableDescriptor.getNormalizerTargetRegionCount();
    targetRegionSize = tableDescriptor.getNormalizerTargetRegionSize();
    // FIX: the original message embedded the literal text "targetRegionSize"
    // inside the string instead of concatenating the variable's value.
    LOG.debug("Table " + table + ": target region count is " + targetRegionCount
      + ", target region size is " + targetRegionSize);
  }
} catch (IOException e) {
  LOG.warn("cannot get the target number and target size of table " + table
    + ", they will be default value -1.");
}
// Precedence: explicit target size > size derived from target count > observed average.
double avgRegionSize;
if (targetRegionSize > 0) {
  avgRegionSize = targetRegionSize;
} else if (targetRegionCount > 0) {
  avgRegionSize = totalSizeMb / (double) targetRegionCount;
} else {
  // NOTE(review): 'acutalRegionCnt' [sic] is declared earlier in this method
  // (outside this hunk) and cannot be renamed here.
  avgRegionSize = acutalRegionCnt == 0 ? 0 : totalSizeMb / (double) acutalRegionCnt;
}
LOG.debug("Table " + table + ", total aggregated regions size: " + totalSizeMb);
LOG.debug("Table " + table + ", average region size: " + avgRegionSize);

View File

@@ -18,6 +18,18 @@
*/
package org.apache.hadoop.hbase.master.normalizer;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.when;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseIOException;
@@ -27,7 +39,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterRpcServices;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -37,20 +48,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
import static org.mockito.Mockito.when;
/**
* Tests logic of {@link SimpleRegionNormalizer}.
*/
@@ -263,6 +260,110 @@ public class TestSimpleRegionNormalizer {
assertEquals(hri4, ((SplitNormalizationPlan) plan).getRegionInfo());
}
/**
 * Verifies normalizer planning when NORMALIZER_TARGET_REGION_SIZE is set on the table:
 * a small target size (20 MB) should produce splits for the four largest regions,
 * while a large target size (200 MB) should produce merges for the two smallest.
 *
 * NOTE(review): this method's name appears swapped with its sibling — despite the
 * name "...TargetRegionCount", it stubs getNormalizerTargetRegionSize() (a target
 * *size*). Confirm and consider renaming the pair.
 */
@Test
public void testSplitWithTargetRegionCount() throws Exception {
final TableName tableName = TableName.valueOf("testSplitWithTargetRegionCount");
// NOTE(review): 'RegionInfo' is a local variable, not a type — lowerCamelCase
// ('regionInfos') would follow convention.
List<HRegionInfo> RegionInfo = new ArrayList<>();
Map<byte[], Integer> regionSizes = new HashMap<>();
// Six contiguous regions with sizes 20..120 MB (total 420 MB).
HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
RegionInfo.add(hri1);
regionSizes.put(hri1.getRegionName(), 20);
HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
RegionInfo.add(hri2);
regionSizes.put(hri2.getRegionName(), 40);
HRegionInfo hri3 = new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
RegionInfo.add(hri3);
regionSizes.put(hri3.getRegionName(), 60);
HRegionInfo hri4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
RegionInfo.add(hri4);
regionSizes.put(hri4.getRegionName(), 80);
HRegionInfo hri5 = new HRegionInfo(tableName, Bytes.toBytes("eee"), Bytes.toBytes("fff"));
RegionInfo.add(hri5);
regionSizes.put(hri5.getRegionName(), 100);
HRegionInfo hri6 = new HRegionInfo(tableName, Bytes.toBytes("fff"), Bytes.toBytes("ggg"));
RegionInfo.add(hri6);
regionSizes.put(hri6.getRegionName(), 120);
setupMocksForNormalizer(regionSizes, RegionInfo);
// test when target region size is 20
// (deep-stubbed mock: every descriptor lookup reports a 20 MB target size)
when(
masterServices.getTableDescriptors().get((TableName) any()).getNormalizerTargetRegionSize())
.thenReturn(20L);
List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
// Regions of 60/80/100/120 MB exceed 2x the 20 MB target -> 4 split plans.
assertEquals(4, plans.size());
for (NormalizationPlan plan : plans) {
assertTrue(plan instanceof SplitNormalizationPlan);
}
// test when target region size is 200
when(
masterServices.getTableDescriptors().get((TableName) any()).getNormalizerTargetRegionSize())
.thenReturn(200L);
plans = normalizer.computePlanForTable(tableName);
// With a 200 MB target no region is split-worthy; the two smallest merge.
assertEquals(2, plans.size());
NormalizationPlan plan = plans.get(0);
assertTrue(plan instanceof MergeNormalizationPlan);
assertEquals(hri1, ((MergeNormalizationPlan) plan).getFirstRegion());
assertEquals(hri2, ((MergeNormalizationPlan) plan).getSecondRegion());
}
/**
 * Verifies normalizer planning when NORMALIZER_TARGET_REGION_COUNT is set on the table:
 * with 4 regions totalling 200 MB, a target count of 8 (avg 25 MB) should yield two
 * splits, while a target count of 3 (avg ~66 MB) should yield one merge.
 *
 * NOTE(review): this method's name appears swapped with its sibling — despite the
 * name "...TargetRegionSize", it stubs getNormalizerTargetRegionCount() (a target
 * *count*). Confirm and consider renaming the pair.
 */
@Test
public void testSplitWithTargetRegionSize() throws Exception {
final TableName tableName = TableName.valueOf("testSplitWithTargetRegionSize");
// NOTE(review): 'RegionInfo' is a local variable, not a type — lowerCamelCase
// ('regionInfos') would follow convention.
List<HRegionInfo> RegionInfo = new ArrayList<>();
Map<byte[], Integer> regionSizes = new HashMap<>();
// Four contiguous regions with sizes 20..80 MB (total 200 MB).
HRegionInfo hri1 = new HRegionInfo(tableName, Bytes.toBytes("aaa"), Bytes.toBytes("bbb"));
RegionInfo.add(hri1);
regionSizes.put(hri1.getRegionName(), 20);
HRegionInfo hri2 = new HRegionInfo(tableName, Bytes.toBytes("bbb"), Bytes.toBytes("ccc"));
RegionInfo.add(hri2);
regionSizes.put(hri2.getRegionName(), 40);
HRegionInfo hri3 =new HRegionInfo(tableName, Bytes.toBytes("ccc"), Bytes.toBytes("ddd"));
RegionInfo.add(hri3);
regionSizes.put(hri3.getRegionName(), 60);
HRegionInfo hri4 = new HRegionInfo(tableName, Bytes.toBytes("ddd"), Bytes.toBytes("eee"));
RegionInfo.add(hri4);
regionSizes.put(hri4.getRegionName(), 80);
setupMocksForNormalizer(regionSizes, RegionInfo);
// test when target region count is 8
// (deep-stubbed mock: every descriptor lookup reports a target count of 8)
when(
masterServices.getTableDescriptors().get((TableName) any()).getNormalizerTargetRegionCount())
.thenReturn(8);
List<NormalizationPlan> plans = normalizer.computePlanForTable(tableName);
// Derived average is 200/8 = 25 MB; the 60 and 80 MB regions exceed 2x -> 2 splits.
assertEquals(2, plans.size());
for (NormalizationPlan plan : plans) {
assertTrue(plan instanceof SplitNormalizationPlan);
}
// test when target region count is 3
when(
masterServices.getTableDescriptors().get((TableName) any()).getNormalizerTargetRegionCount())
.thenReturn(3);
plans = normalizer.computePlanForTable(tableName);
// Derived average is 200/3 MB; the two smallest regions merge.
assertEquals(1, plans.size());
NormalizationPlan plan = plans.get(0);
assertTrue(plan instanceof MergeNormalizationPlan);
assertEquals(hri1, ((MergeNormalizationPlan) plan).getFirstRegion());
assertEquals(hri2, ((MergeNormalizationPlan) plan).getSecondRegion());
}
@SuppressWarnings("MockitoCast")
protected void setupMocksForNormalizer(Map<byte[], Integer> regionSizes,
List<HRegionInfo> hris) {

View File

@@ -1054,7 +1054,7 @@ module Hbase
end
return servernames
end
end
# Apply config specific to a table/column to its descriptor
def set_descriptor_config(descriptor, config)
@@ -1202,6 +1202,16 @@
htd.setReadOnly(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::READONLY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::READONLY)
htd.setCompactionEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED)
htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED)
if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_COUNT)
htd.setNormalizerTargetRegionCount(JInteger.valueOf(
arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_COUNT)
))
end
if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_SIZE)
htd.setNormalizerTargetRegionSize(JLong.valueOf(
arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_SIZE)
))
end
htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE)
# DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY. To keep backward compatible, it still exists.
# However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set

View File

@@ -49,7 +49,8 @@ To delete the 'f1' column family in table 'ns1:t1', use one of:
hbase> alter 'ns1:t1', 'delete' => 'f1'
You can also change table-scope attributes like MAX_FILESIZE, READONLY,
MEMSTORE_FLUSHSIZE, DURABILITY, etc. These can be put at the end;
MEMSTORE_FLUSHSIZE, NORMALIZATION_ENABLED, NORMALIZER_TARGET_REGION_COUNT,
NORMALIZER_TARGET_REGION_SIZE(MB), DURABILITY, etc. These can be put at the end;
for example, to change the max size of a region to 128MB, do:
hbase> alter 't1', MAX_FILESIZE => '134217728'

View File

@@ -365,6 +365,13 @@ module Hbase
assert_match(/12345678/, admin.describe(@test_name))
end
# Verifies that `alter` accepts the normalizer target attributes and that the
# configured values appear in the table description output.
# NOTE(review): assert_match(/156/) matches the digits anywhere in the describe
# output, not specifically the NORMALIZER_TARGET_REGION_COUNT value — a tighter
# pattern would make a false positive less likely.
define_test 'alter should be able to set the TargetRegionSize and TargetRegionCount' do
admin.alter(@test_name, true, 'NORMALIZER_TARGET_REGION_COUNT' => 156)
assert_match(/156/, admin.describe(@test_name))
admin.alter(@test_name, true, 'NORMALIZER_TARGET_REGION_SIZE' => 234)
assert_match(/234/, admin.describe(@test_name))
end
def capture_stdout
begin
old_stdout = $stdout