HBASE-25986 set default value of normalization enabled from hbase site (#3372)

Signed-off-by: Viraj Jasani <vjasani@apache.org>
Aman Poonia authored on 2021-07-19 15:25:36 +05:30, committed by GitHub
parent d30cc27097
commit 0f313176be
4 changed files with 34 additions and 14 deletions
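
The patch lets operators choose the cluster-wide default for table normalization in configuration instead of relying on the hard-coded DEFAULT_NORMALIZATION_ENABLED constant. As a minimal sketch (assuming the hbase.table.normalization.enabled property introduced by this commit, shown in the hbase-default.xml hunk below), a cluster could opt all tables into normalization by default via hbase-site.xml:

    <!-- hbase-site.xml sketch: tables that do not set NORMALIZATION_ENABLED in their
         descriptor will inherit this default -->
    <property>
      <name>hbase.table.normalization.enabled</name>
      <value>true</value>
    </property>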

TableDescriptor.java

@@ -275,7 +275,7 @@ public interface TableDescriptor {
   /**
    * Check if normalization enable flag of the table is true. If flag is false
-   * then no region normalizer won't attempt to normalize this table.
+   * then region normalizer won't attempt to normalize this table.
    *
    * @return true if region normalization is enabled for this table
    */

TableDescriptorBuilder.java

@@ -222,11 +222,6 @@ public class TableDescriptorBuilder {
    */
   public static final boolean DEFAULT_MERGE_ENABLED = true;

-  /**
-   * Constant that denotes whether the table is normalized by default.
-   */
-  public static final boolean DEFAULT_NORMALIZATION_ENABLED = false;
-
   /**
    * Constant that denotes the maximum default size of the memstore in bytes after which
    * the contents are flushed to the store files.
@@ -248,7 +243,6 @@ public class TableDescriptorBuilder {
       String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
     DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
-    DEFAULT_VALUES.put(NORMALIZATION_ENABLED, String.valueOf(DEFAULT_NORMALIZATION_ENABLED));
     DEFAULT_VALUES.put(PRIORITY, String.valueOf(DEFAULT_PRIORITY));
     DEFAULT_VALUES.keySet().stream()
       .map(s -> new Bytes(Bytes.toBytes(s))).forEach(RESERVED_KEYWORDS::add);
@@ -865,12 +859,11 @@ public class TableDescriptorBuilder {
     /**
      * Check if normalization enable flag of the table is true. If flag is false
      * then no region normalizer won't attempt to normalize this table.
-     *
      * @return true if region normalization is enabled for this table
-     */
+     **/
     @Override
     public boolean isNormalizationEnabled() {
-      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, DEFAULT_NORMALIZATION_ENABLED);
+      return getOrDefault(NORMALIZATION_ENABLED_KEY, Boolean::valueOf, false);
     }

     /**
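
Removing DEFAULT_NORMALIZATION_ENABLED from DEFAULT_VALUES means a descriptor that never had the flag set no longer carries a NORMALIZATION_ENABLED entry at all, which is what lets the normalizer worker below tell "explicitly configured" apart from "unset". A rough, hedged illustration using the public TableDescriptorBuilder API (table and family names are made up):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Sketch: after this change a freshly built descriptor no longer records
    // NORMALIZATION_ENABLED unless the caller sets it explicitly.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("t1"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    String value = desc.getValue(TableDescriptorBuilder.NORMALIZATION_ENABLED);
    // value is expected to be null here, so the worker falls back to the site-wide default.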

hbase-default.xml

@@ -660,6 +660,13 @@ possible configurations would overwhelm and obscure the important.
     <description>The minimum size for a region to be considered for a merge, in whole
     MBs.</description>
   </property>
+  <property>
+    <name>hbase.table.normalization.enabled</name>
+    <value>false</value>
+    <description>This config is used to set default behaviour of normalizer at table level.
+    To override this at table level one can set NORMALIZATION_ENABLED at table descriptor level
+    and that property will be honored</description>
+  </property>
   <property>
     <name>hbase.server.thread.wakefrequency</name>
     <value>10000</value>
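
As the description says, an individual table can override the new site-wide default by setting NORMALIZATION_ENABLED on its descriptor. A hedged sketch of doing that for an existing table through the Admin API (connection setup and the table name are illustrative only):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("big_table");
      // Copy the current descriptor and record an explicit per-table choice.
      TableDescriptor updated = TableDescriptorBuilder.newBuilder(admin.getDescriptor(name))
          .setNormalizationEnabled(false)
          .build();
      admin.modifyTable(updated); // the explicit value now wins over hbase.table.normalization.enabled
    }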

RegionNormalizerWorker.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
@@ -43,6 +44,8 @@ import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils;
  */
 @InterfaceAudience.Private
 class RegionNormalizerWorker implements PropagatingConfigurationObserver, Runnable {
+  public static final String HBASE_TABLE_NORMALIZATION_ENABLED =
+    "hbase.table.normalization.enabled";
   private static final Logger LOG = LoggerFactory.getLogger(RegionNormalizerWorker.class);

   static final String RATE_LIMIT_BYTES_PER_SEC_KEY =
@@ -55,6 +58,7 @@ class RegionNormalizerWorker implements PropagatingConfigurationObserver, Runnable {
   private final RateLimiter rateLimiter;

   private final long[] skippedCount;
+  private final boolean defaultNormalizerTableLevel;
   private long splitPlanCount;
   private long mergePlanCount;
@@ -71,6 +75,12 @@ class RegionNormalizerWorker implements PropagatingConfigurationObserver, Runnable {
     this.splitPlanCount = 0;
     this.mergePlanCount = 0;
     this.rateLimiter = loadRateLimiter(configuration);
+    this.defaultNormalizerTableLevel = extractDefaultNormalizerValue(configuration);
+  }
+
+  private boolean extractDefaultNormalizerValue(final Configuration configuration) {
+    String s = configuration.get(HBASE_TABLE_NORMALIZATION_ENABLED);
+    return Boolean.parseBoolean(s);
   }

   @Override
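
Worth noting about extractDefaultNormalizerValue: Boolean.parseBoolean is null-safe and returns true only for the string "true" (case-insensitive), so a cluster that does not define the property at all keeps the old behaviour of normalization being disabled by default. A tiny illustration:

    // Boolean.parseBoolean is implemented as "true".equalsIgnoreCase(s), so:
    boolean whenUnset = Boolean.parseBoolean(null);    // false -> same as the removed hard-coded default
    boolean whenFalse = Boolean.parseBoolean("false"); // false
    boolean whenTrue  = Boolean.parseBoolean("TRUE");  // true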
@@ -181,11 +191,21 @@ class RegionNormalizerWorker implements PropagatingConfigurationObserver, Runnable {
     final TableDescriptor tblDesc;
     try {
       tblDesc = masterServices.getTableDescriptors().get(tableName);
-      if (tblDesc != null && !tblDesc.isNormalizationEnabled()) {
-        LOG.debug("Skipping table {} because normalization is disabled in its table properties.",
+      boolean normalizationEnabled;
+      if (tblDesc != null) {
+        String defined = tblDesc.getValue(TableDescriptorBuilder.NORMALIZATION_ENABLED);
+        if (defined != null) {
+          normalizationEnabled = tblDesc.isNormalizationEnabled();
+        } else {
+          normalizationEnabled = this.defaultNormalizerTableLevel;
+        }
+        if (!normalizationEnabled) {
+          LOG.debug("Skipping table {} because normalization is disabled in its table properties " +
+            "and normalization is also disabled at table level by default",
           tableName);
         return Collections.emptyList();
       }
+      }
     } catch (IOException e) {
       LOG.debug("Skipping table {} because unable to access its table descriptor.", tableName, e);
       return Collections.emptyList();
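
Taken together, the precedence the worker now applies could be summarized by a small helper like the following (hypothetical, not part of the patch): an explicit NORMALIZATION_ENABLED value on the descriptor always wins, and only a table that stays silent falls back to hbase.table.normalization.enabled.

    // Hypothetical summary of the new precedence; names and signature are illustrative.
    // descriptorValue: raw NORMALIZATION_ENABLED string from the table descriptor, or null if unset.
    // clusterDefault:  value of hbase.table.normalization.enabled (false when the property is absent).
    static boolean shouldNormalize(String descriptorValue, boolean clusterDefault) {
      if (descriptorValue != null) {
        return Boolean.parseBoolean(descriptorValue); // per-table setting always wins
      }
      return clusterDefault; // otherwise use the site-wide default
    }

    // shouldNormalize(null, true)     -> true   (table silent, cluster opted in)
    // shouldNormalize("false", true)  -> false  (table explicitly opted out)
    // shouldNormalize(null, false)    -> false  (matches the pre-patch default)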