[jira] [HBASE-5335] Dynamic Schema Config

Summary: Ability to add config options on a per-table & per-cf basis

Test Plan: - mvn test

Reviewers: JIRA, Kannan, stack, mbautin, Liyin

Reviewed By: mbautin

CC: tedyu

Differential Revision: https://reviews.facebook.net/D2247

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1311269 13f79535-47bb-0310-9956-ffa450edef68
Author: Nicolas Spiegelberg
Date: 2012-04-09 14:54:52 +00:00
parent 00316a4a51
commit 167f012d64
14 changed files with 1137 additions and 141 deletions
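
The gist of the change, as a hedged sketch (the table name 't1', family 'cf1', and the chosen key are illustrative, not part of the patch); TestFromClientSide3 below exercises this flow end-to-end:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class DynamicSchemaConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    // Per-table override: stored in the HTD's value map and folded into the
    // region's configuration when the region (re)opens.
    HTableDescriptor htd = admin.getTableDescriptor(Bytes.toBytes("t1"));
    htd.setValue("hbase.hstore.compaction.min", "5");
    // Per-CF override: wins over the table-level value for this family.
    HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(Bytes.toBytes("cf1")));
    hcd.setValue("hbase.hstore.compaction.min", "2");
    htd.addFamily(hcd);
    admin.modifyTable(Bytes.toBytes("t1"), htd);
    // setValue(key, null) deletes the key, removing the override again.
  }
}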

HColumnDescriptor.java

@@ -24,7 +24,9 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -153,7 +155,10 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
*/
public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL;
private final static Map<String, String> DEFAULT_VALUES
= new HashMap<String, String>();
private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
= new HashSet<ImmutableBytesWritable>();
static {
DEFAULT_VALUES.put(BLOOMFILTER, DEFAULT_BLOOMFILTER);
DEFAULT_VALUES.put(REPLICATION_SCOPE, String.valueOf(DEFAULT_REPLICATION_SCOPE));
@@ -169,13 +174,16 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
String.valueOf(DEFAULT_ENCODE_ON_DISK));
DEFAULT_VALUES.put(DATA_BLOCK_ENCODING,
String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
for (String s : DEFAULT_VALUES.keySet()) {
RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
}
}
// Column family name
private byte [] name;
// Column metadata
protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable,ImmutableBytesWritable>();
/*
@@ -431,6 +439,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* @return All values.
*/
public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
// shallow pointer copy
return Collections.unmodifiableMap(values);
}
@@ -458,7 +467,11 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* @return this (for chained invocation)
*/
public HColumnDescriptor setValue(String key, String value) {
if (value == null) {
remove(Bytes.toBytes(key));
} else {
setValue(Bytes.toBytes(key), Bytes.toBytes(value));
}
return this;
}
@@ -761,16 +774,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
s.append(" => '");
s.append(Bytes.toString(name));
s.append("'");
s.append(getValues(true));
s.append('}');
return s.toString();
}
@@ -785,22 +789,63 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
s.append(" => '");
s.append(Bytes.toString(name));
s.append("'");
s.append(getValues(false));
s.append('}');
return s.toString();
}
private StringBuilder getValues(boolean printDefaults) {
StringBuilder s = new StringBuilder();
boolean hasConfigKeys = false;
// print all reserved keys first
for (ImmutableBytesWritable k : values.keySet()) {
if (!RESERVED_KEYWORDS.contains(k)) {
hasConfigKeys = true;
continue;
}
String key = Bytes.toString(k.get());
String value = Bytes.toString(values.get(k).get());
if (printDefaults
|| !DEFAULT_VALUES.containsKey(key)
|| !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
s.append(", ");
s.append(key);
s.append(" => ");
s.append('\'').append(value).append('\'');
}
}
// print all non-reserved, advanced config keys as a separate subset
if (hasConfigKeys) {
s.append(", ");
s.append(HConstants.CONFIG).append(" => ");
s.append('{');
boolean printComma = false;
for (ImmutableBytesWritable k : values.keySet()) {
if (RESERVED_KEYWORDS.contains(k)) {
continue;
}
String key = Bytes.toString(k.get());
String value = Bytes.toString(values.get(k).get());
if (printComma) {
s.append(", ");
}
printComma = true;
s.append('\'').append(key).append('\'');
s.append(" => ");
s.append('\'').append(value).append('\'');
}
s.append('}');
}
return s;
}
public static Map<String, String> getDefaultValues() {
return Collections.unmodifiableMap(DEFAULT_VALUES);
}
/**
* @see java.lang.Object#equals(java.lang.Object)
*/
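
A hedged illustration of the string forms produced by the getValues() helper above; the shapes in the comments are approximate, not captured output, and the config key is illustrative:

import org.apache.hadoop.hbase.HColumnDescriptor;

public class HcdToStringSketch {
  public static void main(String[] args) {
    HColumnDescriptor hcd = new HColumnDescriptor("cf1");
    hcd.setValue("hbase.hstore.compaction.min", "2"); // non-reserved key
    // toString() prints every reserved key, then groups the non-reserved keys
    // under a CONFIG subset, roughly:
    //   {NAME => 'cf1', BLOOMFILTER => 'NONE', ..., CONFIG =>
    //    {'hbase.hstore.compaction.min' => '2'}}
    System.out.println(hcd.toString());
    // toStringCustomizedValues() additionally prunes reserved keys that still
    // hold their default value.
    System.out.println(hcd.toStringCustomizedValues());
  }
}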

HConstants.java

@@ -408,6 +408,7 @@ public final class HConstants {
public static final String NAME = "NAME";
public static final String VERSIONS = "VERSIONS";
public static final String IN_MEMORY = "IN_MEMORY";
public static final String CONFIG = "CONFIG";
/**
* This is a retry backoff multiplier table similar to the BSD TCP syn

HTableDescriptor.java

@@ -25,10 +25,12 @@ import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.regex.Matcher;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -68,7 +70,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* includes values like IS_ROOT, IS_META, DEFERRED_LOG_FLUSH, SPLIT_POLICY,
* MAX_FILE_SIZE, READONLY, MEMSTORE_FLUSHSIZE etc...
*/
protected final Map<ImmutableBytesWritable, ImmutableBytesWritable> values =
new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
private static final String FAMILIES = "FAMILIES";
@@ -164,6 +166,25 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
private final static Map<String, String> DEFAULT_VALUES
= new HashMap<String, String>();
private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
= new HashSet<ImmutableBytesWritable>();
static {
DEFAULT_VALUES.put(MAX_FILESIZE,
String.valueOf(HConstants.DEFAULT_MAX_FILE_SIZE));
DEFAULT_VALUES.put(READONLY, String.valueOf(DEFAULT_READONLY));
DEFAULT_VALUES.put(MEMSTORE_FLUSHSIZE,
String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE));
DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
for (String s : DEFAULT_VALUES.keySet()) {
RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
}
RESERVED_KEYWORDS.add(IS_ROOT_KEY);
RESERVED_KEYWORDS.add(IS_META_KEY);
}
private volatile Boolean meta = null;
private volatile Boolean root = null;
private Boolean isDeferredLog = null;
@@ -429,6 +450,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #values
*/
public Map<ImmutableBytesWritable,ImmutableBytesWritable> getValues() {
// shallow pointer copy
return Collections.unmodifiableMap(values);
}
@@ -469,8 +491,12 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @see #values
*/
public void setValue(String key, String value) {
if (value == null) {
remove(Bytes.toBytes(key));
} else {
setValue(Bytes.toBytes(key), Bytes.toBytes(value));
}
}
/**
* Remove metadata represented by the key from the {@link #values} map
@@ -679,36 +705,11 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
@Override
public String toString() {
StringBuilder s = new StringBuilder();
s.append('\'').append(Bytes.toString(name)).append('\'');
s.append(getValues(true));
for (HColumnDescriptor f : families.values()) {
s.append(", ").append(f);
}
return s.toString();
}
@@ -718,46 +719,84 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
*/
public String toStringCustomizedValues() {
StringBuilder s = new StringBuilder();
s.append('\'').append(Bytes.toString(name)).append('\'');
s.append(getValues(false));
for(HColumnDescriptor hcd : families.values()) {
s.append(", ").append(hcd.toStringCustomizedValues());
}
return s.toString();
}
private StringBuilder getValues(boolean printDefaults) {
StringBuilder s = new StringBuilder();
// step 1: set partitioning and pruning
Set<ImmutableBytesWritable> reservedKeys = new TreeSet<ImmutableBytesWritable>();
Set<ImmutableBytesWritable> configKeys = new TreeSet<ImmutableBytesWritable>();
for (ImmutableBytesWritable k : values.keySet()) {
if (k == null || k.get() == null) continue;
String key = Bytes.toString(k.get());
// in this section, print out reserved keywords + coprocessor info
if (!RESERVED_KEYWORDS.contains(k) && !key.startsWith("coprocessor$")) {
configKeys.add(k);
continue;
}
// only print out IS_ROOT/IS_META if true
String value = Bytes.toString(values.get(k).get());
if (key.equalsIgnoreCase(IS_ROOT) || key.equalsIgnoreCase(IS_META)) {
if (Boolean.valueOf(value) == false) continue;
}
// see if a reserved key is a default value. may not want to print it out
if (printDefaults
|| !DEFAULT_VALUES.containsKey(key)
|| !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) {
reservedKeys.add(k);
}
}
// early exit optimization
if (reservedKeys.isEmpty() && configKeys.isEmpty()) return s;
// step 2: printing
s.append(", {METHOD => 'table_att'");
// print all reserved keys first
for (ImmutableBytesWritable k : reservedKeys) {
String key = Bytes.toString(k.get());
String value = Bytes.toString(values.get(k).get());
s.append(", ");
s.append(key);
s.append(" => ");
s.append('\'').append(value).append('\'');
}
if (!configKeys.isEmpty()) {
// print all non-reserved, advanced config keys as a separate subset
s.append(", ");
s.append(HConstants.CONFIG).append(" => ");
s.append("{");
boolean printComma = false;
for (ImmutableBytesWritable k : configKeys) {
String key = Bytes.toString(k.get());
String value = Bytes.toString(values.get(k).get());
if (printComma) s.append(", ");
printComma = true;
s.append('\'').append(key).append('\'');
s.append(" => ");
s.append('\'').append(value).append('\'');
}
s.append("}");
}
s.append('}'); // end METHOD
return s;
}
public static Map<String, String> getDefaultValues() {
return Collections.unmodifiableMap(DEFAULT_VALUES);
}
/**
* Compare the contents of the descriptor with another one passed as a parameter.
* Checks if the obj passed is an instance of HTableDescriptor, if yes then the
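
For the table descriptor, the analogous hedged illustration: non-default table attributes now print as a table_att clause, with custom keys grouped under CONFIG so the shell's alter syntax can consume them. The key below is illustrative and the commented shape approximate:

import org.apache.hadoop.hbase.HTableDescriptor;

public class HtdToStringSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor("t1");
    htd.setValue("hbase.hregion.memstore.block.multiplier", "4");
    // Roughly:
    //   't1', {METHOD => 'table_att', CONFIG =>
    //   {'hbase.hregion.memstore.block.multiplier' => '4'}}, {NAME => ...}
    System.out.println(htd.toStringCustomizedValues());
  }
}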

CompoundConfiguration.java (new file)

@@ -0,0 +1,466 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
/**
* Do a shallow merge of multiple KV configuration pools. This is a very useful
* utility class to easily add per-object configurations in addition to wider
* scope settings. This is different from Configuration.addResource()
* functionality, which performs a deep merge and mutates the common data
* structure.
* <p>
* For clarity: the shallow merge allows the user to mutate either of the
* configuration objects and have changes reflected everywhere. In contrast to a
* deep merge, that requires you to explicitly know all applicable copies to
* propagate changes.
* <p>
* This class is package private because we expect significant refactoring here
* on the HBase side when certain HDFS changes are added & ubiquitous. Will
* revisit expanding access at that point.
*/
@InterfaceAudience.Private
class CompoundConfiguration extends Configuration {
/**
* Default Constructor. Initializes empty configuration
*/
public CompoundConfiguration() {
}
// Devs: these APIs are the same contract as their counterparts in
// Configuration.java
private static interface ImmutableConfigMap {
String get(String key);
String getRaw(String key);
Class<?> getClassByName(String name) throws ClassNotFoundException;
int size();
}
protected List<ImmutableConfigMap> configs
= new ArrayList<ImmutableConfigMap>();
/****************************************************************************
* These initial APIs actually required original thought
***************************************************************************/
/**
* Add Hadoop Configuration object to config list
* @param conf configuration object
* @return this, for builder pattern
*/
public CompoundConfiguration add(final Configuration conf) {
if (conf instanceof CompoundConfiguration) {
this.configs.addAll(0, ((CompoundConfiguration) conf).configs);
return this;
}
// put new config at the front of the list (top priority)
this.configs.add(0, new ImmutableConfigMap() {
Configuration c = conf;
@Override
public String get(String key) {
return c.get(key);
}
@Override
public String getRaw(String key) {
return c.getRaw(key);
}
@Override
public Class<?> getClassByName(String name)
throws ClassNotFoundException {
return c.getClassByName(name);
}
@Override
public int size() {
return c.size();
}
@Override
public String toString() {
return c.toString();
}
});
return this;
}
/**
* Add ImmutableBytesWritable map to config list. This map is generally
* created by HTableDescriptor or HColumnDescriptor, but can be abstractly
* used.
*
* @param map
* ImmutableBytesWritable map
* @return this, for builder pattern
*/
public CompoundConfiguration add(
final Map<ImmutableBytesWritable, ImmutableBytesWritable> map) {
// put new map at the front of the list (top priority)
this.configs.add(0, new ImmutableConfigMap() {
Map<ImmutableBytesWritable, ImmutableBytesWritable> m = map;
@Override
public String get(String key) {
ImmutableBytesWritable ibw = new ImmutableBytesWritable(Bytes
.toBytes(key));
if (!m.containsKey(ibw))
return null;
ImmutableBytesWritable value = m.get(ibw);
if (value == null || value.get() == null)
return null;
return Bytes.toString(value.get());
}
@Override
public String getRaw(String key) {
return get(key);
}
@Override
public Class<?> getClassByName(String name)
throws ClassNotFoundException {
return null;
}
@Override
public int size() {
// TODO Auto-generated method stub
return m.size();
}
@Override
public String toString() {
return m.toString();
}
});
return this;
}
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
sb.append("CompoundConfiguration: " + this.configs.size() + " configs");
for (ImmutableConfigMap m : this.configs) {
sb.append(this.configs);
}
return sb.toString();
}
@Override
public String get(String key) {
for (ImmutableConfigMap m : this.configs) {
String value = m.get(key);
if (value != null) {
return value;
}
}
return null;
}
@Override
public String getRaw(String key) {
for (ImmutableConfigMap m : this.configs) {
String value = m.getRaw(key);
if (value != null) {
return value;
}
}
return null;
}
@Override
public Class<?> getClassByName(String name) throws ClassNotFoundException {
for (ImmutableConfigMap m : this.configs) {
try {
Class<?> value = m.getClassByName(name);
if (value != null) {
return value;
}
} catch (ClassNotFoundException e) {
// don't propagate an exception until all configs fail
continue;
}
}
throw new ClassNotFoundException();
}
@Override
public int size() {
int ret = 0;
for (ImmutableConfigMap m : this.configs) {
ret += m.size();
}
return ret;
}
/***************************************************************************
* You should just ignore everything below this line unless there's a bug in
* Configuration.java...
*
* Below get APIs are directly copied from Configuration.java Oh, how I wish
* this wasn't so! A tragically-sad example of why you use interfaces instead
* of inheritance.
*
* Why the duplication? We basically need to override Configuration.getProps
* or we'd need protected access to Configuration.properties so we can modify
* that pointer. There are a bunch of functions in the base Configuration that
* call getProps() and we need to use our derived version instead of the base
* version. We need to make a generic implementation that works across all
* HDFS versions. We should modify Configuration.properties in HDFS 1.0 to be
* protected, but we still need to have this code until that patch makes it to
* all the HDFS versions we support.
***************************************************************************/
@Override
public String get(String name, String defaultValue) {
String ret = get(name);
return ret == null ? defaultValue : ret;
}
@Override
public int getInt(String name, int defaultValue) {
String valueString = get(name);
if (valueString == null)
return defaultValue;
try {
String hexString = getHexDigits(valueString);
if (hexString != null) {
return Integer.parseInt(hexString, 16);
}
return Integer.parseInt(valueString);
} catch (NumberFormatException e) {
return defaultValue;
}
}
@Override
public long getLong(String name, long defaultValue) {
String valueString = get(name);
if (valueString == null)
return defaultValue;
try {
String hexString = getHexDigits(valueString);
if (hexString != null) {
return Long.parseLong(hexString, 16);
}
return Long.parseLong(valueString);
} catch (NumberFormatException e) {
return defaultValue;
}
}
protected String getHexDigits(String value) {
boolean negative = false;
String str = value;
String hexString = null;
if (value.startsWith("-")) {
negative = true;
str = value.substring(1);
}
if (str.startsWith("0x") || str.startsWith("0X")) {
hexString = str.substring(2);
if (negative) {
hexString = "-" + hexString;
}
return hexString;
}
return null;
}
@Override
public float getFloat(String name, float defaultValue) {
String valueString = get(name);
if (valueString == null)
return defaultValue;
try {
return Float.parseFloat(valueString);
} catch (NumberFormatException e) {
return defaultValue;
}
}
@Override
public boolean getBoolean(String name, boolean defaultValue) {
String valueString = get(name);
if ("true".equals(valueString))
return true;
else if ("false".equals(valueString))
return false;
else return defaultValue;
}
@Override
public IntegerRanges getRange(String name, String defaultValue) {
return new IntegerRanges(get(name, defaultValue));
}
@Override
public Collection<String> getStringCollection(String name) {
String valueString = get(name);
return StringUtils.getStringCollection(valueString);
}
@Override
public String[] getStrings(String name) {
String valueString = get(name);
return StringUtils.getStrings(valueString);
}
@Override
public String[] getStrings(String name, String... defaultValue) {
String valueString = get(name);
if (valueString == null) {
return defaultValue;
} else {
return StringUtils.getStrings(valueString);
}
}
@Override
public Class<?>[] getClasses(String name, Class<?>... defaultValue) {
String[] classnames = getStrings(name);
if (classnames == null)
return defaultValue;
try {
Class<?>[] classes = new Class<?>[classnames.length];
for (int i = 0; i < classnames.length; i++) {
classes[i] = getClassByName(classnames[i]);
}
return classes;
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
@Override
public Class<?> getClass(String name, Class<?> defaultValue) {
String valueString = get(name);
if (valueString == null)
return defaultValue;
try {
return getClassByName(valueString);
} catch (ClassNotFoundException e) {
throw new RuntimeException(e);
}
}
@Override
public <U> Class<? extends U> getClass(String name,
Class<? extends U> defaultValue, Class<U> xface) {
try {
Class<?> theClass = getClass(name, defaultValue);
if (theClass != null && !xface.isAssignableFrom(theClass))
throw new RuntimeException(theClass + " not " + xface.getName());
else if (theClass != null)
return theClass.asSubclass(xface);
else
return null;
} catch (Exception e) {
throw new RuntimeException(e);
}
}
/*******************************************************************
* This class is immutable. Quickly abort any attempts to alter it *
*******************************************************************/
@Override
public void clear() {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public Iterator<Map.Entry<String, String>> iterator() {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void set(String name, String value) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setIfUnset(String name, String value) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setInt(String name, int value) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setLong(String name, long value) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setFloat(String name, float value) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setBoolean(String name, boolean value) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setBooleanIfUnset(String name, boolean value) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setStrings(String name, String... values) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setClass(String name, Class<?> theClass, Class<?> xface) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void setClassLoader(ClassLoader classLoader) {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void write(DataOutput out) throws IOException {
throw new UnsupportedOperationException("Immutable Configuration");
}
@Override
public void writeXml(OutputStream out) throws IOException {
throw new UnsupportedOperationException("Immutable Configuration");
}
};
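
A minimal usage sketch of the class above, mirroring how the HRegion and Store constructors below compose their configs. The class is package-private, hence the regionserver package in the sketch; the config key is illustrative:

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;

class CompoundConfigurationSketch {
  public static void main(String[] args) {
    Configuration site = HBaseConfiguration.create();
    HTableDescriptor htd = new HTableDescriptor("t1");
    HColumnDescriptor hcd = new HColumnDescriptor("cf1");
    // add() puts each new source at the front of the lookup list, so the last
    // add wins: per-CF values beat per-table values beat hbase-site.xml.
    Configuration merged = new CompoundConfiguration()
        .add(site)
        .add(htd.getValues())
        .add(hcd.getValues());
    System.out.println(merged.getInt("hbase.hstore.compaction.min", 3));
  }
}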

HRegion.java

@@ -231,6 +231,7 @@ public class HRegion implements HeapSize { // , Writable{
final HLog log;
final FileSystem fs;
final Configuration conf;
final Configuration baseConf;
final int rowLockWaitDuration;
static final int DEFAULT_ROWLOCK_WAIT_DURATION = 30000;
@@ -425,6 +426,7 @@ public class HRegion implements HeapSize { // , Writable{
this.conf = null;
this.rowLockWaitDuration = DEFAULT_ROWLOCK_WAIT_DURATION;
this.rsServices = null;
this.baseConf = null;
this.fs = null;
this.timestampSlop = HConstants.LATEST_TIMESTAMP;
this.rowProcessorTimeout = DEFAULT_ROW_PROCESSOR_TIMEOUT;
@@ -438,6 +440,16 @@ public class HRegion implements HeapSize { // , Writable{
this.scannerReadPoints = new ConcurrentHashMap<RegionScanner, Long>();
}
/**
* HRegion copy constructor. Useful when reopening a closed region (normally
* for unit tests)
* @param other original object
*/
public HRegion(HRegion other) {
this(other.getTableDir(), other.getLog(), other.getFilesystem(),
other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
}
/**
* HRegion constructor. This constructor should only be used for testing and
* extensions. Instances of HRegion should be instantiated with the
@@ -461,14 +473,21 @@ public class HRegion implements HeapSize { // , Writable{
*
* @see HRegion#newHRegion(Path, HLog, FileSystem, Configuration, HRegionInfo, HTableDescriptor, RegionServerServices)
*/
public HRegion(Path tableDir, HLog log, FileSystem fs,
Configuration confParam, final HRegionInfo regionInfo,
final HTableDescriptor htd, RegionServerServices rsServices) {
this.tableDir = tableDir;
this.comparator = regionInfo.getComparator();
this.log = log;
this.fs = fs;
if (confParam instanceof CompoundConfiguration) {
throw new IllegalArgumentException("Need original base configuration");
}
// 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
this.baseConf = confParam;
this.conf = new CompoundConfiguration()
.add(confParam)
.add(htd.getValues());
this.rowLockWaitDuration = conf.getInt("hbase.rowlock.wait.duration",
DEFAULT_ROWLOCK_WAIT_DURATION);
this.regionInfo = regionInfo;
@@ -1099,9 +1118,15 @@ public class HRegion implements HeapSize { // , Writable{
return this.log;
}
/**
* A split takes the config from the parent region & passes it to the daughter
* region's constructor. If 'conf' was passed, you would end up using the HTD
* of the parent region in addition to the new daughter HTD. Pass 'baseConf'
* to the daughter regions to avoid this tricky dedupe problem.
* @return Configuration object
*/
Configuration getBaseConf() {
return this.baseConf;
}
/** @return region directory Path */
@@ -3930,7 +3955,7 @@ public class HRegion implements HeapSize { // , Writable{
listPaths(fs, b.getRegionDir());
}
Configuration conf = a.baseConf;
HTableDescriptor tabledesc = a.getTableDesc();
HLog log = a.getLog();
Path tableDir = a.getTableDir();

SplitTransaction.java

@@ -686,7 +686,7 @@ public class SplitTransaction {
Path regionDir = getSplitDirForDaughter(this.parent.getFilesystem(),
this.splitdir, hri);
HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
this.parent.getLog(), fs, this.parent.getBaseConf(),
hri, this.parent.getTableDesc(), rsServices);
r.readRequestsCount.set(this.parent.getReadRequestsCount() / 2);
r.writeRequestsCount.set(this.parent.getWriteRequestsCount() / 2);

Store.java

@@ -171,9 +171,10 @@ public class Store extends SchemaConfigured implements HeapSize {
* @throws IOException
*/
protected Store(Path basedir, HRegion region, HColumnDescriptor family,
FileSystem fs, Configuration confParam)
throws IOException {
super(new CompoundConfiguration().add(confParam).add(
family.getValues()), region.getRegionInfo().getTableNameAsString(),
Bytes.toString(family.getName()));
HRegionInfo info = region.getRegionInfo();
this.fs = fs;
@@ -183,7 +184,10 @@ public class Store extends SchemaConfigured implements HeapSize {
this.homedir = createStoreHomeDir(this.fs, p);
this.region = region;
this.family = family;
// 'conf' renamed to 'confParam' b/c we use this.conf in the constructor
this.conf = new CompoundConfiguration()
.add(confParam)
.add(family.getValues());
this.blocksize = family.getBlocksize();
this.dataBlockEncoder =
@@ -209,6 +213,7 @@ public class Store extends SchemaConfigured implements HeapSize {
this.minFilesToCompact = Math.max(2,
conf.getInt("hbase.hstore.compaction.min",
/*old name*/ conf.getInt("hbase.hstore.compactionThreshold", 3)));
LOG.info("hbase.hstore.compaction.min = " + this.minFilesToCompact);
// Setting up cache configuration for this family
this.cacheConf = new CacheConfig(conf, family);

hbase.rb

@@ -40,6 +40,7 @@ module HBaseConstants
NAME = org.apache.hadoop.hbase.HConstants::NAME
VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
CONFIG = org.apache.hadoop.hbase.HConstants::CONFIG
STOPROW = "STOPROW"
STARTROW = "STARTROW"
ENDROW = STOPROW

admin.rb

@@ -182,7 +182,12 @@ module Hbase
raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
end
if arg.kind_of?(String)
# the arg is a string, default action is to add a column to the table
htd.addFamily(hcd(arg, htd))
else
# arg is a hash. 4 possibilities:
if (arg.has_key?(SPLITS) or arg.has_key?(SPLITS_FILE))
if arg.has_key?(SPLITS_FILE)
unless File.exist?(arg[SPLITS_FILE])
raise(ArgumentError, "Splits file #{arg[SPLITS_FILE]} doesn't exist")
@@ -199,21 +204,43 @@ module Hbase
splits[idx] = split.to_java_bytes
idx = idx + 1
end
elsif (arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO))
# (1) deprecated region pre-split API
raise(ArgumentError, "Column family configuration should be specified in a separate clause") if arg.has_key?(NAME)
raise(ArgumentError, "Number of regions must be specified") unless arg.has_key?(NUMREGIONS)
raise(ArgumentError, "Split algorithm must be specified") unless arg.has_key?(SPLITALGO)
raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
num_regions = arg[NUMREGIONS]
split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg[SPLITALGO])
splits = split_algo.split(JInteger.valueOf(num_regions))
elsif (method = arg.delete(METHOD))
# (2) table_att modification
raise(ArgumentError, "table_att is currently the only supported method") unless method == 'table_att'
raise(ArgumentError, "NUMREGIONS & SPLITALGO must both be specified") unless arg.has_key?(NUMREGIONS) == arg.has_key?(SPLITALGO)
htd.setMaxFileSize(JLong.valueOf(arg[MAX_FILESIZE])) if arg[MAX_FILESIZE]
htd.setReadOnly(JBoolean.valueOf(arg[READONLY])) if arg[READONLY]
htd.setMemStoreFlushSize(JLong.valueOf(arg[MEMSTORE_FLUSHSIZE])) if arg[MEMSTORE_FLUSHSIZE]
htd.setDeferredLogFlush(JBoolean.valueOf(arg[DEFERRED_LOG_FLUSH])) if arg[DEFERRED_LOG_FLUSH]
htd.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT]) if arg[COMPRESSION_COMPACT]
if arg[NUMREGIONS]
raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
num_regions = arg[NUMREGIONS]
split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg[SPLITALGO])
splits = split_algo.split(JInteger.valueOf(num_regions))
else
# Add column to the table
descriptor = hcd(arg, htd)
if arg[COMPRESSION_COMPACT]
descriptor.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT])
end
htd.addFamily(descriptor)
if arg[CONFIG]
raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg.kind_of?(Hash)
for k,v in arg[CONFIG]
v = v.to_s unless v.nil?
htd.setValue(k, v)
end
end
else
# (3) column family spec
descriptor = hcd(arg, htd)
htd.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT]) if arg[COMPRESSION_COMPACT]
htd.addFamily(hcd(arg, htd))
end
end
end
@@ -414,6 +441,13 @@ module Hbase
end
end
if arg[CONFIG]
raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg.kind_of?(Hash)
for k,v in arg[CONFIG]
v = v.to_s unless v.nil?
htd.setValue(k, v)
end
end
@admin.modifyTable(table_name.to_java_bytes, htd)
if wait == true
puts "Updating all regions with the new schema..."
@@ -555,6 +589,14 @@ module Hbase
family.setCompressionType(org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.valueOf(compression))
end
end
if arg[CONFIG]
raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg.kind_of?(Hash)
for k,v in arg[CONFIG]
v = v.to_s unless v.nil?
family.setValue(k, v)
end
end
return family
end
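# A hedged sketch of the shell usage the CONFIG hooks above enable; the
# table/family names and keys are illustrative:
#   create 't1', {NAME => 'cf1', CONFIG =>
#     {'hbase.hstore.compaction.min' => '2'}}
#   alter 't1', {METHOD => 'table_att', CONFIG =>
#     {'hbase.hregion.memstore.block.multiplier' => '4'}}
# Passing a nil value for a key removes the override, since setValue(key,
# null) now deletes the key from the descriptor.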

HBaseTestCase.java

@@ -180,9 +180,7 @@ public abstract class HBaseTestCase extends TestCase {
protected HRegion openClosedRegion(final HRegion closedRegion)
throws IOException {
HRegion r = new HRegion(closedRegion);
r.initialize();
return r;
}

TestFromClientSide3.java (new file)

@@ -0,0 +1,260 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
@Category(LargeTests.class)
public class TestFromClientSide3 {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL
= new HBaseTestingUtility();
private static byte[] ROW = Bytes.toBytes("testRow");
private static byte[] FAMILY = Bytes.toBytes("testFamily");
private static byte[] QUALIFIER = Bytes.toBytes("testQualifier");
private static byte[] VALUE = Bytes.toBytes("testValue");
private static Random random = new Random();
private static int SLAVES = 3;
/**
* @throws java.lang.Exception
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setBoolean(
"hbase.online.schema.update.enable", true);
TEST_UTIL.startMiniCluster(SLAVES);
}
/**
* @throws java.lang.Exception
*/
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* @throws java.lang.Exception
*/
@Before
public void setUp() throws Exception {
// Nothing to do.
}
/**
* @throws java.lang.Exception
*/
@After
public void tearDown() throws Exception {
// Nothing to do.
}
private void randomCFPuts(HTable table, byte[] row, byte[] family, int nPuts)
throws Exception {
Put put = new Put(row);
for (int i = 0; i < nPuts; i++) {
byte[] qualifier = Bytes.toBytes(random.nextInt());
byte[] value = Bytes.toBytes(random.nextInt());
put.add(family, qualifier, value);
}
table.put(put);
}
private void performMultiplePutAndFlush(HBaseAdmin admin, HTable table,
byte[] row, byte[] family, int nFlushes, int nPuts) throws Exception {
// connection needed for poll-wait
HConnection conn = HConnectionManager.getConnection(TEST_UTIL
.getConfiguration());
HRegionLocation loc = table.getRegionLocation(row, true);
HRegionInterface server = conn.getHRegionConnection(loc.getHostname(), loc
.getPort());
byte[] regName = loc.getRegionInfo().getRegionName();
for (int i = 0; i < nFlushes; i++) {
randomCFPuts(table, row, family, nPuts);
int sfCount = server.getStoreFileList(regName, FAMILY).size();
// TODO: replace this api with a synchronous flush after HBASE-2949
admin.flush(table.getTableName());
// synchronously poll wait for a new storefile to appear (flush happened)
while (server.getStoreFileList(regName, FAMILY).size() == sfCount) {
Thread.sleep(40);
}
}
}
// override the config settings at the CF level and ensure priority
@Test(timeout = 60000)
public void testAdvancedConfigOverride() throws Exception {
/*
* Overall idea: (1) create 3 store files and issue a compaction. config's
* compaction.min == 3, so should work. (2) Increase the compaction.min
* toggle in the HTD to 5 and modify table. If we use the HTD value instead
* of the default config value, adding 3 files and issuing a compaction
* SHOULD NOT work (3) Decrease the compaction.min toggle in the HCD to 2
* and modify table. The CF schema should override the Table schema and now
* cause a minor compaction.
*/
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);
String tableName = "testAdvancedConfigOverride";
byte[] TABLE = Bytes.toBytes(tableName);
HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
HConnection connection = HConnectionManager.getConnection(TEST_UTIL
.getConfiguration());
// Create 3 store files.
byte[] row = Bytes.toBytes(random.nextInt());
performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 100);
// Verify we have multiple store files.
HRegionLocation loc = hTable.getRegionLocation(row, true);
byte[] regionName = loc.getRegionInfo().getRegionName();
HRegionInterface server = connection.getHRegionConnection(
loc.getHostname(), loc.getPort());
assertTrue(server.getStoreFileList(regionName, FAMILY).size() > 1);
// Issue a compaction request
admin.compact(TABLE);
// poll wait for the compactions to happen
for (int i = 0; i < 10 * 1000 / 40; ++i) {
// The number of store files after compaction should be lesser.
loc = hTable.getRegionLocation(row, true);
if (!loc.getRegionInfo().isOffline()) {
regionName = loc.getRegionInfo().getRegionName();
server = connection.getHRegionConnection(loc.getHostname(), loc
.getPort());
if (server.getStoreFileList(regionName, FAMILY).size() <= 1) {
break;
}
}
Thread.sleep(40);
}
// verify the compactions took place and that we didn't just time out
assertTrue(server.getStoreFileList(regionName, FAMILY).size() <= 1);
// change the compaction.min config option for this table to 5
LOG.info("hbase.hstore.compaction.min should now be 5");
HTableDescriptor htd = new HTableDescriptor(hTable.getTableDescriptor());
htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
admin.modifyTable(TABLE, htd);
Pair<Integer, Integer> st;
while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
LOG.debug(st.getFirst() + " regions left to update");
Thread.sleep(40);
}
LOG.info("alter status finished");
// Create 3 more store files.
performMultiplePutAndFlush(admin, hTable, row, FAMILY, 3, 10);
// Issue a compaction request
admin.compact(TABLE);
// This time, the compaction request should not happen
Thread.sleep(10 * 1000);
int sfCount = 0;
loc = hTable.getRegionLocation(row, true);
regionName = loc.getRegionInfo().getRegionName();
server = connection.getHRegionConnection(loc.getHostname(), loc.getPort());
sfCount = server.getStoreFileList(regionName, FAMILY).size();
assertTrue(sfCount > 1);
// change an individual CF's config option to 2 & online schema update
LOG.info("hbase.hstore.compaction.min should now be 2");
HColumnDescriptor hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
hcd.setValue("hbase.hstore.compaction.min", String.valueOf(2));
htd.addFamily(hcd);
admin.modifyTable(TABLE, htd);
while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
LOG.debug(st.getFirst() + " regions left to update");
Thread.sleep(40);
}
LOG.info("alter status finished");
// Issue a compaction request
admin.compact(TABLE);
// poll wait for the compactions to happen
for (int i = 0; i < 10 * 1000 / 40; ++i) {
loc = hTable.getRegionLocation(row, true);
regionName = loc.getRegionInfo().getRegionName();
try {
server = connection.getHRegionConnection(loc.getHostname(), loc
.getPort());
if (server.getStoreFileList(regionName, FAMILY).size() < sfCount) {
break;
}
} catch (Exception e) {
LOG.debug("Waiting for region to come online: " + regionName);
}
Thread.sleep(40);
}
// verify the compaction took place and that we didn't just time out
assertTrue(server.getStoreFileList(regionName, FAMILY).size() < sfCount);
// Finally, ensure that we can remove a custom config value after we made it
LOG.info("Removing CF config value");
LOG.info("hbase.hstore.compaction.min should now be 5");
hcd = new HColumnDescriptor(htd.getFamily(FAMILY));
hcd.setValue("hbase.hstore.compaction.min", null);
htd.addFamily(hcd);
admin.modifyTable(TABLE, htd);
while (null != (st = admin.getAlterStatus(TABLE)) && st.getFirst() > 0) {
LOG.debug(st.getFirst() + " regions left to update");
Thread.sleep(40);
}
LOG.info("alter status finished");
assertNull(hTable.getTableDescriptor().getFamily(FAMILY).getValue(
"hbase.hstore.compaction.min"));
}
@org.junit.Rule
public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}

TestCoprocessorInterface.java

@@ -258,9 +258,7 @@ public class TestCoprocessorInterface extends HBaseTestCase {
HRegion reopenRegion(final HRegion closedRegion, Class<?> implClass)
throws IOException {
//HRegionInfo info = new HRegionInfo(tableName, null, null, false);
HRegion r = new HRegion(closedRegion);
r.initialize();
// this following piece is a hack. currently a coprocessorHost

TestCompoundConfiguration.java (new file)

@@ -0,0 +1,116 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.CompoundConfiguration;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.SmallTests;
import org.junit.experimental.categories.Category;
import org.junit.Test;
import junit.framework.TestCase;
@Category(SmallTests.class)
public class TestCompoundConfiguration extends TestCase {
private Configuration baseConf;
@Override
protected void setUp() throws Exception {
baseConf = new Configuration();
baseConf.set("A", "1");
baseConf.setInt("B", 2);
baseConf.set("C", "3");
}
@Test
public void testBasicFunctionality() throws ClassNotFoundException {
CompoundConfiguration compoundConf = new CompoundConfiguration()
.add(baseConf);
assertEquals("1", compoundConf.get("A"));
assertEquals(2, compoundConf.getInt("B", 0));
assertEquals(3, compoundConf.getInt("C", 0));
assertEquals(0, compoundConf.getInt("D", 0));
assertEquals(CompoundConfiguration.class, compoundConf
.getClassByName(CompoundConfiguration.class.getName()));
try {
compoundConf.getClassByName("bad_class_name");
fail("Trying to load bad_class_name should throw an exception");
} catch (ClassNotFoundException e) {
// win!
}
}
@Test
public void testWithConfig() {
Configuration conf = new Configuration();
conf.set("B", "2b");
conf.set("C", "33");
conf.set("D", "4");
CompoundConfiguration compoundConf = new CompoundConfiguration()
.add(baseConf)
.add(conf);
assertEquals("1", compoundConf.get("A"));
assertEquals("2b", compoundConf.get("B"));
assertEquals(33, compoundConf.getInt("C", 0));
assertEquals("4", compoundConf.get("D"));
assertEquals(4, compoundConf.getInt("D", 0));
assertNull(compoundConf.get("E"));
assertEquals(6, compoundConf.getInt("F", 6));
}
private ImmutableBytesWritable strToIbw(String s) {
return new ImmutableBytesWritable(Bytes.toBytes(s));
}
@Test
public void testWithIbwMap() {
Map<ImmutableBytesWritable, ImmutableBytesWritable> map =
new HashMap<ImmutableBytesWritable, ImmutableBytesWritable>();
map.put(strToIbw("B"), strToIbw("2b"));
map.put(strToIbw("C"), strToIbw("33"));
map.put(strToIbw("D"), strToIbw("4"));
// unlike config, note that IBW Maps can accept null values
map.put(strToIbw("G"), null);
CompoundConfiguration compoundConf = new CompoundConfiguration()
.add(baseConf)
.add(map);
assertEquals("1", compoundConf.get("A"));
assertEquals("2b", compoundConf.get("B"));
assertEquals(33, compoundConf.getInt("C", 0));
assertEquals("4", compoundConf.get("D"));
assertEquals(4, compoundConf.getInt("D", 0));
assertNull(compoundConf.get("E"));
assertEquals(6, compoundConf.getInt("F", 6));
assertNull(compoundConf.get("G"));
}
@org.junit.Rule
public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
}

TestSplitTransaction.java

@@ -217,7 +217,7 @@ public class TestSplitTransaction {
for (HRegion r: daughters) {
// Open so can count its content.
HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
r.getTableDesc(), r.getLog(), TEST_UTIL.getConfiguration());
try {
int count = countRows(openRegion);
assertTrue(count > 0 && count != rowcount);
@@ -273,7 +273,7 @@ public class TestSplitTransaction {
for (HRegion r: daughters) {
// Open so can count its content.
HRegion openRegion = HRegion.openHRegion(this.testdir, r.getRegionInfo(),
r.getTableDesc(), r.getLog(), TEST_UTIL.getConfiguration());
try {
int count = countRows(openRegion);
assertTrue(count > 0 && count != rowcount);