HBASE-10348. HTableDescriptor changes for region replicas
git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-10070@1565658 13f79535-47bb-0310-9956-ffa450edef68
parent d8ea476bf1
commit 481a116e26
@@ -182,6 +182,13 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
   private static final ImmutableBytesWritable DURABILITY_KEY =
       new ImmutableBytesWritable(Bytes.toBytes("DURABILITY"));
+
+  /**
+   * <em>INTERNAL</em> number of region replicas for the table.
+   */
+  public static final String REGION_REPLICATION = "REGION_REPLICATION";
+  private static final ImmutableBytesWritable REGION_REPLICATION_KEY =
+      new ImmutableBytesWritable(Bytes.toBytes(REGION_REPLICATION));
 
   /** Default durability for HTD is USE_DEFAULT, which defaults to HBase-global default value */
   private static final Durability DEFAULT_DURABLITY = Durability.USE_DEFAULT;
 
@@ -214,6 +221,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
    */
   public static final long DEFAULT_MEMSTORE_FLUSH_SIZE = 1024*1024*128L;
 
+  public static final int DEFAULT_REGION_REPLICATION = 1;
+
   private final static Map<String, String> DEFAULT_VALUES
     = new HashMap<String, String>();
   private final static Set<ImmutableBytesWritable> RESERVED_KEYWORDS
@@ -227,6 +236,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
     DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH,
         String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH));
     DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name
+    DEFAULT_VALUES.put(REGION_REPLICATION, String.valueOf(DEFAULT_REGION_REPLICATION));
     for (String s : DEFAULT_VALUES.keySet()) {
       RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s)));
     }
@@ -1067,6 +1077,26 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
     return Collections.unmodifiableCollection(this.families.values());
   }
+
+  /**
+   * Returns the configured replicas per region
+   */
+  public int getRegionReplication() {
+    byte[] val = getValue(REGION_REPLICATION_KEY);
+    if (val == null || val.length == 0) {
+      return DEFAULT_REGION_REPLICATION;
+    }
+    return Integer.parseInt(Bytes.toString(val));
+  }
+
+  /**
+   * Sets the number of replicas per region.
+   * @param regionReplication the replication factor per region
+   */
+  public void setRegionReplication(int regionReplication) {
+    setValue(REGION_REPLICATION_KEY,
+        new ImmutableBytesWritable(Bytes.toBytes(Integer.toString(regionReplication))));
+  }
 
   /**
    * Returns all the column family names of the current table. The map of
    * HTableDescriptor contains mapping of family name to HColumnDescriptors.
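
For context, a minimal usage sketch (not part of the patch) of the new accessors; the table name 't1' and column family 'f1' below are illustrative assumptions:

  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;

  public class RegionReplicationSketch {
    public static void main(String[] args) {
      // Hypothetical table descriptor with a single column family.
      HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
      htd.addFamily(new HColumnDescriptor("f1"));

      // Unset, the getter falls back to DEFAULT_REGION_REPLICATION (1).
      System.out.println(htd.getRegionReplication());  // 1

      // Request two replicas per region; the value is stored as a string
      // under the REGION_REPLICATION key, so it survives serialization.
      htd.setRegionReplication(2);
      System.out.println(htd.getRegionReplication());  // 2
    }
  }
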
@@ -49,12 +49,14 @@ public class TestHTableDescriptor {
     htd.setMaxFileSize(v);
     htd.setDurability(Durability.ASYNC_WAL);
     htd.setReadOnly(true);
+    htd.setRegionReplication(2);
     byte [] bytes = htd.toByteArray();
     HTableDescriptor deserializedHtd = HTableDescriptor.parseFrom(bytes);
     assertEquals(htd, deserializedHtd);
     assertEquals(v, deserializedHtd.getMaxFileSize());
     assertTrue(deserializedHtd.isReadOnly());
     assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability());
+    assertEquals(deserializedHtd.getRegionReplication(), 2);
   }
 
   /**
@@ -57,6 +57,7 @@ module HBaseConstants
   SPLITS_FILE = 'SPLITS_FILE'
   SPLITALGO = 'SPLITALGO'
   NUMREGIONS = 'NUMREGIONS'
+  REGION_REPLICATION = 'REGION_REPLICATION'
   CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION
   ATTRIBUTES="ATTRIBUTES"
   VISIBILITY="VISIBILITY"
@@ -220,6 +220,10 @@ module Hbase
           has_columns = true
           next
         end
+        if arg.has_key?(REGION_REPLICATION)
+          region_replication = JInteger.valueOf(arg.delete(REGION_REPLICATION))
+          htd.setRegionReplication(region_replication)
+        end
 
         # Get rid of the "METHOD", which is deprecated for create.
         # We'll do whatever it used to do below if it's table_att.
@@ -49,7 +49,7 @@ Examples:
   hbase> # Optionally pre-split the table into NUMREGIONS, using
   hbase> # SPLITALGO ("HexStringSplit", "UniformSplit" or classname)
   hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit'}
-  hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit', CONFIGURATION => {'hbase.hregion.scan.loadColumnFamiliesOnDemand' => 'true'}}
+  hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit', REGION_REPLICATION => 2, CONFIGURATION => {'hbase.hregion.scan.loadColumnFamiliesOnDemand' => 'true'}}
 
   You can also keep around a reference to the created table:
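
As a hedged sketch only, a rough Java-API equivalent of the shell example above; the HBaseAdmin setup, table name and column family are assumptions, not part of this change:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.HColumnDescriptor;
  import org.apache.hadoop.hbase.HTableDescriptor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.HBaseAdmin;

  public class CreateReplicatedTableSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      HBaseAdmin admin = new HBaseAdmin(conf);
      try {
        // Descriptor for a hypothetical table 't1' with family 'f1'.
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
        htd.addFamily(new HColumnDescriptor("f1"));
        htd.setRegionReplication(2);  // same intent as REGION_REPLICATION => 2 in the shell
        admin.createTable(htd);
      } finally {
        admin.close();
      }
    }
  }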