HBASE-6894 Adding metadata to a table in the shell is both arcane and painful

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1406800 13f79535-47bb-0310-9956-ffa450edef68
Author: Michael Stack
Date: 2012-11-07 21:00:32 +00:00
Parent: e4a99e9794
Commit: 4ae5cc07c1
5 changed files with 265 additions and 216 deletions

File: HTableDescriptor.java

@@ -781,13 +781,15 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
     if (reservedKeys.isEmpty() && configKeys.isEmpty()) return s;
     // step 2: printing
-    s.append(", {METHOD => 'table_att'");
+    s.append(", {TABLE_ATTRIBUTES => {");
     // print all reserved keys first
+    boolean printCommaForAttr = false;
     for (ImmutableBytesWritable k : reservedKeys) {
       String key = Bytes.toString(k.get());
       String value = Bytes.toString(values.get(k).get());
-      s.append(", ");
+      if (printCommaForAttr) s.append(", ");
+      printCommaForAttr = true;
       s.append(key);
       s.append(" => ");
       s.append('\'').append(value).append('\'');
@@ -795,15 +797,16 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
     if (!configKeys.isEmpty()) {
       // print all non-reserved, advanced config keys as a separate subset
-      s.append(", ");
+      if (printCommaForAttr) s.append(", ");
+      printCommaForAttr = true;
       s.append(HConstants.CONFIG).append(" => ");
       s.append("{");
-      boolean printComma = false;
+      boolean printCommaForCfg = false;
       for (ImmutableBytesWritable k : configKeys) {
         String key = Bytes.toString(k.get());
         String value = Bytes.toString(values.get(k).get());
-        if (printComma) s.append(", ");
-        printComma = true;
+        if (printCommaForCfg) s.append(", ");
+        printCommaForCfg = true;
         s.append('\'').append(key).append('\'');
         s.append(" => ");
         s.append('\'').append(value).append('\'');
@@ -811,8 +814,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
       s.append("}");
     }
-    s.append('}'); // end METHOD
+    s.append("}}"); // end METHOD
     return s;
   }
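
Note on the toString change above: table-level attributes used to print as a fake method argument and now print under a nested TABLE_ATTRIBUTES dictionary. For an illustrative descriptor (values made up, not taken from this commit), the shell's describe output shifts roughly like this:

  # before
  't1', {METHOD => 'table_att', MAX_FILESIZE => '134217728', CONFIG => {'mykey' => 'myvalue'}}
  # after
  't1', {TABLE_ATTRIBUTES => {MAX_FILESIZE => '134217728', CONFIG => {'mykey' => 'myvalue'}}}

The new printCommaForAttr flag also drops the leading comma the old code always emitted before the first attribute.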

File: hbase/admin.rb

@@ -186,9 +186,7 @@ module Hbase
       # Flatten params array
       args = args.flatten.compact
-      # Fail if no column families defined
-      raise(ArgumentError, "Table must have at least one column family") if args.empty?
+      has_columns = false

       # Start defining the table
       htd = org.apache.hadoop.hbase.HTableDescriptor.new(table_name)
@@ -200,68 +198,69 @@ module Hbase
           raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
         end
-        if arg.kind_of?(String)
-          # the arg is a string, default action is to add a column to the table
+        # First, handle all the cases where arg is a column family.
+        if arg.kind_of?(String) or arg.has_key?(NAME)
+          # If the arg is a string, default action is to add a column to the table.
+          # If arg has a name, it must also be a column descriptor.
           htd.addFamily(hcd(arg, htd))
-        else
-          # arg is a hash.  4 possibilities:
-          if (arg.has_key?(SPLITS) or arg.has_key?(SPLITS_FILE))
+          has_columns = true
+          next
+        end
+        # Get rid of the "METHOD", which is deprecated for create.
+        # We'll do whatever it used to do below if it's table_att.
+        if (method = arg.delete(METHOD))
+          raise(ArgumentError, "table_att is currently the only supported method") unless method == 'table_att'
+        end
+        # The hash is not a column family. Figure out what's in it.
+        # First, handle splits.
         if arg.has_key?(SPLITS_FILE)
-          unless File.exist?(arg[SPLITS_FILE])
-            raise(ArgumentError, "Splits file #{arg[SPLITS_FILE]} doesn't exist")
+          splits_file = arg.delete(SPLITS_FILE)
+          unless File.exist?(splits_file)
+            raise(ArgumentError, "Splits file #{splits_file} doesn't exist")
           end
           arg[SPLITS] = []
-          File.foreach(arg[SPLITS_FILE]) do |line|
+          File.foreach(splits_file) do |line|
             arg[SPLITS].push(line.strip())
           end
         end
+        if arg.has_key?(SPLITS)
           splits = Java::byte[][arg[SPLITS].size].new
           idx = 0
-          arg[SPLITS].each do |split|
+          arg.delete(SPLITS).each do |split|
             splits[idx] = org.apache.hadoop.hbase.util.Bytes.toBytesBinary(split)
             idx = idx + 1
           end
-        elsif (arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO))
-          # (1) deprecated region pre-split API
-          raise(ArgumentError, "Column family configuration should be specified in a separate clause") if arg.has_key?(NAME)
+        elsif arg.has_key?(NUMREGIONS) or arg.has_key?(SPLITALGO)
+          # deprecated region pre-split API; if one of the above is specified, will be ignored.
           raise(ArgumentError, "Number of regions must be specified") unless arg.has_key?(NUMREGIONS)
           raise(ArgumentError, "Split algorithm must be specified") unless arg.has_key?(SPLITALGO)
           raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
-          num_regions = arg[NUMREGIONS]
-          split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg[SPLITALGO])
-          splits = split_algo.split(JInteger.valueOf(num_regions))
-        elsif (method = arg.delete(METHOD))
-          # (2) table_att modification
-          raise(ArgumentError, "table_att is currently the only supported method") unless method == 'table_att'
-          raise(ArgumentError, "NUMREGIONS & SPLITALGO must both be specified") unless arg.has_key?(NUMREGIONS) == arg.has_key?(split_algo)
-          htd.setMaxFileSize(JLong.valueOf(arg[MAX_FILESIZE])) if arg[MAX_FILESIZE]
-          htd.setReadOnly(JBoolean.valueOf(arg[READONLY])) if arg[READONLY]
-          htd.setMemStoreFlushSize(JLong.valueOf(arg[MEMSTORE_FLUSHSIZE])) if arg[MEMSTORE_FLUSHSIZE]
-          htd.setDeferredLogFlush(JBoolean.valueOf(arg[DEFERRED_LOG_FLUSH])) if arg[DEFERRED_LOG_FLUSH]
-          htd.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT]) if arg[COMPRESSION_COMPACT]
-          if arg[NUMREGIONS]
-            raise(ArgumentError, "Number of regions must be greater than 1") unless arg[NUMREGIONS] > 1
-            num_regions = arg[NUMREGIONS]
-            split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg[SPLITALGO])
+          num_regions = arg.delete(NUMREGIONS)
+          split_algo = RegionSplitter.newSplitAlgoInstance(@conf, arg.delete(SPLITALGO))
           splits = split_algo.split(JInteger.valueOf(num_regions))
         end
+        # Done with splits; apply formerly-table_att parameters.
+        htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
+        htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
+        htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
+        htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
+        htd.setDeferredLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH]
         if arg[CONFIG]
-          raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg.kind_of?(Hash)
-          for k,v in arg[CONFIG]
-            v = v.to_s unless v.nil?
-            htd.setValue(k, v)
-          end
+          apply_config(htd, arg.delete(CONFIG))
         end
-        else
-          # (3) column family spec
-          descriptor = hcd(arg, htd)
-          htd.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT]) if arg[COMPRESSION_COMPACT]
-          htd.addFamily(hcd(arg, htd))
-        end
-      end
+        arg.each_key do |ignored_key|
+          puts("An argument ignored (unknown or overridden): %s" % [ ignored_key ])
+        end
       end
+      # Fail if no column families defined
+      raise(ArgumentError, "Table must have at least one column family") if !has_columns
       if splits.nil?
         # Perform the create table call
         @admin.createTable(htd)
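
The rewritten create loop reads as: any String arg or Hash with a NAME key is a column family; METHOD => 'table_att' is accepted but discarded; SPLITS, SPLITS_FILE, or NUMREGIONS plus SPLITALGO set pre-splits; remaining keys become table attributes, and leftovers produce a warning. A sketch of the shell usage this enables (table, family, and values are illustrative):

  hbase> create 't1', 'f1', {NAME => 'f2', VERSIONS => 3},
    SPLITS => ['10', '20'], OWNER => 'johndoe', CONFIG => { 'mykey' => 'myvalue' }

A misspelled key such as MAX_FILESIZEE is consumed by the catch-all arg.each_key loop and reported as "An argument ignored (unknown or overridden): MAX_FILESIZEE" rather than being mistaken for a column family spec.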
@@ -368,68 +367,85 @@ module Hbase
       # Process all args
       args.each do |arg|
         # Normalize args to support column name only alter specs
         arg = { NAME => arg } if arg.kind_of?(String)
         # Normalize args to support shortcut delete syntax
         arg = { METHOD => 'delete', NAME => arg['delete'] } if arg['delete']
-        # No method parameter, try to use the args as a column definition
-        unless method = arg.delete(METHOD)
-          # Note that we handle owner here, and also below (see (2)) as part of the "METHOD => 'table_att'" table attributes.
-          # In other words, if OWNER is specified, then METHOD is set to table_att.
-          # alter 'tablename', {OWNER => 'username'} (that is, METHOD => 'table_att' is not specified).
-          if arg[OWNER]
-            htd.setOwnerString(arg[OWNER])
-            @admin.modifyTable(table_name.to_java_bytes, htd)
-            return
-          end
+        # There are 3 possible options.
+        # 1) Column family spec. Distinguished by having a NAME and no METHOD.
+        method = arg.delete(METHOD)
+        if method == nil and arg.has_key?(NAME)
           descriptor = hcd(arg, htd)
-          if arg[COMPRESSION_COMPACT]
-            descriptor.setValue(COMPRESSION_COMPACT, arg[COMPRESSION_COMPACT])
-          end
           column_name = descriptor.getNameAsString
           # If column already exist, then try to alter it. Create otherwise.
           if htd.hasFamily(column_name.to_java_bytes)
             @admin.modifyColumn(table_name, descriptor)
-            if wait == true
-              puts "Updating all regions with the new schema..."
-              alter_status(table_name)
-            end
           else
             @admin.addColumn(table_name, descriptor)
+          end
           if wait == true
             puts "Updating all regions with the new schema..."
             alter_status(table_name)
           end
-          end
+          # We bypass descriptor when adding column families; refresh it to apply other args correctly.
+          htd = @admin.getTableDescriptor(table_name.to_java_bytes)
           next
         end
+        # 2) Method other than table_att, with some args.
+        name = arg.delete(NAME)
+        if method != nil and method != "table_att"
           # Delete column family
           if method == "delete"
-            raise(ArgumentError, "NAME parameter missing for delete method") unless arg[NAME]
-            @admin.deleteColumn(table_name, arg[NAME])
+            raise(ArgumentError, "NAME parameter missing for delete method") unless name
+            @admin.deleteColumn(table_name, name)
+          # Unset table attributes
+          elsif method == "table_att_unset"
+            raise(ArgumentError, "NAME parameter missing for table_att_unset method") unless name
+            if (htd.getValue(name) == nil)
+              raise ArgumentError, "Can not find attribute: #{name}"
+            end
+            htd.remove(name.to_java_bytes)
+            @admin.modifyTable(table_name.to_java_bytes, htd)
+          # Unknown method
+          else
+            raise ArgumentError, "Unknown method: #{method}"
+          end
+          arg.each_key do |unknown_key|
+            puts("Unknown argument ignored: %s" % [unknown_key])
+          end
           if wait == true
             puts "Updating all regions with the new schema..."
             alter_status(table_name)
           end
+          if method == "delete"
+            # We bypass descriptor when deleting column families; refresh it to apply other args correctly.
+            htd = @admin.getTableDescriptor(table_name.to_java_bytes)
+          end
           next
         end
-        # Change table attributes
-        if method == "table_att"
-          htd.setMaxFileSize(JLong.valueOf(arg[MAX_FILESIZE])) if arg[MAX_FILESIZE]
-          htd.setReadOnly(JBoolean.valueOf(arg[READONLY])) if arg[READONLY]
-          htd.setMemStoreFlushSize(JLong.valueOf(arg[MEMSTORE_FLUSHSIZE])) if arg[MEMSTORE_FLUSHSIZE]
-          htd.setDeferredLogFlush(JBoolean.valueOf(arg[DEFERRED_LOG_FLUSH])) if arg[DEFERRED_LOG_FLUSH]
-          # (2) Here, we handle the alternate syntax of ownership setting, where method => 'table_att' is specified.
-          htd.setOwnerString(arg[OWNER]) if arg[OWNER]
+        # 3) Some args for the table, optionally with METHOD => table_att (deprecated)
+        raise(ArgumentError, "NAME argument in an unexpected place") if name
+        htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
+        apply_config(htd, arg.delete(CONFIG)) if arg[CONFIG]
+        htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
+        htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
+        htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
+        htd.setDeferredLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH]
         # set a coprocessor attribute
+        valid_coproc_keys = []
         if arg.kind_of?(Hash)
           arg.each do |key, value|
             k = String.new(key) # prepare to strip
@@ -455,46 +471,26 @@ module Hbase
             end
             maxId += 1
             htd.setValue(k + "\$" + maxId.to_s, value)
-          end
+            valid_coproc_keys << key
           end
         end
-        if arg[CONFIG]
-          raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg.kind_of?(Hash)
-          for k,v in arg[CONFIG]
-            v = v.to_s unless v.nil?
-            htd.setValue(k, v)
-          end
+        valid_coproc_keys.each do |key|
+          arg.delete(key)
         end
         @admin.modifyTable(table_name.to_java_bytes, htd)
+        arg.each_key do |unknown_key|
+          puts("Unknown argument ignored: %s" % [unknown_key])
+        end
         if wait == true
           puts "Updating all regions with the new schema..."
           alter_status(table_name)
         end
         next
       end
-        # Unset table attributes
-        if method == "table_att_unset"
-          if arg.kind_of?(Hash)
-            if (!arg[NAME])
-              next
-            end
-            if (htd.getValue(arg[NAME]) == nil)
-              raise ArgumentError, "Can not find attribute: #{arg[NAME]}"
-            end
-            htd.remove(arg[NAME].to_java_bytes)
-            @admin.modifyTable(table_name.to_java_bytes, htd)
-            if wait == true
-              puts "Updating all regions with the new schema..."
-              alter_status(table_name)
-            end
-          end
-          next
-        end
-        # Unknown method
-        raise ArgumentError, "Unknown method: #{method}"
       end
     end
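
The alter loop now dispatches each arg down one of the three numbered paths: a NAME without METHOD is a column family add/modify, a METHOD other than table_att handles delete/table_att_unset, and everything else is applied as table attributes (METHOD => 'table_att' surviving only as a deprecated no-op). A sketch exercising all three in one call (names and values are illustrative):

  hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 },
    { METHOD => 'table_att_unset', NAME => 'coprocessor$1' },
    MAX_FILESIZE => '134217728'

The htd refresh via @admin.getTableDescriptor after adding or deleting a family is what lets later attribute args modify the just-updated schema instead of a stale descriptor.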
@@ -574,24 +570,25 @@ module Hbase
       # String arg, single parameter constructor
       return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.kind_of?(String)
-      raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg[NAME]
+      raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg.delete(NAME)
       family = htd.getFamily(name.to_java_bytes)
       # create it if it's a new family
       family ||= org.apache.hadoop.hbase.HColumnDescriptor.new(name.to_java_bytes)
-      family.setBlockCacheEnabled(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE)
-      family.setScope(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE)
-      family.setInMemory(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
-      family.setTimeToLive(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::TTL])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
-      family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
-      family.setEncodeOnDisk(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK)
-      family.setBlocksize(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
-      family.setMaxVersions(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
-      family.setMinVersions(JInteger.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)
-      family.setKeepDeletedCells(JBoolean.valueOf(arg[org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS])) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS)
+      family.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE)
+      family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE)
+      family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
+      family.setTimeToLive(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
+      family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
+      family.setEncodeOnDisk(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCODE_ON_DISK)
+      family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
+      family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
+      family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)
+      family.setKeepDeletedCells(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS)
+      family.setValue(COMPRESSION_COMPACT, arg.delete(COMPRESSION_COMPACT)) if arg.include?(COMPRESSION_COMPACT)
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER)
-        bloomtype = arg[org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER].upcase
+        bloomtype = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER).upcase
         unless org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.include?(bloomtype)
           raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(" "))
         else
@@ -599,7 +596,7 @@ module Hbase
         end
       end
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
-        compression = arg[org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION].upcase
+        compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION).upcase
         unless org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.constants.include?(compression)
           raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.hfile.Compression::Algorithm.constants.join(" "))
         else
@@ -607,13 +604,14 @@ module Hbase
         end
       end
-      if arg[CONFIG]
-        raise(ArgumentError, "#{CONFIG} must be a Hash type") unless arg.kind_of?(Hash)
-        for k,v in arg[CONFIG]
-          v = v.to_s unless v.nil?
-          family.setValue(k, v)
-        end
+      if config = arg.delete(CONFIG)
+        apply_config(family, config)
+      end
+      arg.each_key do |unknown_key|
+        puts("Unknown argument ignored for column family %s: %s" % [name, unknown_key])
       end
       return family
     end
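
Since hcd now consumes every key it understands with arg.delete, whatever remains in the hash must be a typo or an unsupported option, and the new each_key loop surfaces it. For example (hypothetical misspelling):

  hbase> alter 't1', NAME => 'f1', VERSONS => 3
  Unknown argument ignored for column family f1: VERSONS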
@@ -639,5 +637,15 @@ module Hbase
       put.add(org.apache.hadoop.hbase.HConstants::CATALOG_FAMILY, org.apache.hadoop.hbase.HConstants::REGIONINFO_QUALIFIER, org.apache.hadoop.hbase.util.Writables.getBytes(hri))
       meta.put(put)
     end
+
+    # Apply config to table/column descriptor
+    def apply_config(descriptor, config)
+      raise(ArgumentError, "#{CONFIG} must be a Hash type") unless config.kind_of?(Hash)
+      for k,v in config
+        v = v.to_s unless v.nil?
+        descriptor.setValue(k, v)
+      end
+    end
   end
 end
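
The new apply_config helper centralizes the CONFIG handling that create, alter, and hcd previously duplicated: it validates that CONFIG is a Hash and stores each pair on the descriptor (HTableDescriptor or HColumnDescriptor) via setValue. Shell usage it backs, with an illustrative (real but arbitrary) HBase key:

  hbase> create 't1', 'f1', CONFIG => { 'hbase.hstore.blockingStoreFiles' => '10' }
  hbase> alter 't1', NAME => 'f1', CONFIG => { 'hbase.hstore.blockingStoreFiles' => '10' }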

File: shell/commands/alter.rb

@@ -22,34 +22,35 @@ module Shell
     class Alter < Command
       def help
         return <<-EOF
-Alter column family schema; pass table name and a dictionary
-specifying new column family schema. Dictionaries are described
-on the main help command output. Dictionary must include name
-of column family to alter. For example,
+Alter a table. Table must be disabled to be altered (see help 'disable').
+You can add/modify/delete column families, as well as change table
+configuration. Column families work similarly to create; column family
+spec can either be a name string, or a dictionary with NAME attribute.
+Dictionaries are described on the main help command output.

-To change or add the 'f1' column family in table 't1' from defaults
-to instead keep a maximum of 5 cell VERSIONS, do:
+For example, to change or add the 'f1' column family in table 't1' from
+current value to keep a maximum of 5 cell VERSIONS, do:

   hbase> alter 't1', NAME => 'f1', VERSIONS => 5

-To delete the 'f1' column family in table 't1', do:
+You can operate on several column families:
+
+  hbase> alter 't1', 'f1', {NAME => 'f2', IN_MEMORY => true}, {NAME => 'f3', VERSIONS => 5}
+
+To delete the 'f1' column family in table 't1', use one of:

   hbase> alter 't1', NAME => 'f1', METHOD => 'delete'
-
-or a shorter version:
-
   hbase> alter 't1', 'delete' => 'f1'

-You can also change table-scope attributes like MAX_FILESIZE
-MEMSTORE_FLUSHSIZE, READONLY, and DEFERRED_LOG_FLUSH.
+You can also change table-scope attributes like MAX_FILESIZE, READONLY,
+MEMSTORE_FLUSHSIZE, DEFERRED_LOG_FLUSH, etc. These can be put at the end;
+for example, to change the max size of a region to 128MB, do:

-For example, to change the max size of a family to 128MB, do:
-
-  hbase> alter 't1', METHOD => 'table_att', MAX_FILESIZE => '134217728'
+  hbase> alter 't1', MAX_FILESIZE => '134217728'

 You can add a table coprocessor by setting a table coprocessor attribute:

-  hbase> alter 't1', METHOD => 'table_att',
+  hbase> alter 't1',
     'coprocessor'=>'hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2'

 Since you can have multiple coprocessors configured for a table, a
@@ -69,7 +70,9 @@ You can also remove a table-scope attribute:

 There could be more than one alteration in one command:

-  hbase> alter 't1', {NAME => 'f1'}, {NAME => 'f2', METHOD => 'delete'}
+  hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 },
+    { MAX_FILESIZE => '134217728' }, { METHOD => 'delete', NAME => 'f2' },
+    OWNER => 'johndoe', CONFIG => { 'mykey' => 'myvalue' }
 EOF
       end

File: shell/commands/create.rb

@@ -22,9 +22,11 @@ module Shell
     class Create < Command
       def help
         return <<-EOF
-Create table; pass table name, a dictionary of specifications per
-column family, and optionally a dictionary of table configuration.
-Dictionaries are described below in the GENERAL NOTES section.
+Creates a table. Pass a table name, and a set of column family
+specifications (at least one), and, optionally, table configuration.
+Column specification can be a simple string (name), or a dictionary
+(dictionaries are described below in main help output), necessarily
+including NAME attribute.
 Examples:

   hbase> create 't1', {NAME => 'f1', VERSIONS => 5}
@@ -32,18 +34,23 @@ Examples:
   hbase> # The above in shorthand would be the following:
   hbase> create 't1', 'f1', 'f2', 'f3'
   hbase> create 't1', {NAME => 'f1', VERSIONS => 1, TTL => 2592000, BLOCKCACHE => true}
-  hbase> create 't1', 'f1', {SPLITS => ['10', '20', '30', '40']}
-  hbase> create 't1', 'f1', {SPLITS_FILE => 'splits.txt'}
+
+Table configuration options can be put at the end.
+Examples:
+
+  hbase> create 't1', 'f1', SPLITS => ['10', '20', '30', '40']
+  hbase> create 't1', 'f1', SPLITS_FILE => 'splits.txt', OWNER => 'johndoe'
+  hbase> create 't1', {NAME => 'f1', VERSIONS => 5}, CONFIG => { 'mykey' => 'myvalue' }
   hbase> # Optionally pre-split the table into NUMREGIONS, using
   hbase> # SPLITALGO ("HexStringSplit", "UniformSplit" or classname)
   hbase> create 't1', 'f1', {NUMREGIONS => 15, SPLITALGO => 'HexStringSplit'}

 You can also keep around a reference to the created table:

   hbase> t1 = create 't1', 'f1'

 Which gives you a reference to the table named 't1', on which you can then
 call methods.
 EOF
end end

File: hbase/admin_test.rb

@@ -149,6 +149,13 @@ module Hbase
       end
     end

+    define_test "create should fail without columns when called with options" do
+      drop_test_table(@create_test_name)
+      assert_raise(ArgumentError) do
+        admin.create(@create_test_name, { OWNER => 'a' })
+      end
+    end
+
     define_test "create should work with string column args" do
       drop_test_table(@create_test_name)
       admin.create(@create_test_name, 'a', 'b')
@@ -161,11 +168,27 @@ module Hbase
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
     end

+    define_test "create should be able to set table options" do
+      drop_test_table(@create_test_name)
+      admin.create(@create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, OWNER => '987654321')
+      assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
+      assert_match(/12345678/, admin.describe(@create_test_name))
+      assert_match(/987654321/, admin.describe(@create_test_name))
+    end
+
+    define_test "create should ignore table_att" do
+      drop_test_table(@create_test_name)
+      admin.create(@create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321')
+      assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
+      assert_match(/987654321/, admin.describe(@create_test_name))
+    end
+
     define_test "create should work with SPLITALGO" do
       drop_test_table(@create_test_name)
       admin.create(@create_test_name, 'a', 'b', {NUMREGIONS => 10, SPLITALGO => 'HexStringSplit'})
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
     end

     #-------------------------------------------------------------------------------

     define_test "describe should fail for non-existent tables" do
@@ -253,9 +276,10 @@ module Hbase
     define_test "alter should support more than one alteration in one call" do
       assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      admin.alter(@test_name, true, { NAME => 'z' }, { METHOD => 'delete', NAME => 'y' })
+      admin.alter(@test_name, true, { NAME => 'z' }, { METHOD => 'delete', NAME => 'y' }, 'MAX_FILESIZE' => 12345678)
       admin.enable(@test_name)
       assert_equal(['x:', 'z:'], table(@test_name).get_all_columns.sort)
+      assert_match(/12345678/, admin.describe(@test_name))
     end

     define_test 'alter should support shortcut DELETE alter specs' do
@@ -269,6 +293,11 @@ module Hbase
       assert_match(/12345678/, admin.describe(@test_name))
     end

+    define_test "alter should be able to change table options w/o table_att" do
+      admin.alter(@test_name, true, 'MAX_FILESIZE' => 12345678)
+      assert_match(/12345678/, admin.describe(@test_name))
+    end
+
     define_test "alter should be able to change coprocessor attributes" do
       drop_test_table(@test_name)
       create_test_table(@test_name)