HBASE-17736 Some options can't be configured by the shell

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Chia-Ping Tsai 2017-03-09 10:04:49 +08:00, committed by Andrew Purtell
parent 598dd9bb0e
commit 1105e1260d
2 changed files with 47 additions and 6 deletions


@@ -810,7 +810,14 @@ module Hbase
       family.setKeepDeletedCells(org.apache.hadoop.hbase.KeepDeletedCells.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS).to_s.upcase)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS)
       family.setCompressTags(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS)
       family.setPrefetchBlocksOnOpen(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN)
-      family.setValue(COMPRESSION_COMPACT, arg.delete(COMPRESSION_COMPACT)) if arg.include?(COMPRESSION_COMPACT)
+      if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT)
+        compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT).upcase
+        unless org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
+          raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(" "))
+        else
+          family.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
+        end
+      end
       if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER)
         bloomtype = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER).upcase
         unless org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype)
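
The new branch validates COMPRESSION_COMPACT against Compression::Algorithm and applies it via setCompactionCompressionType instead of stashing it as a raw descriptor value, mirroring the existing BLOOMFILTER handling just below. A sketch of the resulting shell behavior, assuming a running shell session (table and family names are hypothetical):

    # Accepted: the value is upcased, then checked against Compression::Algorithm
    hbase> create 'tbl', { NAME => 'cf', COMPRESSION_COMPACT => 'gz' }

    # Rejected: raises ArgumentError, "Compression FOO is not supported. Use one of ..."
    hbase> create 'tbl', { NAME => 'cf', COMPRESSION_COMPACT => 'foo' }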
@@ -1125,9 +1132,9 @@ module Hbase
       htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
       htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
       htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
-      htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED]
+      htd.setCompactionEnabled(JBoolean.valueOf(arg.delete(COMPACTION_ENABLED))) if arg[COMPACTION_ENABLED]
       htd.setNormalizationEnabled(
-        JBoolean.valueOf(arg[NORMALIZATION_ENABLED])) if arg[NORMALIZATION_ENABLED]
+        JBoolean.valueOf(arg.delete(NORMALIZATION_ENABLED))) if arg[NORMALIZATION_ENABLED]
       htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
       # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY. To keep backward compatible, it still exists.
       # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set
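
The change here is easy to miss: COMPACTION_ENABLED and NORMALIZATION_ENABLED were the only options in this block read with arg[...] instead of consumed with arg.delete(...), so their keys lingered in the hash after being applied. A minimal plain-Ruby illustration of the difference (hash contents hypothetical):

    args = { 'COMPACTION_ENABLED' => 'false' }
    args['COMPACTION_ENABLED']         # => "false"; key still present afterwards
    args.delete('COMPACTION_ENABLED')  # => "false"; key removed, nothing left over
    args.empty?                        # => true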
@@ -1139,6 +1146,10 @@ module Hbase
         end
       end
       htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
+      htd.setPriority(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY))) if arg[org.apache.hadoop.hbase.HTableDescriptor::PRIORITY]
+      htd.setFlushPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY)) if arg[org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY]
+      htd.setRegionMemstoreReplication(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION))) if arg[org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION]
+      htd.setRegionSplitPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)) if arg[org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY]
       htd.setRegionReplication(JInteger.valueOf(arg.delete(REGION_REPLICATION))) if arg[REGION_REPLICATION]
       set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
       set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
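
These four new setters are what make PRIORITY, FLUSH_POLICY, REGION_MEMSTORE_REPLICATION, and SPLIT_POLICY reachable from the shell at all. A hedged shell sketch, assuming a running session (table and family names hypothetical; the policy class names are the ones exercised by the test below):

    hbase> create 'tbl', 'cf',
      PRIORITY => '77',
      FLUSH_POLICY => 'org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy',
      REGION_MEMSTORE_REPLICATION => 'true',
      SPLIT_POLICY => 'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy'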


@@ -188,15 +188,45 @@ module Hbase
       admin.create(@create_test_name, { NAME => 'a'}, { NAME => 'b'})
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
     end
 
+    define_test "create should be able to set column options" do
+      drop_test_table(@create_test_name)
+      admin.create(@create_test_name,
+                   { NAME => 'a',
+                     CACHE_BLOOMS_ON_WRITE => 'TRUE',
+                     CACHE_DATA_IN_L1 => 'TRUE',
+                     CACHE_INDEX_ON_WRITE => 'TRUE',
+                     EVICT_BLOCKS_ON_CLOSE => 'TRUE',
+                     COMPRESSION_COMPACT => 'GZ'})
+      assert_equal(['a:'], table(@create_test_name).get_all_columns.sort)
+      assert_match(/CACHE_BLOOMS_ON_WRITE/, admin.describe(@create_test_name))
+      assert_match(/CACHE_DATA_IN_L1/, admin.describe(@create_test_name))
+      assert_match(/CACHE_INDEX_ON_WRITE/, admin.describe(@create_test_name))
+      assert_match(/EVICT_BLOCKS_ON_CLOSE/, admin.describe(@create_test_name))
+      assert_match(/GZ/, admin.describe(@create_test_name))
+    end
+
     define_test "create should be able to set table options" do
       drop_test_table(@create_test_name)
-      admin.create(@create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, OWNER => '987654321')
+      admin.create(@create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678,
+                   OWNER => '987654321',
+                   PRIORITY => '77',
+                   FLUSH_POLICY => 'org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy',
+                   REGION_MEMSTORE_REPLICATION => 'TRUE',
+                   SPLIT_POLICY => 'org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy',
+                   COMPACTION_ENABLED => 'false')
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
       assert_match(/12345678/, admin.describe(@create_test_name))
       assert_match(/987654321/, admin.describe(@create_test_name))
+      assert_match(/77/, admin.describe(@create_test_name))
+      assert_match(/COMPACTION_ENABLED/, admin.describe(@create_test_name))
+      assert_match(/REGION_MEMSTORE_REPLICATION/, admin.describe(@create_test_name))
+      assert_match(/org.apache.hadoop.hbase.regionserver.FlushAllLargeStoresPolicy/,
+        admin.describe(@create_test_name))
+      assert_match(/org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy/,
+        admin.describe(@create_test_name))
+    end
 
     define_test "create should ignore table_att" do
       drop_test_table(@create_test_name)
       admin.create(@create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321')