HBASE-20819 Use TableDescriptor to replace HTableDescriptor in hbase-shell module (#1959)

* HBASE-20819 Use TableDescriptor to replace HTableDescriptor in hbase-shell module

* Resolve whitespace issues

* Use removeValue rather than setValue where possible in hbase-shell

* Add String removeValue overload to TableDescriptorBuilder

* Convert remaining usages of HTableDescriptor in security.rb

* Rename hcd (HColumnDescriptor) to cfd (ColumnFamilyDescriptor)

* Add back 3 constants to HBaseConstants in hbase-shell

* Fix bad constant reference
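
For orientation, a minimal sketch of the builder-based pattern these changes move the shell toward, assuming the standard HBase 2.x client API; the table name, family name, and splits-file value are illustrative and not taken from the patch:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Build the descriptor immutably instead of mutating an HTableDescriptor in place.
    TableDescriptor buildExample() {
      return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .setValue("SPLITS_FILE", "/tmp/splits.txt")
          .removeValue("SPLITS_FILE") // String overload added by this commit
          .build();
    }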

Signed-off-by: Sean Busbey <busbey@apache.org>
Signed-off-by: stack <stack@apache.org>
Signed-off-by: Duo Zhang <zhangduo@apache.org>
Elliot, 2020-06-26 15:39:34 -04:00 (committed by GitHub)
parent 51ad588565
commit 83f27b5287
5 changed files with 177 additions and 111 deletions
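
Before the diff, a rough Java-side sketch of the attribute-unset flow that the reworked alter path in admin.rb now follows; the helper name and its signature are illustrative, not part of the patch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Mirrors the shell's METHOD => 'table_att_unset' handling: read the current
    // descriptor, fail if the attribute is absent, otherwise drop it and apply.
    static void unsetTableAttribute(Admin admin, TableName table, String key) throws IOException {
      TableDescriptor current = admin.getDescriptor(table);
      if (current.getValue(key) == null) {
        throw new IllegalArgumentException("Could not find attribute: " + key);
      }
      admin.modifyTable(TableDescriptorBuilder.newBuilder(current).removeValue(key).build());
    }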


@@ -396,6 +396,11 @@ public class TableDescriptorBuilder {
return this;
}
public TableDescriptorBuilder removeValue(final String key) {
desc.removeValue(key);
return this;
}
public TableDescriptorBuilder removeValue(Bytes key) {
desc.removeValue(key);
return this;
@@ -736,6 +741,17 @@ public class TableDescriptorBuilder {
}
}
/**
* Remove metadata represented by the key from the {@link #values} map
*
* @param key Key whose key and value we're to remove from TableDescriptor
* parameters.
* @return the modifyable TD
*/
public ModifyableTableDescriptor removeValue(final String key) {
return setValue(key, (String) null);
}
/**
* Remove metadata represented by the key from the {@link #values} map
*


@@ -25,6 +25,8 @@ java_import org.apache.hadoop.hbase.util.RegionSplitter
java_import org.apache.hadoop.hbase.util.Bytes
java_import org.apache.hadoop.hbase.ServerName
java_import org.apache.hadoop.hbase.TableName
java_import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder
java_import org.apache.hadoop.hbase.client.TableDescriptorBuilder
# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
@@ -429,7 +431,7 @@ module Hbase
has_columns = false
# Start defining the table
htd = org.apache.hadoop.hbase.HTableDescriptor.new(org.apache.hadoop.hbase.TableName.valueOf(table_name))
tdb = TableDescriptorBuilder.newBuilder(TableName.valueOf(table_name))
splits = nil
# Args are either columns or splits, add them to the table definition
# TODO: add table options support
@@ -442,20 +444,20 @@ module Hbase
if arg.is_a?(String) || arg.key?(NAME)
# If the arg is a string, default action is to add a column to the table.
# If arg has a name, it must also be a column descriptor.
descriptor = hcd(arg, htd)
descriptor = cfd(arg, tdb)
# Warn if duplicate columns are added
if htd.hasFamily(descriptor.getName)
if tdb.build.hasColumnFamily(descriptor.getName)
puts "Family '" + descriptor.getNameAsString + "' already exists, the old one will be replaced"
htd.modifyFamily(descriptor)
tdb.modifyColumnFamily(descriptor)
else
htd.addFamily(descriptor)
tdb.setColumnFamily(descriptor)
end
has_columns = true
next
end
if arg.key?(REGION_REPLICATION)
region_replication = JInteger.valueOf(arg.delete(REGION_REPLICATION))
htd.setRegionReplication(region_replication)
tdb.setRegionReplication(region_replication)
end
# Get rid of the "METHOD", which is deprecated for create.
@@ -475,7 +477,7 @@ module Hbase
File.foreach(splits_file) do |line|
arg[SPLITS].push(line.chomp)
end
htd.setValue(SPLITS_FILE, arg[SPLITS_FILE])
tdb.setValue(SPLITS_FILE, arg[SPLITS_FILE])
end
if arg.key?(SPLITS)
@@ -496,7 +498,7 @@ module Hbase
end
# Done with splits; apply formerly-table_att parameters.
update_htd_from_arg(htd, arg)
update_tdb_from_arg(tdb, arg)
arg.each_key do |ignored_key|
puts(format('An argument ignored (unknown or overridden): %s', ignored_key))
@@ -508,10 +510,10 @@ module Hbase
if splits.nil?
# Perform the create table call
@admin.createTable(htd)
@admin.createTable(tdb.build)
else
# Perform the create table call
@admin.createTable(htd, splits)
@admin.createTable(tdb.build, splits)
end
end
@@ -667,7 +669,7 @@ module Hbase
table_name = TableName.valueOf(table_name_str)
# Get table descriptor
htd = org.apache.hadoop.hbase.HTableDescriptor.new(@admin.getDescriptor(table_name))
tdb = TableDescriptorBuilder.newBuilder(@admin.getDescriptor(table_name))
hasTableUpdate = false
# Process all args
@@ -682,14 +684,14 @@ module Hbase
# 1) Column family spec. Distinguished by having a NAME and no METHOD.
method = arg.delete(METHOD)
if method.nil? && arg.key?(NAME)
descriptor = hcd(arg, htd)
descriptor = cfd(arg, tdb)
column_name = descriptor.getNameAsString
# If column already exist, then try to alter it. Create otherwise.
if htd.hasFamily(column_name.to_java_bytes)
htd.modifyFamily(descriptor)
if tdb.build.hasColumnFamily(column_name.to_java_bytes)
tdb.modifyColumnFamily(descriptor)
else
htd.addFamily(descriptor)
tdb.setColumnFamily(descriptor)
end
hasTableUpdate = true
next
@@ -701,23 +703,23 @@ module Hbase
# Delete column family
if method == 'delete'
raise(ArgumentError, 'NAME parameter missing for delete method') unless name
htd.removeFamily(name.to_java_bytes)
tdb.removeColumnFamily(name.to_java_bytes)
hasTableUpdate = true
# Unset table attributes
elsif method == 'table_att_unset'
raise(ArgumentError, 'NAME parameter missing for table_att_unset method') unless name
if name.is_a?(Array)
name.each do |key|
if htd.getValue(key).nil?
if tdb.build.getValue(key).nil?
raise ArgumentError, "Could not find attribute: #{key}"
end
htd.remove(key)
tdb.removeValue(key)
end
else
if htd.getValue(name).nil?
if tdb.build.getValue(name).nil?
raise ArgumentError, "Could not find attribute: #{name}"
end
htd.remove(name)
tdb.removeValue(name)
end
hasTableUpdate = true
# Unset table configuration
@@ -725,16 +727,16 @@ module Hbase
raise(ArgumentError, 'NAME parameter missing for table_conf_unset method') unless name
if name.is_a?(Array)
name.each do |key|
if htd.getConfigurationValue(key).nil?
if tdb.build.getValue(key).nil?
raise ArgumentError, "Could not find configuration: #{key}"
end
htd.removeConfiguration(key)
tdb.removeValue(key)
end
else
if htd.getConfigurationValue(name).nil?
if tdb.build.getValue(name).nil?
raise ArgumentError, "Could not find configuration: #{name}"
end
htd.removeConfiguration(name)
tdb.removeValue(name)
end
hasTableUpdate = true
# Unknown method
@@ -750,7 +752,7 @@ module Hbase
end
# 3) Some args for the table, optionally with METHOD => table_att (deprecated)
update_htd_from_arg(htd, arg)
update_tdb_from_arg(tdb, arg)
# set a coprocessor attribute
valid_coproc_keys = []
@@ -763,7 +765,10 @@ module Hbase
v = String.new(value)
v.strip!
# TODO: We should not require user to config the coprocessor with our inner format.
htd.addCoprocessorWithSpec(v)
# This is a roundabout approach, but will be replaced shortly since
# the setCoprocessorWithSpec method is marked for deprecation.
coprocessor_descriptors = tdb.build.setCoprocessorWithSpec(v).getCoprocessorDescriptors
tdb.setCoprocessors(coprocessor_descriptors)
valid_coproc_keys << key
end
@@ -782,7 +787,7 @@ module Hbase
# Bulk apply all table modifications.
if hasTableUpdate
future = @admin.modifyTableAsync(htd)
future = @admin.modifyTableAsync(tdb.build)
if wait == true
puts 'Updating all regions with the new schema...'
@@ -971,101 +976,102 @@ module Hbase
end
#----------------------------------------------------------------------------------------------
# Return a new HColumnDescriptor made of passed args
def hcd(arg, htd)
# Return a new ColumnFamilyDescriptor made of passed args
def cfd(arg, tdb)
# String arg, single parameter constructor
return org.apache.hadoop.hbase.HColumnDescriptor.new(arg) if arg.is_a?(String)
return ColumnFamilyDescriptorBuilder.of(arg) if arg.is_a?(String)
raise(ArgumentError, "Column family #{arg} must have a name") unless name = arg.delete(NAME)
family = htd.getFamily(name.to_java_bytes)
descriptor = tdb.build.getColumnFamily(name.to_java_bytes)
unless descriptor.nil?
cfdb = ColumnFamilyDescriptorBuilder.newBuilder(descriptor)
end
# create it if it's a new family
family ||= org.apache.hadoop.hbase.HColumnDescriptor.new(name.to_java_bytes)
cfdb ||= ColumnFamilyDescriptorBuilder.newBuilder(name.to_java_bytes)
family.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKCACHE)
family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE)
family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE)
family.setCacheIndexesOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_INDEX_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_INDEX_ON_WRITE)
family.setCacheBloomsOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_BLOOMS_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_BLOOMS_ON_WRITE)
family.setEvictBlocksOnClose(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::EVICT_BLOCKS_ON_CLOSE)
family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
family.setInMemoryCompaction(
org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))
cfdb.setBlockCacheEnabled(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::BLOCKCACHE))) if arg.include?(ColumnFamilyDescriptorBuilder::BLOCKCACHE)
cfdb.setScope(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::REPLICATION_SCOPE))) if arg.include?(ColumnFamilyDescriptorBuilder::REPLICATION_SCOPE)
cfdb.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::CACHE_DATA_ON_WRITE))) if arg.include?(ColumnFamilyDescriptorBuilder::CACHE_DATA_ON_WRITE)
cfdb.setCacheIndexesOnWrite(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::CACHE_INDEX_ON_WRITE))) if arg.include?(ColumnFamilyDescriptorBuilder::CACHE_INDEX_ON_WRITE)
cfdb.setCacheBloomsOnWrite(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::CACHE_BLOOMS_ON_WRITE))) if arg.include?(ColumnFamilyDescriptorBuilder::CACHE_BLOOMS_ON_WRITE)
cfdb.setEvictBlocksOnClose(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::EVICT_BLOCKS_ON_CLOSE))) if arg.include?(ColumnFamilyDescriptorBuilder::EVICT_BLOCKS_ON_CLOSE)
cfdb.setInMemory(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::IN_MEMORY))) if arg.include?(ColumnFamilyDescriptorBuilder::IN_MEMORY)
if arg.include?(ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION)
cfdb.setInMemoryCompaction(
org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION))
)
end
family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
family.setMaxVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::VERSIONS)
family.setMinVersions(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MIN_VERSIONS)
family.setKeepDeletedCells(org.apache.hadoop.hbase.KeepDeletedCells.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS).to_s.upcase)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::KEEP_DELETED_CELLS)
family.setCompressTags(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESS_TAGS)
family.setPrefetchBlocksOnOpen(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::PREFETCH_BLOCKS_ON_OPEN)
family.setMobEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IS_MOB))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IS_MOB)
family.setMobThreshold(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_THRESHOLD)
family.setNewVersionBehavior(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::NEW_VERSION_BEHAVIOR))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::NEW_VERSION_BEHAVIOR)
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER)
bloomtype = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOOMFILTER).upcase.to_sym
cfdb.setTimeToLive(arg.delete(ColumnFamilyDescriptorBuilder::TTL)) if arg.include?(ColumnFamilyDescriptorBuilder::TTL)
cfdb.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::DATA_BLOCK_ENCODING))) if arg.include?(ColumnFamilyDescriptorBuilder::DATA_BLOCK_ENCODING)
cfdb.setBlocksize(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::BLOCKSIZE))) if arg.include?(ColumnFamilyDescriptorBuilder::BLOCKSIZE)
cfdb.setMaxVersions(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::VERSIONS))) if arg.include?(ColumnFamilyDescriptorBuilder::VERSIONS)
cfdb.setMinVersions(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::MIN_VERSIONS))) if arg.include?(ColumnFamilyDescriptorBuilder::MIN_VERSIONS)
cfdb.setKeepDeletedCells(org.apache.hadoop.hbase.KeepDeletedCells.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::KEEP_DELETED_CELLS).to_s.upcase)) if arg.include?(ColumnFamilyDescriptorBuilder::KEEP_DELETED_CELLS)
cfdb.setCompressTags(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::COMPRESS_TAGS))) if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESS_TAGS)
cfdb.setPrefetchBlocksOnOpen(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::PREFETCH_BLOCKS_ON_OPEN))) if arg.include?(ColumnFamilyDescriptorBuilder::PREFETCH_BLOCKS_ON_OPEN)
cfdb.setMobEnabled(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::IS_MOB))) if arg.include?(ColumnFamilyDescriptorBuilder::IS_MOB)
cfdb.setMobThreshold(JLong.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::MOB_THRESHOLD))) if arg.include?(ColumnFamilyDescriptorBuilder::MOB_THRESHOLD)
cfdb.setNewVersionBehavior(JBoolean.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::NEW_VERSION_BEHAVIOR))) if arg.include?(ColumnFamilyDescriptorBuilder::NEW_VERSION_BEHAVIOR)
if arg.include?(ColumnFamilyDescriptorBuilder::BLOOMFILTER)
bloomtype = arg.delete(ColumnFamilyDescriptorBuilder::BLOOMFILTER).upcase.to_sym
if org.apache.hadoop.hbase.regionserver.BloomType.constants.include?(bloomtype)
family.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype))
cfdb.setBloomFilterType(org.apache.hadoop.hbase.regionserver.BloomType.valueOf(bloomtype))
else
raise(ArgumentError, "BloomFilter type #{bloomtype} is not supported. Use one of " + org.apache.hadoop.hbase.regionserver.StoreFile::BloomType.constants.join(' '))
end
end
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION)
compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION).upcase.to_sym
if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESSION)
compression = arg.delete(ColumnFamilyDescriptorBuilder::COMPRESSION).upcase.to_sym
if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
family.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
cfdb.setCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
else
raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' '))
end
end
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION)
algorithm = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION).upcase
family.setEncryptionType(algorithm)
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY)
if arg.include?(ColumnFamilyDescriptorBuilder::ENCRYPTION)
algorithm = arg.delete(ColumnFamilyDescriptorBuilder::ENCRYPTION).upcase
cfdb.setEncryptionType(algorithm)
if arg.include?(ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY)
key = org.apache.hadoop.hbase.io.crypto.Encryption.pbkdf128(
arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::ENCRYPTION_KEY)
arg.delete(ColumnFamilyDescriptorBuilder::ENCRYPTION_KEY)
)
family.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key,
cfdb.setEncryptionKey(org.apache.hadoop.hbase.security.EncryptionUtil.wrapKey(@conf, key,
algorithm))
end
end
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT)
compression = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::COMPRESSION_COMPACT).upcase.to_sym
if arg.include?(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT)
compression = arg.delete(ColumnFamilyDescriptorBuilder::COMPRESSION_COMPACT).upcase.to_sym
if org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.include?(compression)
family.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
cfdb.setCompactionCompressionType(org.apache.hadoop.hbase.io.compress.Compression::Algorithm.valueOf(compression))
else
raise(ArgumentError, "Compression #{compression} is not supported. Use one of " + org.apache.hadoop.hbase.io.compress.Compression::Algorithm.constants.join(' '))
end
end
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY)
storage_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::STORAGE_POLICY).upcase
family.setStoragePolicy(storage_policy)
if arg.include?(ColumnFamilyDescriptorBuilder::STORAGE_POLICY)
storage_policy = arg.delete(ColumnFamilyDescriptorBuilder::STORAGE_POLICY).upcase
cfdb.setStoragePolicy(storage_policy)
end
if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY)
mob_partition_policy = arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::MOB_COMPACT_PARTITION_POLICY).upcase.to_sym
if org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.include?(mob_partition_policy)
family.setMobCompactPartitionPolicy(org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.valueOf(mob_partition_policy))
if arg.include?(ColumnFamilyDescriptorBuilder::MOB_COMPACT_PARTITION_POLICY)
mob_partition_policy = arg.delete(ColumnFamilyDescriptorBuilder::MOB_COMPACT_PARTITION_POLICY).upcase.to_sym
if MobCompactPartitionPolicy.constants.include?(mob_partition_policy)
cfdb.setMobCompactPartitionPolicy(MobCompactPartitionPolicy.valueOf(mob_partition_policy))
else
raise(ArgumentError, "MOB_COMPACT_PARTITION_POLICY #{mob_partition_policy} is not supported. Use one of " + org.apache.hadoop.hbase.client.MobCompactPartitionPolicy.constants.join(' '))
raise(ArgumentError, "MOB_COMPACT_PARTITION_POLICY #{mob_partition_policy} is not supported. Use one of " + MobCompactPartitionPolicy.constants.join(' '))
end
end
set_user_metadata(family, arg.delete(METADATA)) if arg[METADATA]
set_descriptor_config(family, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
if arg.include?(org.apache.hadoop.hbase
.HColumnDescriptor::DFS_REPLICATION)
family.setDFSReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase
.HColumnDescriptor::DFS_REPLICATION)))
set_user_metadata(cfdb, arg.delete(METADATA)) if arg[METADATA]
set_descriptor_config(cfdb, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
if arg.include?(ColumnFamilyDescriptorBuilder::DFS_REPLICATION)
cfdb.setDFSReplication(JInteger.valueOf(arg.delete(ColumnFamilyDescriptorBuilder::DFS_REPLICATION)))
end
arg.each_key do |unknown_key|
puts(format('Unknown argument ignored for column family %s: %s', name, unknown_key))
end
family
cfdb.build
end
# Apply user metadata to table/column descriptor
@@ -1234,7 +1240,7 @@ module Hbase
raise(ArgumentError, "#{CONFIGURATION} must be a Hash type") unless config.is_a?(Hash)
for k, v in config
v = v.to_s unless v.nil?
descriptor.setConfiguration(k, v)
descriptor.setValue(k, v)
end
end
@@ -1359,26 +1365,26 @@ module Hbase
@admin.getLocks
end
# Parse arguments and update HTableDescriptor accordingly
def update_htd_from_arg(htd, arg)
htd.setOwnerString(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::OWNER)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::OWNER)
htd.setMaxFileSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MAX_FILESIZE)
htd.setReadOnly(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::READONLY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::READONLY)
htd.setCompactionEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::COMPACTION_ENABLED)
htd.setSplitEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_ENABLED)
htd.setMergeEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MERGE_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MERGE_ENABLED)
htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZATION_ENABLED)
htd.setNormalizerTargetRegionCount(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_COUNT))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_COUNT)
htd.setNormalizerTargetRegionSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_SIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::NORMALIZER_TARGET_REGION_SIZE)
htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::MEMSTORE_FLUSHSIZE)
htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::DURABILITY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::DURABILITY)
htd.setPriority(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::PRIORITY)
htd.setFlushPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::FLUSH_POLICY)
htd.setRegionMemstoreReplication(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_MEMSTORE_REPLICATION)
htd.setRegionSplitPolicyClassName(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::SPLIT_POLICY)
htd.setRegionReplication(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION))) if arg.include?(org.apache.hadoop.hbase.HTableDescriptor::REGION_REPLICATION)
set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
# Parse arguments and update TableDescriptorBuilder accordingly
def update_tdb_from_arg(tdb, arg)
tdb.setOwnerString(arg.delete(TableDescriptorBuilder::OWNER)) if arg.include?(TableDescriptorBuilder::OWNER)
tdb.setMaxFileSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MAX_FILESIZE))) if arg.include?(TableDescriptorBuilder::MAX_FILESIZE)
tdb.setReadOnly(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::READONLY))) if arg.include?(TableDescriptorBuilder::READONLY)
tdb.setCompactionEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::COMPACTION_ENABLED))) if arg.include?(TableDescriptorBuilder::COMPACTION_ENABLED)
tdb.setSplitEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::SPLIT_ENABLED))) if arg.include?(TableDescriptorBuilder::SPLIT_ENABLED)
tdb.setMergeEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::MERGE_ENABLED))) if arg.include?(TableDescriptorBuilder::MERGE_ENABLED)
tdb.setNormalizationEnabled(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZATION_ENABLED))) if arg.include?(TableDescriptorBuilder::NORMALIZATION_ENABLED)
tdb.setNormalizerTargetRegionCount(JInteger.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_COUNT))) if arg.include?(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_COUNT)
tdb.setNormalizerTargetRegionSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_SIZE))) if arg.include?(TableDescriptorBuilder::NORMALIZER_TARGET_REGION_SIZE)
tdb.setMemStoreFlushSize(JLong.valueOf(arg.delete(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE))) if arg.include?(TableDescriptorBuilder::MEMSTORE_FLUSHSIZE)
tdb.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(TableDescriptorBuilder::DURABILITY))) if arg.include?(TableDescriptorBuilder::DURABILITY)
tdb.setPriority(JInteger.valueOf(arg.delete(TableDescriptorBuilder::PRIORITY))) if arg.include?(TableDescriptorBuilder::PRIORITY)
tdb.setFlushPolicyClassName(arg.delete(TableDescriptorBuilder::FLUSH_POLICY)) if arg.include?(TableDescriptorBuilder::FLUSH_POLICY)
tdb.setRegionMemStoreReplication(JBoolean.valueOf(arg.delete(TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION))) if arg.include?(TableDescriptorBuilder::REGION_MEMSTORE_REPLICATION)
tdb.setRegionSplitPolicyClassName(arg.delete(TableDescriptorBuilder::SPLIT_POLICY)) if arg.include?(TableDescriptorBuilder::SPLIT_POLICY)
tdb.setRegionReplication(JInteger.valueOf(arg.delete(TableDescriptorBuilder::REGION_REPLICATION))) if arg.include?(TableDescriptorBuilder::REGION_REPLICATION)
set_user_metadata(tdb, arg.delete(METADATA)) if arg[METADATA]
set_descriptor_config(tdb, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
end
#----------------------------------------------------------------------------------------------


@@ -66,10 +66,10 @@ module Hbase
raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
tableName = org.apache.hadoop.hbase.TableName.valueOf(table_name)
htd = org.apache.hadoop.hbase.HTableDescriptor.new(@admin.getDescriptor(tableName))
td = @admin.getDescriptor(tableName)
unless family.nil?
raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
raise(ArgumentError, "Can't find a family: #{family}") unless td.hasColumnFamily(family.to_java_bytes)
end
fambytes = family.to_java_bytes unless family.nil?
@@ -111,10 +111,10 @@ module Hbase
raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
tableName = org.apache.hadoop.hbase.TableName.valueOf(table_name)
htd = org.apache.hadoop.hbase.HTableDescriptor.new(@admin.getDescriptor(tableName))
td = @admin.getDescriptor(tableName)
unless family.nil?
raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
raise(ArgumentError, "Can't find a family: #{family}") unless td.hasColumnFamily(family.to_java_bytes)
end
fambytes = family.to_java_bytes unless family.nil?


@@ -39,7 +39,7 @@ module HBaseConstants
NAME = org.apache.hadoop.hbase.HConstants::NAME
VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION
IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder::IN_MEMORY_COMPACTION
METADATA = org.apache.hadoop.hbase.HConstants::METADATA
STOPROW = 'STOPROW'.freeze
STARTROW = 'STARTROW'.freeze
@@ -98,6 +98,9 @@ module HBaseConstants
FORMATTER_CLASS = 'FORMATTER_CLASS'.freeze
POLICY = 'POLICY'.freeze
REGIONSERVER = 'REGIONSERVER'.freeze
REPLICATION_SCOPE_BYTES = org.apache.hadoop.hbase.client.ColumnFamilyDescriptor::REPLICATION_SCOPE_BYTES
FOREVER = org.apache.hadoop.hbase.HConstants::FOREVER
IS_ROOT = 'IS_ROOT'.freeze
# Load constants from hbase java API
def self.promote_constants(constants)
@@ -109,8 +112,8 @@ module HBaseConstants
end
end
promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants)
promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants)
promote_constants(org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.constants)
promote_constants(org.apache.hadoop.hbase.client.TableDescriptorBuilder.constants)
end
# Include classes definition


@@ -808,6 +808,27 @@ module Hbase
assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
end
define_test "alter should raise error trying to remove nonexistent attributes" do
drop_test_table(@test_name)
create_test_table(@test_name)
key_1 = "TestAttr1"
key_2 = "TestAttr2"
assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
# first, try removing just one nonexistent attribute
assert_raise(ArgumentError) do
command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => key_1)
end
# second, try removing multiple nonexistent attributes
assert_raise(ArgumentError) do
command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => [ key_1, key_2 ])
end
end
define_test "alter should be able to remove a table configuration" do
drop_test_table(@test_name)
create_test_table(@test_name)
@@ -838,6 +859,26 @@ module Hbase
assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
end
define_test "alter should raise error trying to remove nonexistent configurations" do
drop_test_table(@test_name)
create_test_table(@test_name)
key_1 = "TestConf1"
key_2 = "TestConf2"
assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
# first, try removing just one nonexistent configuration
assert_raise(ArgumentError) do
command(:alter, @test_name, 'METHOD' => 'table_conf_unset', 'NAME' => key_1)
end
# second, try removing multiple nonexistent configurations
assert_raise(ArgumentError) do
command(:alter, @test_name, 'METHOD' => 'table_conf_unset', 'NAME' => [ key_1, key_2 ])
end
end
define_test "get_table should get a real table" do
drop_test_table(@test_name)
create_test_table(@test_name)