HBASE-18239 rubocop autocorrect for shell

Signed-off-by: Michael Stack <stack@apache.org>
Mike Drob 2017-06-20 15:24:29 -05:00 committed by Michael Stack
parent 90fee695b4
commit 2115d4b50e
140 changed files with 1448 additions and 1526 deletions

File diff suppressed because it is too large


@ -39,22 +39,22 @@ module Hbase
else
self.configuration = HBaseConfiguration.create
# Turn off retries in hbase and ipc. Human doesn't want to wait on N retries.
configuration.setInt("hbase.client.retries.number", 7)
configuration.setInt("hbase.ipc.client.connect.max.retries", 3)
configuration.setInt('hbase.client.retries.number', 7)
configuration.setInt('hbase.ipc.client.connect.max.retries', 3)
end
@connection = ConnectionFactory.createConnection(self.configuration)
@connection = ConnectionFactory.createConnection(configuration)
end
# Returns ruby's Admin class from admin.rb
def admin()
def admin
::Hbase::Admin.new(@connection)
end
def rsgroup_admin()
def rsgroup_admin
::Hbase::RSGroupAdmin.new(@connection)
end
def taskmonitor()
def taskmonitor
::Hbase::TaskMonitor.new(configuration)
end
@ -63,19 +63,19 @@ module Hbase
::Hbase::Table.new(@connection.getTable(TableName.valueOf(table)), shell)
end
def replication_admin()
def replication_admin
::Hbase::RepAdmin.new(configuration)
end
def security_admin()
def security_admin
::Hbase::SecurityAdmin.new(@connection.getAdmin)
end
def visibility_labels_admin()
def visibility_labels_admin
::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin)
end
def quotas_admin()
def quotas_admin
::Hbase::QuotasAdmin.new(@connection.getAdmin)
end


@ -30,18 +30,18 @@ java_import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
module HBaseQuotasConstants
# RPC Quota constants
GLOBAL_BYPASS = 'GLOBAL_BYPASS'
THROTTLE_TYPE = 'THROTTLE_TYPE'
THROTTLE = 'THROTTLE'
REQUEST = 'REQUEST'
WRITE = 'WRITE'
READ = 'READ'
GLOBAL_BYPASS = 'GLOBAL_BYPASS'.freeze
THROTTLE_TYPE = 'THROTTLE_TYPE'.freeze
THROTTLE = 'THROTTLE'.freeze
REQUEST = 'REQUEST'.freeze
WRITE = 'WRITE'.freeze
READ = 'READ'.freeze
# Space quota constants
SPACE = 'SPACE'
NO_INSERTS = 'NO_INSERTS'
NO_WRITES = 'NO_WRITES'
NO_WRITES_COMPACTIONS = 'NO_WRITES_COMPACTIONS'
DISABLE = 'DISABLE'
SPACE = 'SPACE'.freeze
NO_INSERTS = 'NO_INSERTS'.freeze
NO_WRITES = 'NO_WRITES'.freeze
NO_WRITES_COMPACTIONS = 'NO_WRITES_COMPACTIONS'.freeze
DISABLE = 'DISABLE'.freeze
end
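The constants above gain .freeze (the RuboCop Style/MutableConstant cop): a frozen string constant cannot be mutated in place, which protects these shared values from accidental modification. A minimal standalone Ruby sketch of the difference, not part of the commit:

# Illustration only: a frozen string constant rejects in-place mutation.
THROTTLE_TYPE = 'THROTTLE_TYPE'.freeze

begin
  THROTTLE_TYPE << '_EXTRA'          # attempt to modify the shared constant
rescue RuntimeError => e             # FrozenError on newer Rubies, a RuntimeError subclass
  puts "mutation blocked: #{e.class}"
end

puts THROTTLE_TYPE                   # => "THROTTLE_TYPE", still intact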
module Hbase
@ -55,88 +55,88 @@ module Hbase
end
def throttle(args)
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
type = args.fetch(THROTTLE_TYPE, REQUEST)
args.delete(THROTTLE_TYPE)
type, limit, time_unit = _parse_limit(args.delete(LIMIT), ThrottleType, type)
if args.has_key?(USER)
if args.key?(USER)
user = args.delete(USER)
if args.has_key?(TABLE)
if args.key?(TABLE)
table = TableName.valueOf(args.delete(TABLE))
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.throttleUser(user, table, type, limit, time_unit)
elsif args.has_key?(NAMESPACE)
elsif args.key?(NAMESPACE)
namespace = args.delete(NAMESPACE)
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.throttleUser(user, namespace, type, limit, time_unit)
else
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.throttleUser(user, type, limit, time_unit)
end
elsif args.has_key?(TABLE)
elsif args.key?(TABLE)
table = TableName.valueOf(args.delete(TABLE))
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.throttleTable(table, type, limit, time_unit)
elsif args.has_key?(NAMESPACE)
elsif args.key?(NAMESPACE)
namespace = args.delete(NAMESPACE)
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.throttleNamespace(namespace, type, limit, time_unit)
else
raise "One of USER, TABLE or NAMESPACE must be specified"
raise 'One of USER, TABLE or NAMESPACE must be specified'
end
@admin.setQuota(settings)
end
def unthrottle(args)
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
if args.has_key?(USER)
raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
if args.key?(USER)
user = args.delete(USER)
if args.has_key?(TABLE)
if args.key?(TABLE)
table = TableName.valueOf(args.delete(TABLE))
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.unthrottleUser(user, table)
elsif args.has_key?(NAMESPACE)
elsif args.key?(NAMESPACE)
namespace = args.delete(NAMESPACE)
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.unthrottleUser(user, namespace)
else
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.unthrottleUser(user)
end
elsif args.has_key?(TABLE)
elsif args.key?(TABLE)
table = TableName.valueOf(args.delete(TABLE))
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.unthrottleTable(table)
elsif args.has_key?(NAMESPACE)
elsif args.key?(NAMESPACE)
namespace = args.delete(NAMESPACE)
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.unthrottleNamespace(namespace)
else
raise "One of USER, TABLE or NAMESPACE must be specified"
raise 'One of USER, TABLE or NAMESPACE must be specified'
end
@admin.setQuota(settings)
end
def limit_space(args)
raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? and args.kind_of?(Hash))
raise(ArgumentError, 'Argument should be a Hash') unless !args.nil? && args.is_a?(Hash)
# Let the user provide a raw number
if args[LIMIT].is_a?(Numeric)
limit = args[LIMIT]
else
# Parse a string a 1K, 2G, etc.
limit = _parse_size(args[LIMIT])
end
limit = if args[LIMIT].is_a?(Numeric)
args[LIMIT]
else
# Parse a string a 1K, 2G, etc.
_parse_size(args[LIMIT])
end
# Extract the policy, failing if something bogus was provided
policy = SpaceViolationPolicy.valueOf(args[POLICY])
# Create a table or namespace quota
if args.key?(TABLE)
if args.key?(NAMESPACE)
raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
end
settings = QuotaSettingsFactory.limitTableSpace(TableName.valueOf(args.delete(TABLE)), limit, policy)
elsif args.key?(NAMESPACE)
if args.key?(TABLE)
raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
end
settings = QuotaSettingsFactory.limitNamespaceSpace(args.delete(NAMESPACE), limit, policy)
else
@ -147,16 +147,16 @@ module Hbase
end
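The LIMIT handling in limit_space above is folded into a single conditional assignment (Style/ConditionalAssignment); in Ruby an if/else is an expression that yields the value of the branch taken, so the variable is assigned once. A small standalone sketch, independent of the commit:

# Illustration: assigning from an if-expression rather than in each branch.
raw = '2048'

limit = if raw =~ /\A\d+\z/
          Integer(raw)   # plain number: use it directly
        else
          -1             # hypothetical sentinel for "no limit"
        end

puts limit   # => 2048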
def remove_space_limit(args)
raise(ArgumentError, 'Argument should be a Hash') unless (not args.nil? and args.kind_of?(Hash))
raise(ArgumentError, 'Argument should be a Hash') unless !args.nil? && args.is_a?(Hash)
if args.key?(TABLE)
if args.key?(NAMESPACE)
raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
end
table = TableName.valueOf(args.delete(TABLE))
settings = QuotaSettingsFactory.removeTableSpaceLimit(table)
elsif args.key?(NAMESPACE)
if args.key?(TABLE)
raise(ArgumentError, "Only one of TABLE or NAMESPACE can be specified.")
raise(ArgumentError, 'Only one of TABLE or NAMESPACE can be specified.')
end
settings = QuotaSettingsFactory.removeNamespaceSpaceLimit(args.delete(NAMESPACE))
else
@ -165,52 +165,52 @@ module Hbase
@admin.setQuota(settings)
end
def get_master_table_sizes()
QuotaTableUtil.getMasterReportedTableSizes(@admin.getConnection())
def get_master_table_sizes
QuotaTableUtil.getMasterReportedTableSizes(@admin.getConnection)
end
def get_quota_snapshots(regionserver=nil)
def get_quota_snapshots(regionserver = nil)
# Ask a regionserver if we were given one
return get_rs_quota_snapshots(regionserver) if regionserver
# Otherwise, read from the quota table
get_quota_snapshots_from_table
end
def get_quota_snapshots_from_table()
def get_quota_snapshots_from_table
# Reads the snapshots from the hbase:quota table
QuotaTableUtil.getSnapshots(@admin.getConnection())
QuotaTableUtil.getSnapshots(@admin.getConnection)
end
def get_rs_quota_snapshots(rs)
# Reads the snapshots from a specific regionserver
QuotaTableUtil.getRegionServerQuotaSnapshots(@admin.getConnection(),
ServerName.valueOf(rs))
QuotaTableUtil.getRegionServerQuotaSnapshots(@admin.getConnection,
ServerName.valueOf(rs))
end
def set_global_bypass(bypass, args)
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
if args.has_key?(USER)
if args.key?(USER)
user = args.delete(USER)
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
settings = QuotaSettingsFactory.bypassGlobals(user, bypass)
else
raise "Expected USER"
raise 'Expected USER'
end
@admin.setQuota(settings)
end
def list_quotas(args = {})
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
limit = args.delete("LIMIT") || -1
limit = args.delete('LIMIT') || -1
count = 0
filter = QuotaFilter.new()
filter.setUserFilter(args.delete(USER)) if args.has_key?(USER)
filter.setTableFilter(args.delete(TABLE)) if args.has_key?(TABLE)
filter.setNamespaceFilter(args.delete(NAMESPACE)) if args.has_key?(NAMESPACE)
raise(ArgumentError, "Unexpected arguments: " + args.inspect) unless args.empty?
filter = QuotaFilter.new
filter.setUserFilter(args.delete(USER)) if args.key?(USER)
filter.setTableFilter(args.delete(TABLE)) if args.key?(TABLE)
filter.setNamespaceFilter(args.delete(NAMESPACE)) if args.key?(NAMESPACE)
raise(ArgumentError, 'Unexpected arguments: ' + args.inspect) unless args.empty?
# Start the scanner
scanner = @admin.getQuotaRetriever(filter)
@ -219,30 +219,28 @@ module Hbase
# Iterate results
while iter.hasNext
if limit > 0 && count >= limit
break
end
break if limit > 0 && count >= limit
settings = iter.next
owner = {
USER => settings.getUserName(),
TABLE => settings.getTableName(),
NAMESPACE => settings.getNamespace(),
}.delete_if { |k, v| v.nil? }.map {|k, v| k.to_s + " => " + v.to_s} * ', '
USER => settings.getUserName,
TABLE => settings.getTableName,
NAMESPACE => settings.getNamespace
}.delete_if { |_k, v| v.nil? }.map { |k, v| k.to_s + ' => ' + v.to_s } * ', '
yield owner, settings.to_s
count += 1
end
ensure
scanner.close()
scanner.close
end
return count
count
end
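The owner string assembled in list_quotas above leans on two small idioms: delete_if with an ignored key (renamed to _k by the autocorrect) drops entries whose value is nil, and Array#* with a String argument acts like join. A quick standalone illustration with made-up values:

# Illustration: building the same kind of "KEY => value" summary string.
owner = {
  'USER'      => 'bob',
  'TABLE'     => 't1',
  'NAMESPACE' => nil
}.delete_if { |_k, v| v.nil? }.map { |k, v| k.to_s + ' => ' + v.to_s } * ', '

puts owner   # => "USER => bob, TABLE => t1"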
def list_snapshot_sizes()
QuotaTableUtil.getObservedSnapshotSizes(@admin.getConnection())
def list_snapshot_sizes
QuotaTableUtil.getObservedSnapshotSizes(@admin.getConnection)
end
def _parse_size(str_limit)
@ -255,7 +253,7 @@ module Hbase
return _size_from_str(match[1].to_i, match[2])
end
else
raise(ArgumentError, "Invalid size limit syntax")
raise(ArgumentError, 'Invalid size limit syntax')
end
end
@ -265,38 +263,38 @@ module Hbase
if match
if match[2] == 'req'
limit = match[1].to_i
type = type_cls.valueOf(type + "_NUMBER")
type = type_cls.valueOf(type + '_NUMBER')
else
limit = _size_from_str(match[1].to_i, match[2])
type = type_cls.valueOf(type + "_SIZE")
type = type_cls.valueOf(type + '_SIZE')
end
if limit <= 0
raise(ArgumentError, "Invalid throttle limit, must be greater then 0")
raise(ArgumentError, 'Invalid throttle limit, must be greater then 0')
end
case match[3]
when 'sec' then time_unit = TimeUnit::SECONDS
when 'min' then time_unit = TimeUnit::MINUTES
when 'hour' then time_unit = TimeUnit::HOURS
when 'day' then time_unit = TimeUnit::DAYS
when 'sec' then time_unit = TimeUnit::SECONDS
when 'min' then time_unit = TimeUnit::MINUTES
when 'hour' then time_unit = TimeUnit::HOURS
when 'day' then time_unit = TimeUnit::DAYS
end
return type, limit, time_unit
else
raise(ArgumentError, "Invalid throttle limit syntax")
raise(ArgumentError, 'Invalid throttle limit syntax')
end
end
def _size_from_str(value, suffix)
case suffix
when 'k' then value <<= 10
when 'm' then value <<= 20
when 'g' then value <<= 30
when 't' then value <<= 40
when 'p' then value <<= 50
when 'k' then value <<= 10
when 'm' then value <<= 20
when 'g' then value <<= 30
when 't' then value <<= 40
when 'p' then value <<= 50
end
return value
value
end
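_size_from_str turns a number plus a unit suffix into bytes with left shifts, so 'g' shifts by 30 bits, i.e. multiplies by 2**30. A worked standalone copy of the same arithmetic, for illustration only:

# Illustration: the shift-based size conversion used by _size_from_str.
def size_from_str(value, suffix)
  case suffix
  when 'k' then value <<= 10   # * 1_024
  when 'm' then value <<= 20   # * 1_048_576
  when 'g' then value <<= 30   # * 1_073_741_824
  when 't' then value <<= 40
  when 'p' then value <<= 50
  end
  value
end

puts size_from_str(2, 'g')     # => 2147483648 (2 GiB in bytes)
puts size_from_str(512, 'k')   # => 524288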
end
end


@ -43,7 +43,7 @@ module Hbase
def add_peer(id, args = {}, peer_tableCFs = nil)
if args.is_a?(Hash)
unless peer_tableCFs.nil?
raise(ArgumentError, "peer_tableCFs should be specified as TABLE_CFS in args")
raise(ArgumentError, 'peer_tableCFs should be specified as TABLE_CFS in args')
end
endpoint_classname = args.fetch(ENDPOINT_CLASSNAME, nil)
@ -51,12 +51,12 @@ module Hbase
# Handle cases where custom replication endpoint and cluster key are either both provided
# or neither are provided
if endpoint_classname.nil? and cluster_key.nil?
raise(ArgumentError, "Either ENDPOINT_CLASSNAME or CLUSTER_KEY must be specified.")
if endpoint_classname.nil? && cluster_key.nil?
raise(ArgumentError, 'Either ENDPOINT_CLASSNAME or CLUSTER_KEY must be specified.')
end
# Cluster Key is required for ReplicationPeerConfig for a custom replication endpoint
if !endpoint_classname.nil? and cluster_key.nil?
if !endpoint_classname.nil? && cluster_key.nil?
cluster_key = ZKConfig.getZooKeeperClusterKey(@configuration)
end
@ -81,9 +81,9 @@ module Hbase
unless data.nil?
# Convert Strings to Bytes for peer_data
peer_data = replication_peer_config.get_peer_data
data.each{|key, val|
data.each do |key, val|
peer_data.put(Bytes.to_bytes(key), Bytes.to_bytes(val))
}
end
end
unless namespaces.nil?
@ -97,14 +97,14 @@ module Hbase
unless table_cfs.nil?
# convert table_cfs to TableName
map = java.util.HashMap.new
table_cfs.each{|key, val|
table_cfs.each do |key, val|
map.put(org.apache.hadoop.hbase.TableName.valueOf(key), val)
}
end
replication_peer_config.set_table_cfs_map(map)
end
@admin.addReplicationPeer(id, replication_peer_config)
else
raise(ArgumentError, "args must be a Hash")
raise(ArgumentError, 'args must be a Hash')
end
end
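The multi-line brace blocks in add_peer above become do ... end blocks (Style/BlockDelimiters); for these iterations the two forms behave identically. A tiny standalone equivalent using hypothetical data:

# Illustration: brace block vs. do...end block, same behavior for multi-line iteration.
data = { 'k1' => 'v1', 'k2' => 'v2' }
out  = {}

data.each do |key, val|
  out[key.upcase] = val
end

puts out.inspect   # => {"K1"=>"v1", "K2"=>"v2"}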
@ -116,10 +116,10 @@ module Hbase
#---------------------------------------------------------------------------------------------
# Show replcated tables/column families, and their ReplicationType
def list_replicated_tables(regex = ".*")
def list_replicated_tables(regex = '.*')
pattern = java.util.regex.Pattern.compile(regex)
list = @admin.listReplicatedTableCFs()
list.select {|t| pattern.match(t.getTable().getNameAsString())}
list = @admin.listReplicatedTableCFs
list.select { |t| pattern.match(t.getTable.getNameAsString) }
end
#----------------------------------------------------------------------------------------------
@ -144,7 +144,7 @@ module Hbase
# Show the current tableCFs config for the specified peer
def show_peer_tableCFs(id)
rpc = @admin.getReplicationPeerConfig(id)
ReplicationSerDeHelper.convertToString(rpc.getTableCFsMap())
ReplicationSerDeHelper.convertToString(rpc.getTableCFsMap)
end
#----------------------------------------------------------------------------------------------
@ -153,9 +153,9 @@ module Hbase
unless tableCFs.nil?
# convert tableCFs to TableName
map = java.util.HashMap.new
tableCFs.each{|key, val|
tableCFs.each do |key, val|
map.put(org.apache.hadoop.hbase.TableName.valueOf(key), val)
}
end
rpc = get_peer_config(id)
unless rpc.nil?
rpc.setTableCFsMap(map)
@ -170,9 +170,9 @@ module Hbase
unless tableCFs.nil?
# convert tableCFs to TableName
map = java.util.HashMap.new
tableCFs.each{|key, val|
tableCFs.each do |key, val|
map.put(org.apache.hadoop.hbase.TableName.valueOf(key), val)
}
end
end
@admin.appendReplicationPeerTableCFs(id, map)
end
@ -183,9 +183,9 @@ module Hbase
unless tableCFs.nil?
# convert tableCFs to TableName
map = java.util.HashMap.new
tableCFs.each{|key, val|
tableCFs.each do |key, val|
map.put(org.apache.hadoop.hbase.TableName.valueOf(key), val)
}
end
end
@admin.removeReplicationPeerTableCFs(id, map)
end
@ -210,10 +210,8 @@ module Hbase
unless namespaces.nil?
rpc = get_peer_config(id)
unless rpc.nil?
ns_set = rpc.getNamespaces()
if ns_set.nil?
ns_set = java.util.HashSet.new
end
ns_set = rpc.getNamespaces
ns_set = java.util.HashSet.new if ns_set.nil?
namespaces.each do |n|
ns_set.add(n)
end
@ -228,7 +226,7 @@ module Hbase
unless namespaces.nil?
rpc = get_peer_config(id)
unless rpc.nil?
ns_set = rpc.getNamespaces()
ns_set = rpc.getNamespaces
unless ns_set.nil?
namespaces.each do |n|
ns_set.remove(n)
@ -281,14 +279,14 @@ module Hbase
peers.each do |peer|
map.put(peer.getPeerId, peer.getPeerConfig)
end
return map
map
end
def get_peer_config(id)
@admin.getReplicationPeerConfig(id)
end
def update_peer_config(id, args={})
def update_peer_config(id, args = {})
# Optional parameters
config = args.fetch(CONFIG, nil)
data = args.fetch(DATA, nil)
@ -302,9 +300,9 @@ module Hbase
unless data.nil?
# Convert Strings to Bytes for peer_data
peer_data = replication_peer_config.get_peer_data
data.each{|key, val|
data.each do |key, val|
peer_data.put(Bytes.to_bytes(key), Bytes.to_bytes(val))
}
end
end
@admin.updateReplicationPeerConfig(id, replication_peer_config)


@ -36,21 +36,17 @@ module Hbase
#--------------------------------------------------------------------------
# Returns a list of groups in hbase
def list_rs_groups
@admin.listRSGroups.map { |g| g.getName }
@admin.listRSGroups.map(&:getName)
end
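list_rs_groups now uses the Symbol#to_proc shorthand (Style/SymbolProc): map(&:getName) is equivalent to map { |g| g.getName } when the block only calls one method on each element. A standalone Ruby illustration with hypothetical objects rather than the real RSGroupInfo class:

# Illustration: &:method_name expands to { |x| x.method_name }.
Group = Struct.new(:name) do
  def get_name
    name
  end
end

groups = [Group.new('default'), Group.new('analytics')]

puts groups.map(&:get_name).inspect          # => ["default", "analytics"]
puts groups.map { |g| g.get_name }.inspect   # same result, written out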
#--------------------------------------------------------------------------
# get a group's information
def get_rsgroup(group_name)
group = @admin.getRSGroupInfo(group_name)
if group.nil?
raise(ArgumentError, 'Group does not exist: ' + group_name)
end
raise(ArgumentError, 'Group does not exist: ' + group_name) if group.nil?
res = {}
if block_given?
yield('Servers:')
end
yield('Servers:') if block_given?
servers = []
group.getServers.each do |v|
@ -63,9 +59,7 @@ module Hbase
res[:servers] = servers
tables = []
if block_given?
yield('Tables:')
end
yield('Tables:') if block_given?
group.getTables.each do |v|
if block_given?
yield(v.toString)
@ -75,11 +69,7 @@ module Hbase
end
res[:tables] = tables
if !block_given?
res
else
nil
end
res unless block_given?
end
#--------------------------------------------------------------------------
@ -113,7 +103,7 @@ module Hbase
#--------------------------------------------------------------------------
# move server to a group
def move_tables(dest, *args)
tables = java.util.HashSet.new;
tables = java.util.HashSet.new
args[0].each do |s|
tables.add(org.apache.hadoop.hbase.TableName.valueOf(s))
end
@ -124,10 +114,9 @@ module Hbase
# get group of server
def get_rsgroup_of_server(server)
res = @admin.getRSGroupOfServer(
org.apache.hadoop.hbase.net.Address.fromString(server))
if res.nil?
raise(ArgumentError,'Server has no group: ' + server)
end
org.apache.hadoop.hbase.net.Address.fromString(server)
)
raise(ArgumentError, 'Server has no group: ' + server) if res.nil?
res
end
@ -135,10 +124,9 @@ module Hbase
# get group of table
def get_rsgroup_of_table(table)
res = @admin.getRSGroupInfoOfTable(
org.apache.hadoop.hbase.TableName.valueOf(table))
if res.nil?
raise(ArgumentError,'Table has no group: ' + table)
end
org.apache.hadoop.hbase.TableName.valueOf(table)
)
raise(ArgumentError, 'Table has no group: ' + table) if res.nil?
res
end
@ -146,7 +134,7 @@ module Hbase
# move server and table to a group
def move_servers_tables(dest, *args)
servers = java.util.HashSet.new
tables = java.util.HashSet.new;
tables = java.util.HashSet.new
args[0].each do |s|
servers.add(org.apache.hadoop.hbase.net.Address.fromString(s))
end
@ -155,6 +143,5 @@ module Hbase
end
@admin.moveServersAndTables(servers, tables, dest)
end
end
end


@ -26,7 +26,7 @@ module Hbase
def initialize(admin)
@admin = admin
@connection = @admin.getConnection()
@connection = @admin.getConnection
end
def close
@ -34,31 +34,33 @@ module Hbase
end
#----------------------------------------------------------------------------------------------
def grant(user, permissions, table_name=nil, family=nil, qualifier=nil)
def grant(user, permissions, table_name = nil, family = nil, qualifier = nil)
security_available?
# TODO: need to validate user name
begin
# Verify that the specified permission is valid
if (permissions == nil || permissions.length == 0)
raise(ArgumentError, "Invalid permission: no actions associated with user")
if permissions.nil? || permissions.empty?
raise(ArgumentError, 'Invalid permission: no actions associated with user')
end
perm = org.apache.hadoop.hbase.security.access.Permission.new(
permissions.to_java_bytes)
permissions.to_java_bytes
)
if (table_name != nil)
tablebytes=table_name.to_java_bytes
#check if the tablename passed is actually a namespace
if (isNamespace?(table_name))
if !table_name.nil?
tablebytes = table_name.to_java_bytes
# check if the tablename passed is actually a namespace
if isNamespace?(table_name)
# Namespace should exist first.
namespace_name = table_name[1...table_name.length]
raise(ArgumentError, "Can't find a namespace: #{namespace_name}") unless
namespace_exists?(namespace_name)
org.apache.hadoop.hbase.security.access.AccessControlClient.grant(
@connection, namespace_name, user, perm.getActions())
@connection, namespace_name, user, perm.getActions
)
else
# Table should exist
raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
@ -66,100 +68,106 @@ module Hbase
tableName = org.apache.hadoop.hbase.TableName.valueOf(table_name)
htd = @admin.getTableDescriptor(tableName)
if (family != nil)
raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
unless family.nil?
raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
end
fambytes = family.to_java_bytes if (family != nil)
qualbytes = qualifier.to_java_bytes if (qualifier != nil)
fambytes = family.to_java_bytes unless family.nil?
qualbytes = qualifier.to_java_bytes unless qualifier.nil?
org.apache.hadoop.hbase.security.access.AccessControlClient.grant(
@connection, tableName, user, fambytes, qualbytes, perm.getActions())
@connection, tableName, user, fambytes, qualbytes, perm.getActions
)
end
else
# invoke cp endpoint to perform access controls
org.apache.hadoop.hbase.security.access.AccessControlClient.grant(
@connection, user, perm.getActions())
@connection, user, perm.getActions
)
end
end
end
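grant above dispatches on the shape of the name it is given: nil means a global grant, a name starting with '@' is treated as a namespace (with the leading character stripped), and anything else is a table. A minimal standalone sketch of just that dispatch, with hypothetical names and without the real AccessControlClient calls:

# Illustration: choosing global, namespace or table scope the way grant does.
def scope_for(name)
  return [:global, nil] if name.nil?
  return [:namespace, name[1...name.length]] if name.start_with?('@')
  [:table, name]
end

puts scope_for(nil).inspect            # => [:global, nil]
puts scope_for('@ns1').inspect         # => [:namespace, "ns1"]
puts scope_for('ns1:mytable').inspect  # => [:table, "ns1:mytable"]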
#----------------------------------------------------------------------------------------------
def revoke(user, table_name=nil, family=nil, qualifier=nil)
def revoke(user, table_name = nil, family = nil, qualifier = nil)
security_available?
# TODO: need to validate user name
begin
if (table_name != nil)
#check if the tablename passed is actually a namespace
if (isNamespace?(table_name))
if !table_name.nil?
# check if the tablename passed is actually a namespace
if isNamespace?(table_name)
# Namespace should exist first.
namespace_name = table_name[1...table_name.length]
raise(ArgumentError, "Can't find a namespace: #{namespace_name}") unless namespace_exists?(namespace_name)
tablebytes=table_name.to_java_bytes
tablebytes = table_name.to_java_bytes
org.apache.hadoop.hbase.security.access.AccessControlClient.revoke(
@connection, namespace_name, user)
@connection, namespace_name, user
)
else
# Table should exist
raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
# Table should exist
raise(ArgumentError, "Can't find a table: #{table_name}") unless exists?(table_name)
tableName = org.apache.hadoop.hbase.TableName.valueOf(table_name)
htd = @admin.getTableDescriptor(tableName)
tableName = org.apache.hadoop.hbase.TableName.valueOf(table_name)
htd = @admin.getTableDescriptor(tableName)
if (family != nil)
raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
end
unless family.nil?
raise(ArgumentError, "Can't find a family: #{family}") unless htd.hasFamily(family.to_java_bytes)
end
fambytes = family.to_java_bytes if (family != nil)
qualbytes = qualifier.to_java_bytes if (qualifier != nil)
fambytes = family.to_java_bytes unless family.nil?
qualbytes = qualifier.to_java_bytes unless qualifier.nil?
org.apache.hadoop.hbase.security.access.AccessControlClient.revoke(
@connection, tableName, user, fambytes, qualbytes)
@connection, tableName, user, fambytes, qualbytes
)
end
else
perm = org.apache.hadoop.hbase.security.access.Permission.new(''.to_java_bytes)
org.apache.hadoop.hbase.security.access.AccessControlClient.revoke(
@connection, user, perm.getActions())
@connection, user, perm.getActions
)
end
end
end
#----------------------------------------------------------------------------------------------
def user_permission(table_regex=nil)
def user_permission(table_regex = nil)
security_available?
all_perms = org.apache.hadoop.hbase.security.access.AccessControlClient.getUserPermissions(
@connection,table_regex)
@connection, table_regex
)
res = {}
count = 0
count = 0
all_perms.each do |value|
user_name = String.from_java_bytes(value.getUser)
if (table_regex != nil && isNamespace?(table_regex))
namespace = value.getNamespace()
else
namespace = (value.getTableName != nil) ? value.getTableName.getNamespaceAsString() : value.getNamespace()
end
table = (value.getTableName != nil) ? value.getTableName.getNameAsString() : ''
family = (value.getFamily != nil) ?
org.apache.hadoop.hbase.util.Bytes::toStringBinary(value.getFamily) :
''
qualifier = (value.getQualifier != nil) ?
org.apache.hadoop.hbase.util.Bytes::toStringBinary(value.getQualifier) :
''
user_name = String.from_java_bytes(value.getUser)
if !table_regex.nil? && isNamespace?(table_regex)
namespace = value.getNamespace
else
namespace = !value.getTableName.nil? ? value.getTableName.getNamespaceAsString : value.getNamespace
end
table = !value.getTableName.nil? ? value.getTableName.getNameAsString : ''
family = !value.getFamily.nil? ?
org.apache.hadoop.hbase.util.Bytes.toStringBinary(value.getFamily) :
''
qualifier = !value.getQualifier.nil? ?
org.apache.hadoop.hbase.util.Bytes.toStringBinary(value.getQualifier) :
''
action = org.apache.hadoop.hbase.security.access.Permission.new value.getActions
action = org.apache.hadoop.hbase.security.access.Permission.new value.getActions
if block_given?
yield(user_name, "#{namespace},#{table},#{family},#{qualifier}: #{action.to_s}")
else
res[user_name] ||= {}
res[user_name]["#{family}:#{qualifier}"] = action
end
count += 1
if block_given?
yield(user_name, "#{namespace},#{table},#{family},#{qualifier}: #{action}")
else
res[user_name] ||= {}
res[user_name]["#{family}:#{qualifier}"] = action
end
count += 1
end
return ((block_given?) ? count : res)
(block_given? ? count : res)
end
# Does table exist?
@ -171,15 +179,15 @@ module Hbase
table_name.start_with?('@')
end
# Does Namespace exist
# Does Namespace exist
def namespace_exists?(namespace_name)
return @admin.getNamespaceDescriptor(namespace_name) != nil
return !@admin.getNamespaceDescriptor(namespace_name).nil?
rescue org.apache.hadoop.hbase.NamespaceNotFoundException => e
return false
end
# Make sure that security features are available
def security_available?()
def security_available?
caps = []
begin
# Try the getSecurityCapabilities API where supported.
@ -189,11 +197,11 @@ module Hbase
rescue
# If we are unable to use getSecurityCapabilities, fall back with a check for
# deployment of the ACL table
raise(ArgumentError, "DISABLED: Security features are not available") unless \
raise(ArgumentError, 'DISABLED: Security features are not available') unless \
exists?(org.apache.hadoop.hbase.security.access.AccessControlLists::ACL_TABLE_NAME)
return
end
raise(ArgumentError, "DISABLED: Security features are not available") unless \
raise(ArgumentError, 'DISABLED: Security features are not available') unless \
caps.include? org.apache.hadoop.hbase.client.security.SecurityCapability::AUTHORIZATION
end
end


@ -32,7 +32,7 @@ module Hbase
#
# e.g. name = scan, adds table.scan which calls Scan.scan
def self.add_shell_command(name)
self.add_command(name, name, name)
add_command(name, name, name)
end
# add a named command to the table instance
@ -42,18 +42,18 @@ module Hbase
# shell_command - name of the command in the shell
# internal_method_name - name of the method in the shell command to forward the call
def self.add_command(name, shell_command, internal_method_name)
method = name.to_sym
self.class_eval do
method = name.to_sym
class_eval do
define_method method do |*args|
@shell.internal_command(shell_command, internal_method_name, self, *args)
end
@shell.internal_command(shell_command, internal_method_name, self, *args)
end
end
end
# General help for the table
# class level so we can call it from anywhere
def self.help
return <<-EOF
<<-EOF
Help for table-reference commands.
You can either create a table via 'create' and then manipulate the table via commands like 'put', 'get', etc.
@ -113,13 +113,13 @@ EOF
def initialize(table, shell)
@table = table
@name = @table.getName().getNameAsString()
@name = @table.getName.getNameAsString
@shell = shell
@converters = Hash.new()
@converters = {}
end
def close()
@table.close()
def close
@table.close
end
# Note the below methods are prefixed with '_' to hide them from the average user, as
@ -131,21 +131,21 @@ EOF
p = org.apache.hadoop.hbase.client.Put.new(row.to_s.to_java_bytes)
family, qualifier = parse_column_name(column)
if args.any?
attributes = args[ATTRIBUTES]
set_attributes(p, attributes) if attributes
visibility = args[VISIBILITY]
set_cell_visibility(p, visibility) if visibility
ttl = args[TTL]
set_op_ttl(p, ttl) if ttl
attributes = args[ATTRIBUTES]
set_attributes(p, attributes) if attributes
visibility = args[VISIBILITY]
set_cell_visibility(p, visibility) if visibility
ttl = args[TTL]
set_op_ttl(p, ttl) if ttl
end
#Case where attributes are specified without timestamp
if timestamp.kind_of?(Hash)
timestamp.each do |k, v|
# Case where attributes are specified without timestamp
if timestamp.is_a?(Hash)
timestamp.each do |k, v|
if k == 'ATTRIBUTES'
set_attributes(p, v)
elsif k == 'VISIBILITY'
set_cell_visibility(p, v)
elsif k == "TTL"
elsif k == 'TTL'
set_op_ttl(p, v)
end
end
@ -162,36 +162,36 @@ EOF
#----------------------------------------------------------------------------------------------
# Create a Delete mutation
def _createdelete_internal(row, column = nil,
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
temptimestamp = timestamp
if temptimestamp.kind_of?(Hash)
if temptimestamp.is_a?(Hash)
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP
end
d = org.apache.hadoop.hbase.client.Delete.new(row.to_s.to_java_bytes, timestamp)
if temptimestamp.kind_of?(Hash)
temptimestamp.each do |k, v|
if v.kind_of?(String)
if temptimestamp.is_a?(Hash)
temptimestamp.each do |_k, v|
if v.is_a?(String)
set_cell_visibility(d, v) if v
end
end
end
if args.any?
visibility = args[VISIBILITY]
set_cell_visibility(d, visibility) if visibility
visibility = args[VISIBILITY]
set_cell_visibility(d, visibility) if visibility
end
if column
family, qualifier = parse_column_name(column)
d.addColumns(family, qualifier, timestamp)
end
return d
d
end
#----------------------------------------------------------------------------------------------
# Delete rows using prefix
def _deleterows_internal(row, column = nil,
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args={})
cache = row["CACHE"] ? row["CACHE"] : 100
prefix = row["ROWPREFIXFILTER"]
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
cache = row['CACHE'] ? row['CACHE'] : 100
prefix = row['ROWPREFIXFILTER']
# create scan to get table names using prefix
scan = org.apache.hadoop.hbase.client.Scan.new
@ -204,7 +204,7 @@ EOF
iter = scanner.iterator
while iter.hasNext
row = iter.next
key = org.apache.hadoop.hbase.util.Bytes::toStringBinary(row.getRow)
key = org.apache.hadoop.hbase.util.Bytes.toStringBinary(row.getRow)
d = _createdelete_internal(key, column, timestamp, args)
list.add(d)
if list.size >= cache
@ -218,20 +218,20 @@ EOF
#----------------------------------------------------------------------------------------------
# Delete a cell
def _delete_internal(row, column,
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
_deleteall_internal(row, column, timestamp, args)
end
#----------------------------------------------------------------------------------------------
# Delete a row
def _deleteall_internal(row, column = nil,
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
# delete operation doesn't need read permission. Retaining the read check for
# meta table as a part of HBASE-5837.
if is_meta_table?
raise ArgumentError, "Row Not Found" if _get_internal(row).nil?
raise ArgumentError, 'Row Not Found' if _get_internal(row).nil?
end
if row.kind_of?(Hash)
if row.is_a?(Hash)
_deleterows_internal(row, column, timestamp, args)
else
d = _createdelete_internal(row, column, timestamp, args)
@ -241,19 +241,17 @@ EOF
#----------------------------------------------------------------------------------------------
# Increment a counter atomically
def _incr_internal(row, column, value = nil, args={})
if value.kind_of?(Hash)
value = 1
end
def _incr_internal(row, column, value = nil, args = {})
value = 1 if value.is_a?(Hash)
value ||= 1
incr = org.apache.hadoop.hbase.client.Increment.new(row.to_s.to_java_bytes)
family, qualifier = parse_column_name(column)
if qualifier.nil?
raise ArgumentError, "Failed to provide both column family and column qualifier for incr"
raise ArgumentError, 'Failed to provide both column family and column qualifier for incr'
end
if args.any?
attributes = args[ATTRIBUTES]
visibility = args[VISIBILITY]
attributes = args[ATTRIBUTES]
visibility = args[VISIBILITY]
set_attributes(incr, attributes) if attributes
set_cell_visibility(incr, visibility) if visibility
ttl = args[TTL]
@ -265,21 +263,21 @@ EOF
# Fetch cell value
cell = result.listCells[0]
org.apache.hadoop.hbase.util.Bytes::toLong(cell.getValueArray,
cell.getValueOffset, cell.getValueLength)
org.apache.hadoop.hbase.util.Bytes.toLong(cell.getValueArray,
cell.getValueOffset, cell.getValueLength)
end
#----------------------------------------------------------------------------------------------
# appends the value atomically
def _append_internal(row, column, value, args={})
def _append_internal(row, column, value, args = {})
append = org.apache.hadoop.hbase.client.Append.new(row.to_s.to_java_bytes)
family, qualifier = parse_column_name(column)
if qualifier.nil?
raise ArgumentError, "Failed to provide both column family and column qualifier for append"
raise ArgumentError, 'Failed to provide both column family and column qualifier for append'
end
if args.any?
attributes = args[ATTRIBUTES]
visibility = args[VISIBILITY]
attributes = args[ATTRIBUTES]
visibility = args[VISIBILITY]
set_attributes(append, attributes) if attributes
set_cell_visibility(append, visibility) if visibility
ttl = args[TTL]
@ -291,31 +289,30 @@ EOF
# Fetch cell value
cell = result.listCells[0]
org.apache.hadoop.hbase.util.Bytes::toStringBinary(cell.getValueArray,
cell.getValueOffset, cell.getValueLength)
org.apache.hadoop.hbase.util.Bytes.toStringBinary(cell.getValueArray,
cell.getValueOffset, cell.getValueLength)
end
#----------------------------------------------------------------------------------------------
# Count rows in a table
def _count_internal(interval = 1000, scan = nil)
raise(ArgumentError, "Scan argument should be org.apache.hadoop.hbase.client.Scan") \
unless scan == nil || scan.kind_of?(org.apache.hadoop.hbase.client.Scan)
raise(ArgumentError, 'Scan argument should be org.apache.hadoop.hbase.client.Scan') \
unless scan.nil? || scan.is_a?(org.apache.hadoop.hbase.client.Scan)
# We can safely set scanner caching with the first key only filter
if scan == nil
if scan.nil?
scan = org.apache.hadoop.hbase.client.Scan.new
scan.setCacheBlocks(false)
scan.setCaching(10)
scan.setFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.new)
else
scan.setCacheBlocks(false)
filter = scan.getFilter()
filter = scan.getFilter
firstKeyOnlyFilter = org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter.new
if filter == nil
if filter.nil?
scan.setFilter(firstKeyOnlyFilter)
else
firstKeyOnlyFilter.setReversed(filter.isReversed())
firstKeyOnlyFilter.setReversed(filter.isReversed)
scan.setFilter(org.apache.hadoop.hbase.filter.FilterList.new(filter, firstKeyOnlyFilter))
end
end
@ -329,15 +326,15 @@ EOF
while iter.hasNext
row = iter.next
count += 1
next unless (block_given? && count % interval == 0)
next unless block_given? && count % interval == 0
# Allow command modules to visualize counting process
yield(count,
org.apache.hadoop.hbase.util.Bytes::toStringBinary(row.getRow))
org.apache.hadoop.hbase.util.Bytes.toStringBinary(row.getRow))
end
scanner.close()
scanner.close
# Return the counter
return count
count
end
#----------------------------------------------------------------------------------------------
@ -346,19 +343,19 @@ EOF
get = org.apache.hadoop.hbase.client.Get.new(row.to_s.to_java_bytes)
maxlength = -1
count = 0
@converters.clear()
@converters.clear
# Normalize args
args = args.first if args.first.kind_of?(Hash)
if args.kind_of?(String) || args.kind_of?(Array)
columns = [ args ].flatten.compact
args = args.first if args.first.is_a?(Hash)
if args.is_a?(String) || args.is_a?(Array)
columns = [args].flatten.compact
args = { COLUMNS => columns }
end
#
# Parse arguments
#
unless args.kind_of?(Hash)
unless args.is_a?(Hash)
raise ArgumentError, "Failed parse of #{args.inspect}, #{args.class}"
end
@ -373,17 +370,17 @@ EOF
converter_class = args.delete(FORMATTER_CLASS) || 'org.apache.hadoop.hbase.util.Bytes'
unless args.empty?
columns = args[COLUMN] || args[COLUMNS]
if args[VERSIONS]
vers = args[VERSIONS]
else
vers = 1
end
vers = if args[VERSIONS]
args[VERSIONS]
else
1
end
if columns
# Normalize types, convert string to an array of strings
columns = [ columns ] if columns.is_a?(String)
columns = [columns] if columns.is_a?(String)
# At this point it is either an array or some unsupported stuff
unless columns.kind_of?(Array)
unless columns.is_a?(Array)
raise ArgumentError, "Failed parse column argument type #{args.inspect}, #{args.class}"
end
@ -403,14 +400,14 @@ EOF
get.setTimeRange(args[TIMERANGE][0], args[TIMERANGE][1]) if args[TIMERANGE]
else
if attributes
set_attributes(get, attributes)
set_attributes(get, attributes)
elsif authorizations
set_authorizations(get, authorizations)
set_authorizations(get, authorizations)
else
# May have passed TIMESTAMP and row only; wants all columns from ts.
unless ts = args[TIMESTAMP] || tr = args[TIMERANGE]
raise ArgumentError, "Failed parse of #{args.inspect}, #{args.class}"
end
# May have passed TIMESTAMP and row only; wants all columns from ts.
unless ts = args[TIMESTAMP] || tr = args[TIMERANGE]
raise ArgumentError, "Failed parse of #{args.inspect}, #{args.class}"
end
end
get.setMaxVersions(vers)
@ -422,11 +419,12 @@ EOF
set_authorizations(get, authorizations) if authorizations
end
unless filter.class == String
get.setFilter(filter)
else
if filter.class == String
get.setFilter(
org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter.to_java_bytes))
org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter.to_java_bytes)
)
else
get.setFilter(filter)
end
get.setConsistency(org.apache.hadoop.hbase.client.Consistency.valueOf(consistency)) if consistency
@ -444,9 +442,9 @@ EOF
res = {}
result.listCells.each do |c|
family = convert_bytes_with_position(c.getFamilyArray,
c.getFamilyOffset, c.getFamilyLength, converter_class, converter)
c.getFamilyOffset, c.getFamilyLength, converter_class, converter)
qualifier = convert_bytes_with_position(c.getQualifierArray,
c.getQualifierOffset, c.getQualifierLength, converter_class, converter)
c.getQualifierOffset, c.getQualifierLength, converter_class, converter)
column = "#{family}:#{qualifier}"
value = to_string(column, c, maxlength, converter_class, converter)
@ -459,7 +457,7 @@ EOF
end
# If block given, we've yielded all the results, otherwise just return them
return ((block_given?) ? [count, is_stale]: res)
(block_given? ? [count, is_stale] : res)
end
#----------------------------------------------------------------------------------------------
@ -477,48 +475,48 @@ EOF
# Fetch cell value
cell = result.listCells[0]
org.apache.hadoop.hbase.util.Bytes::toLong(cell.getValueArray,
cell.getValueOffset, cell.getValueLength)
org.apache.hadoop.hbase.util.Bytes.toLong(cell.getValueArray,
cell.getValueOffset, cell.getValueLength)
end
def _hash_to_scan(args)
if args.any?
enablemetrics = args["ALL_METRICS"].nil? ? false : args["ALL_METRICS"]
enablemetrics = enablemetrics || !args["METRICS"].nil?
filter = args["FILTER"]
startrow = args["STARTROW"] || ''
stoprow = args["STOPROW"]
rowprefixfilter = args["ROWPREFIXFILTER"]
timestamp = args["TIMESTAMP"]
columns = args["COLUMNS"] || args["COLUMN"] || []
enablemetrics = args['ALL_METRICS'].nil? ? false : args['ALL_METRICS']
enablemetrics ||= !args['METRICS'].nil?
filter = args['FILTER']
startrow = args['STARTROW'] || ''
stoprow = args['STOPROW']
rowprefixfilter = args['ROWPREFIXFILTER']
timestamp = args['TIMESTAMP']
columns = args['COLUMNS'] || args['COLUMN'] || []
# If CACHE_BLOCKS not set, then default 'true'.
cache_blocks = args["CACHE_BLOCKS"].nil? ? true: args["CACHE_BLOCKS"]
cache = args["CACHE"] || 0
reversed = args["REVERSED"] || false
versions = args["VERSIONS"] || 1
cache_blocks = args['CACHE_BLOCKS'].nil? ? true : args['CACHE_BLOCKS']
cache = args['CACHE'] || 0
reversed = args['REVERSED'] || false
versions = args['VERSIONS'] || 1
timerange = args[TIMERANGE]
raw = args["RAW"] || false
raw = args['RAW'] || false
attributes = args[ATTRIBUTES]
authorizations = args[AUTHORIZATIONS]
consistency = args[CONSISTENCY]
# Normalize column names
columns = [columns] if columns.class == String
limit = args["LIMIT"] || -1
unless columns.kind_of?(Array)
raise ArgumentError.new("COLUMNS must be specified as a String or an Array")
limit = args['LIMIT'] || -1
unless columns.is_a?(Array)
raise ArgumentError, 'COLUMNS must be specified as a String or an Array'
end
scan = if stoprow
org.apache.hadoop.hbase.client.Scan.new(startrow.to_java_bytes, stoprow.to_java_bytes)
else
org.apache.hadoop.hbase.client.Scan.new(startrow.to_java_bytes)
org.apache.hadoop.hbase.client.Scan.new(startrow.to_java_bytes, stoprow.to_java_bytes)
else
org.apache.hadoop.hbase.client.Scan.new(startrow.to_java_bytes)
end
# This will overwrite any startrow/stoprow settings
scan.setRowPrefixFilter(rowprefixfilter.to_java_bytes) if rowprefixfilter
# Clear converters from last scan.
@converters.clear()
@converters.clear
columns.each do |c|
family, qualifier = parse_column_name(c.to_s)
@ -529,11 +527,12 @@ EOF
end
end
unless filter.class == String
scan.setFilter(filter)
else
if filter.class == String
scan.setFilter(
org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter.to_java_bytes))
org.apache.hadoop.hbase.filter.ParseFilter.new.parseFilterString(filter.to_java_bytes)
)
else
scan.setFilter(filter)
end
scan.setScanMetricsEnabled(enablemetrics) if enablemetrics
@ -562,19 +561,19 @@ EOF
#----------------------------------------------------------------------------------------------
# Scans whole table or a range of keys and returns rows matching specific criteria
def _scan_internal(args = {}, scan = nil)
raise(ArgumentError, "Args should be a Hash") unless args.kind_of?(Hash)
raise(ArgumentError, "Scan argument should be org.apache.hadoop.hbase.client.Scan") \
unless scan == nil || scan.kind_of?(org.apache.hadoop.hbase.client.Scan)
raise(ArgumentError, 'Args should be a Hash') unless args.is_a?(Hash)
raise(ArgumentError, 'Scan argument should be org.apache.hadoop.hbase.client.Scan') \
unless scan.nil? || scan.is_a?(org.apache.hadoop.hbase.client.Scan)
limit = args["LIMIT"] || -1
maxlength = args.delete("MAXLENGTH") || -1
limit = args['LIMIT'] || -1
maxlength = args.delete('MAXLENGTH') || -1
converter = args.delete(FORMATTER) || nil
converter_class = args.delete(FORMATTER_CLASS) || 'org.apache.hadoop.hbase.util.Bytes'
count = 0
res = {}
# Start the scanner
scan = scan == nil ? _hash_to_scan(args) : scan
scan = scan.nil? ? _hash_to_scan(args) : scan
scanner = @table.getScanner(scan)
iter = scanner.iterator
@ -586,9 +585,9 @@ EOF
row.listCells.each do |c|
family = convert_bytes_with_position(c.getFamilyArray,
c.getFamilyOffset, c.getFamilyLength, converter_class, converter)
c.getFamilyOffset, c.getFamilyLength, converter_class, converter)
qualifier = convert_bytes_with_position(c.getQualifierArray,
c.getQualifierOffset, c.getQualifierLength, converter_class, converter)
c.getQualifierOffset, c.getQualifierLength, converter_class, converter)
column = "#{family}:#{qualifier}"
cell = to_string(column, c, maxlength, converter_class, converter)
@ -609,25 +608,26 @@ EOF
end
end
scanner.close()
return ((block_given?) ? [count, is_stale] : res)
scanner.close
(block_given? ? [count, is_stale] : res)
end
# Apply OperationAttributes to puts/scans/gets
# Apply OperationAttributes to puts/scans/gets
def set_attributes(oprattr, attributes)
raise(ArgumentError, "Attributes must be a Hash type") unless attributes.kind_of?(Hash)
for k,v in attributes
raise(ArgumentError, 'Attributes must be a Hash type') unless attributes.is_a?(Hash)
for k, v in attributes
v = v.to_s unless v.nil?
oprattr.setAttribute(k.to_s, v.to_java_bytes)
end
end
def set_cell_permissions(op, permissions)
raise(ArgumentError, "Permissions must be a Hash type") unless permissions.kind_of?(Hash)
raise(ArgumentError, 'Permissions must be a Hash type') unless permissions.is_a?(Hash)
map = java.util.HashMap.new
permissions.each do |user,perms|
permissions.each do |user, perms|
map.put(user.to_s, org.apache.hadoop.hbase.security.access.Permission.new(
perms.to_java_bytes))
perms.to_java_bytes
))
end
op.setACL(map)
end
@ -635,15 +635,19 @@ EOF
def set_cell_visibility(oprattr, visibility)
oprattr.setCellVisibility(
org.apache.hadoop.hbase.security.visibility.CellVisibility.new(
visibility.to_s))
visibility.to_s
)
)
end
def set_authorizations(oprattr, authorizations)
raise(ArgumentError, "Authorizations must be a Array type") unless authorizations.kind_of?(Array)
auths = [ authorizations ].flatten.compact
raise(ArgumentError, 'Authorizations must be a Array type') unless authorizations.is_a?(Array)
auths = [authorizations].flatten.compact
oprattr.setAuthorizations(
org.apache.hadoop.hbase.security.visibility.Authorizations.new(
auths.to_java(:string)))
auths.to_java(:string)
)
)
end
def set_op_ttl(op, ttl)
@ -664,14 +668,14 @@ EOF
end
end
#Add the following admin utilities to the table
# Add the following admin utilities to the table
add_admin_utils :enable, :disable, :flush, :drop, :describe, :snapshot
#----------------------------
#give the general help for the table
# give the general help for the table
# or the named command
def help (command = nil)
#if there is a command, get the per-command help from the shell
def help(command = nil)
# if there is a command, get the per-command help from the shell
if command
begin
return @shell.help_command(command)
@ -680,13 +684,13 @@ EOF
return nil
end
end
return @shell.help('table_help')
@shell.help('table_help')
end
# Table to string
def to_s
cl = self.class()
return "#{cl} - #{@name}"
cl = self.class
"#{cl} - #{@name}"
end
# Standard ruby call to get the return value for an object
@ -707,51 +711,51 @@ EOF
# Checks if current table is one of the 'meta' tables
def is_meta_table?
org.apache.hadoop.hbase.TableName::META_TABLE_NAME.equals(@table.getName())
org.apache.hadoop.hbase.TableName::META_TABLE_NAME.equals(@table.getName)
end
# Returns family and (when has it) qualifier for a column name
def parse_column_name(column)
split = org.apache.hadoop.hbase.KeyValue.parseColumn(column.to_java_bytes)
set_converter(split) if split.length > 1
return split[0], (split.length > 1) ? split[1] : nil
[split[0], split.length > 1 ? split[1] : nil]
end
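parse_column_name now returns a two-element array instead of an explicit return with a ternary; callers destructure it into family and qualifier, and qualifier comes back nil when the column has no ':qualifier' part. A standalone sketch of the same pattern using plain String#split in place of the KeyValue parser:

# Illustration: returning [family, qualifier-or-nil] and destructuring at the call site.
def parse_column(column)
  split = column.split(':', 2)
  [split[0], split.length > 1 ? split[1] : nil]
end

family, qualifier = parse_column('info:regioninfo')
puts family              # => "info"
puts qualifier           # => "regioninfo"

family, qualifier = parse_column('info')
puts qualifier.inspect   # => nil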
# Make a String of the passed kv
# Intercept cells whose format we know such as the info:regioninfo in hbase:meta
def to_string(column, kv, maxlength = -1, converter_class = nil, converter=nil)
def to_string(column, kv, maxlength = -1, converter_class = nil, converter = nil)
if is_meta_table?
if column == 'info:regioninfo' or column == 'info:splitA' or column == 'info:splitB'
if column == 'info:regioninfo' || column == 'info:splitA' || column == 'info:splitB'
hri = org.apache.hadoop.hbase.HRegionInfo.parseFromOrNull(kv.getValueArray,
kv.getValueOffset, kv.getValueLength)
return "timestamp=%d, value=%s" % [kv.getTimestamp, hri.toString]
kv.getValueOffset, kv.getValueLength)
return format('timestamp=%d, value=%s', kv.getTimestamp, hri.toString)
end
if column == 'info:serverstartcode'
if kv.getValueLength > 0
str_val = org.apache.hadoop.hbase.util.Bytes.toLong(kv.getValueArray,
kv.getValueOffset, kv.getValueLength)
str_val = org.apache.hadoop.hbase.util.Bytes.toLong(kv.getValueArray,
kv.getValueOffset, kv.getValueLength)
else
str_val = org.apache.hadoop.hbase.util.Bytes.toStringBinary(kv.getValueArray,
kv.getValueOffset, kv.getValueLength)
kv.getValueOffset, kv.getValueLength)
end
return "timestamp=%d, value=%s" % [kv.getTimestamp, str_val]
return format('timestamp=%d, value=%s', kv.getTimestamp, str_val)
end
end
if kv.isDelete
val = "timestamp=#{kv.getTimestamp}, type=#{org.apache.hadoop.hbase.KeyValue::Type::codeToType(kv.getType)}"
val = "timestamp=#{kv.getTimestamp}, type=#{org.apache.hadoop.hbase.KeyValue::Type.codeToType(kv.getType)}"
else
val = "timestamp=#{kv.getTimestamp}, value=#{convert(column, kv, converter_class, converter)}"
end
(maxlength != -1) ? val[0, maxlength] : val
maxlength != -1 ? val[0, maxlength] : val
end
def convert(column, kv, converter_class='org.apache.hadoop.hbase.util.Bytes', converter='toStringBinary')
#use org.apache.hadoop.hbase.util.Bytes as the default class
def convert(column, kv, converter_class = 'org.apache.hadoop.hbase.util.Bytes', converter = 'toStringBinary')
# use org.apache.hadoop.hbase.util.Bytes as the default class
converter_class = 'org.apache.hadoop.hbase.util.Bytes' unless converter_class
#use org.apache.hadoop.hbase.util.Bytes::toStringBinary as the default convertor
# use org.apache.hadoop.hbase.util.Bytes::toStringBinary as the default convertor
converter = 'toStringBinary' unless converter
if @converters.has_key?(column)
if @converters.key?(column)
# lookup the CONVERTER for certain column - "cf:qualifier"
matches = /c\((.+)\)\.(.+)/.match(@converters[column])
if matches.nil?
@ -767,7 +771,7 @@ EOF
convert_bytes(org.apache.hadoop.hbase.CellUtil.cloneValue(kv), klazz_name, converter)
end
def convert_bytes(bytes, converter_class=nil, converter_method=nil)
def convert_bytes(bytes, converter_class = nil, converter_method = nil)
convert_bytes_with_position(bytes, 0, bytes.length, converter_class, converter_method)
end
@ -792,14 +796,14 @@ EOF
#----------------------------------------------------------------------------------------------
# Get the split points for the table
def _get_splits_internal()
locator = @table.getRegionLocator()
splits = locator.getAllRegionLocations().
map{|i| Bytes.toStringBinary(i.getRegionInfo().getStartKey)}.delete_if{|k| k == ""}
locator.close()
puts("Total number of splits = %s" % [splits.size + 1])
def _get_splits_internal
locator = @table.getRegionLocator
splits = locator.getAllRegionLocations
.map { |i| Bytes.toStringBinary(i.getRegionInfo.getStartKey) }.delete_if { |k| k == '' }
locator.close
puts(format('Total number of splits = %s', splits.size + 1))
puts splits
return splits
splits
end
end
end


@ -21,8 +21,8 @@
include Java
# Add the $HBASE_HOME/lib directory to the ruby load_path to load jackson
if File.exists?(File.join(File.dirname(__FILE__), "..", "lib"))
$LOAD_PATH.unshift File.join(File.dirname(__FILE__), "..", "lib")
if File.exist?(File.join(File.dirname(__FILE__), '..', 'lib'))
$LOAD_PATH.unshift File.join(File.dirname(__FILE__), '..', 'lib')
end
module Hbase
@ -32,34 +32,29 @@ module Hbase
#---------------------------------------------------------------------------------------------
# Represents information reported by a server on a single MonitoredTask
class Task
def initialize(taskMap,host)
taskMap.each_pair do |k,v|
def initialize(taskMap, host)
taskMap.each_pair do |k, v|
case k
when "statustimems"
@statustime = Time.at(v/1000)
when "status"
@status = v
when "starttimems"
@starttime = Time.at(v/1000)
when "description"
@description = v
when "state"
@state = v
when 'statustimems'
@statustime = Time.at(v / 1000)
when 'status'
@status = v
when 'starttimems'
@starttime = Time.at(v / 1000)
when 'description'
@description = v
when 'state'
@state = v
end
end
@host = host
end
def statustime
# waiting IPC handlers often have statustime = -1, in this case return starttime
if @statustime > Time.at(-1)
return @statustime
end
return @starttime
return @statustime if @statustime > Time.at(-1)
@starttime
end
attr_reader :host
@ -67,120 +62,111 @@ module Hbase
attr_reader :starttime
attr_reader :description
attr_reader :state
end
def initialize(configuration)
@conf = configuration
@conn = org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(@conf)
@admin = @conn.getAdmin()
@admin = @conn.getAdmin
end
#---------------------------------------------------------------------------------------------------
# Returns a filtered list of tasks on the given host
def tasksOnHost(filter,host)
def tasksOnHost(filter, host)
java_import 'java.net.URL'
java_import 'org.codehaus.jackson.map.ObjectMapper'
infoport = @admin.getClusterStatus().getLoad(host).getInfoServerPort().to_s
infoport = @admin.getClusterStatus.getLoad(host).getInfoServerPort.to_s
# Note: This condition use constants from hbase-server
#if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_KEY,
# if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_KEY,
# org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_DEFAULT))
# schema = "http://"
#else
# else
# schema = "https://"
#end
schema = "http://"
url = schema + host.hostname + ":" + infoport + "/rs-status?format=json&filter=" + filter
# end
schema = 'http://'
url = schema + host.hostname + ':' + infoport + '/rs-status?format=json&filter=' + filter
json = URL.new(url)
mapper = ObjectMapper.new
# read and parse JSON
tasksArrayList = mapper.readValue(json,java.lang.Object.java_class)
tasksArrayList = mapper.readValue(json, java.lang.Object.java_class)
# convert to an array of TaskMonitor::Task instances
tasks = Array.new
tasks = []
tasksArrayList.each do |t|
tasks.unshift Task.new(t,host)
tasks.unshift Task.new(t, host)
end
return tasks
tasks
end
#---------------------------------------------------------------------------------------------------
# Prints a table of filtered tasks on requested hosts
def tasks(filter,hosts)
def tasks(filter, hosts)
# put all tasks on all requested hosts in the same list
tasks = []
hosts.each do |host|
tasks.concat(tasksOnHost(filter,host))
tasks.concat(tasksOnHost(filter, host))
end
puts("%d tasks as of: %s" % [tasks.size,Time.now.strftime("%Y-%m-%d %H:%M:%S")])
puts(format('%d tasks as of: %s', tasks.size, Time.now.strftime('%Y-%m-%d %H:%M:%S')))
if tasks.size() == 0
puts("No " + filter + " tasks currently running.")
if tasks.empty?
puts('No ' + filter + ' tasks currently running.')
else
# determine table width
longestStatusWidth = 0
longestDescriptionWidth = 0
tasks.each do |t|
longestStatusWidth = [longestStatusWidth,t.status.length].max
longestDescriptionWidth = [longestDescriptionWidth,t.description.length].max
longestStatusWidth = [longestStatusWidth, t.status.length].max
longestDescriptionWidth = [longestDescriptionWidth, t.description.length].max
end
# set the maximum character width of each column, without padding
hostWidth = 15
startTimeWidth = 19
stateWidth = 8
descriptionWidth = [32,longestDescriptionWidth].min
statusWidth = [36,longestStatusWidth + 27].min
descriptionWidth = [32, longestDescriptionWidth].min
statusWidth = [36, longestStatusWidth + 27].min
rowSeparator = "+" + "-" * (hostWidth + 2) +
"+" + "-" * (startTimeWidth + 2) +
"+" + "-" * (stateWidth + 2) +
"+" + "-" * (descriptionWidth + 2) +
"+" + "-" * (statusWidth + 2) + "+"
rowSeparator = '+' + '-' * (hostWidth + 2) +
'+' + '-' * (startTimeWidth + 2) +
'+' + '-' * (stateWidth + 2) +
'+' + '-' * (descriptionWidth + 2) +
'+' + '-' * (statusWidth + 2) + '+'
# print table header
cells = [setCellWidth("Host",hostWidth),
setCellWidth("Start Time",startTimeWidth),
setCellWidth("State",stateWidth),
setCellWidth("Description",descriptionWidth),
setCellWidth("Status",statusWidth)]
cells = [setCellWidth('Host', hostWidth),
setCellWidth('Start Time', startTimeWidth),
setCellWidth('State', stateWidth),
setCellWidth('Description', descriptionWidth),
setCellWidth('Status', statusWidth)]
line = "| %s | %s | %s | %s | %s |" % cells
line = format('| %s | %s | %s | %s | %s |', cells)
puts(rowSeparator)
puts(line)
# print table content
tasks.each do |t|
cells = [setCellWidth(t.host.hostname, hostWidth),
setCellWidth(t.starttime.strftime('%Y-%m-%d %H:%M:%S'), startTimeWidth),
setCellWidth(t.state, stateWidth),
setCellWidth(t.description, descriptionWidth),
setCellWidth(format('%s (since %d seconds ago)', t.status, Time.now - t.statustime), statusWidth)]
cells = [setCellWidth(t.host.hostname,hostWidth),
setCellWidth(t.starttime.strftime("%Y-%m-%d %H:%M:%S"),startTimeWidth),
setCellWidth(t.state,stateWidth),
setCellWidth(t.description,descriptionWidth),
setCellWidth("%s (since %d seconds ago)" %
[t.status,Time.now - t.statustime], statusWidth)]
line = "| %s | %s | %s | %s | %s |" % cells
line = format('| %s | %s | %s | %s | %s |', cells)
puts(rowSeparator)
puts(line)
end
puts(rowSeparator)
end
end
#---------------------------------------------------------------------------------------------------
@ -189,16 +175,15 @@ module Hbase
#
# right-pad with spaces or truncate with ellipses to match passed width
def setCellWidth(cellContent,width)
numCharsTooShort = width-cellContent.length
def setCellWidth(cellContent, width)
numCharsTooShort = width - cellContent.length
if numCharsTooShort < 0
# cellContent is too long, so truncate
return cellContent[0,[width-3,0].max] + "." * [3,width].min
return cellContent[0, [width - 3, 0].max] + '.' * [3, width].min
else
# cellContent is requested width or too short, so right-pad with zero or more spaces
return cellContent + " " * numCharsTooShort
return cellContent + ' ' * numCharsTooShort
end
end
end
end
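The setCellWidth hunk above only reformats the method, but its behaviour (right-pad short cell content, truncate long content with an ellipsis) is easier to see in isolation. A minimal standalone Ruby sketch, illustrative only and not part of this commit:

# Illustrative only: mirrors the pad-or-truncate logic of setCellWidth above.
def set_cell_width(cell_content, width)
  num_chars_too_short = width - cell_content.length
  if num_chars_too_short < 0
    # too long: keep the first (width - 3) characters and append up to three dots
    cell_content[0, [width - 3, 0].max] + '.' * [3, width].min
  else
    # requested width or shorter: right-pad with spaces
    cell_content + ' ' * num_chars_too_short
  end
end

puts set_cell_width('host187.example.com', 15)  # => "host187.exam..."
puts set_cell_width('rs1', 15)                  # => "rs1            "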
@ -23,10 +23,9 @@ java_import org.apache.hadoop.hbase.util.Bytes
module Hbase
class VisibilityLabelsAdmin
def initialize(admin)
@admin = admin
@connection = @admin.getConnection()
@connection = @admin.getConnection
end
def close
@ -36,53 +35,43 @@ module Hbase
def add_labels(*args)
visibility_feature_available?
# Normalize args
if args.kind_of?(Array)
labels = [ args ].flatten.compact
end
if labels.size() == 0
raise(ArgumentError, "Arguments cannot be null")
end
labels = [args].flatten.compact if args.is_a?(Array)
raise(ArgumentError, 'Arguments cannot be null') if labels.empty?
begin
response = VisibilityClient.addLabels(@connection, labels.to_java(:string))
if response.nil?
raise(ArgumentError, "DISABLED: Visibility labels feature is not available")
raise(ArgumentError, 'DISABLED: Visibility labels feature is not available')
end
labelsWithException = ""
list = response.getResultList()
labelsWithException = ''
list = response.getResultList
list.each do |result|
if result.hasException()
labelsWithException += Bytes.toString(result.getException().getValue().toByteArray())
end
end
if labelsWithException.length > 0
raise(ArgumentError, labelsWithException)
end
if result.hasException
labelsWithException += Bytes.toString(result.getException.getValue.toByteArray)
end
end
raise(ArgumentError, labelsWithException) unless labelsWithException.empty?
end
end
def set_auths(user, *args)
visibility_feature_available?
# Normalize args
if args.kind_of?(Array)
auths = [ args ].flatten.compact
end
auths = [args].flatten.compact if args.is_a?(Array)
begin
response = VisibilityClient.setAuths(@connection, auths.to_java(:string), user)
if response.nil?
raise(ArgumentError, "DISABLED: Visibility labels feature is not available")
raise(ArgumentError, 'DISABLED: Visibility labels feature is not available')
end
labelsWithException = ""
list = response.getResultList()
labelsWithException = ''
list = response.getResultList
list.each do |result|
if result.hasException()
labelsWithException += Bytes.toString(result.getException().getValue().toByteArray())
end
end
if labelsWithException.length > 0
raise(ArgumentError, labelsWithException)
if result.hasException
labelsWithException += Bytes.toString(result.getException.getValue.toByteArray)
end
end
raise(ArgumentError, labelsWithException) unless labelsWithException.empty?
end
end
@ -91,18 +80,18 @@ module Hbase
begin
response = VisibilityClient.getAuths(@connection, user)
if response.nil?
raise(ArgumentError, "DISABLED: Visibility labels feature is not available")
raise(ArgumentError, 'DISABLED: Visibility labels feature is not available')
end
return response.getAuthList
end
end
def list_labels(regex = ".*")
def list_labels(regex = '.*')
visibility_feature_available?
begin
response = VisibilityClient.listLabels(@connection, regex)
if response.nil?
raise(ArgumentError, "DISABLED: Visibility labels feature is not available")
raise(ArgumentError, 'DISABLED: Visibility labels feature is not available')
end
return response.getLabelList
end
@ -111,30 +100,26 @@ module Hbase
def clear_auths(user, *args)
visibility_feature_available?
# Normalize args
if args.kind_of?(Array)
auths = [ args ].flatten.compact
end
auths = [args].flatten.compact if args.is_a?(Array)
begin
response = VisibilityClient.clearAuths(@connection, auths.to_java(:string), user)
if response.nil?
raise(ArgumentError, "DISABLED: Visibility labels feature is not available")
raise(ArgumentError, 'DISABLED: Visibility labels feature is not available')
end
labelsWithException = ""
list = response.getResultList()
labelsWithException = ''
list = response.getResultList
list.each do |result|
if result.hasException()
labelsWithException += Bytes.toString(result.getException().getValue().toByteArray())
end
end
if labelsWithException.length > 0
raise(ArgumentError, labelsWithException)
if result.hasException
labelsWithException += Bytes.toString(result.getException.getValue.toByteArray)
end
end
raise(ArgumentError, labelsWithException) unless labelsWithException.empty?
end
end
# Make sure that lables table is available
def visibility_feature_available?()
def visibility_feature_available?
caps = []
begin
# Try the getSecurityCapabilities API where supported.
@ -142,11 +127,11 @@ module Hbase
rescue
# If we are unable to use getSecurityCapabilities, fall back with a check for
# deployment of the labels table
raise(ArgumentError, "DISABLED: Visibility labels feature is not available") unless \
raise(ArgumentError, 'DISABLED: Visibility labels feature is not available') unless \
exists?(VisibilityConstants::LABELS_TABLE_NAME)
return
end
raise(ArgumentError, "DISABLED: Visibility labels feature is not available") unless \
raise(ArgumentError, 'DISABLED: Visibility labels feature is not available') unless \
caps.include? org.apache.hadoop.hbase.client.security.SecurityCapability::CELL_VISIBILITY
end
@ -32,62 +32,62 @@ java_import('java.lang.Long') { |_package, name| "J#{name}" }
java_import('java.lang.Boolean') { |_package, name| "J#{name}" }
module HBaseConstants
COLUMN = "COLUMN"
COLUMNS = "COLUMNS"
TIMESTAMP = "TIMESTAMP"
TIMERANGE = "TIMERANGE"
COLUMN = 'COLUMN'.freeze
COLUMNS = 'COLUMNS'.freeze
TIMESTAMP = 'TIMESTAMP'.freeze
TIMERANGE = 'TIMERANGE'.freeze
NAME = org.apache.hadoop.hbase.HConstants::NAME
VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION
METADATA = org.apache.hadoop.hbase.HConstants::METADATA
STOPROW = "STOPROW"
STARTROW = "STARTROW"
ROWPREFIXFILTER = "ROWPREFIXFILTER"
STOPROW = 'STOPROW'.freeze
STARTROW = 'STARTROW'.freeze
ROWPREFIXFILTER = 'ROWPREFIXFILTER'.freeze
ENDROW = STOPROW
RAW = "RAW"
LIMIT = "LIMIT"
METHOD = "METHOD"
MAXLENGTH = "MAXLENGTH"
CACHE_BLOCKS = "CACHE_BLOCKS"
ALL_METRICS = "ALL_METRICS"
METRICS = "METRICS"
REVERSED = "REVERSED"
REPLICATION_SCOPE = "REPLICATION_SCOPE"
INTERVAL = 'INTERVAL'
CACHE = 'CACHE'
FILTER = 'FILTER'
SPLITS = 'SPLITS'
SPLITS_FILE = 'SPLITS_FILE'
SPLITALGO = 'SPLITALGO'
NUMREGIONS = 'NUMREGIONS'
REGION_REPLICATION = 'REGION_REPLICATION'
REGION_REPLICA_ID = 'REGION_REPLICA_ID'
RAW = 'RAW'.freeze
LIMIT = 'LIMIT'.freeze
METHOD = 'METHOD'.freeze
MAXLENGTH = 'MAXLENGTH'.freeze
CACHE_BLOCKS = 'CACHE_BLOCKS'.freeze
ALL_METRICS = 'ALL_METRICS'.freeze
METRICS = 'METRICS'.freeze
REVERSED = 'REVERSED'.freeze
REPLICATION_SCOPE = 'REPLICATION_SCOPE'.freeze
INTERVAL = 'INTERVAL'.freeze
CACHE = 'CACHE'.freeze
FILTER = 'FILTER'.freeze
SPLITS = 'SPLITS'.freeze
SPLITS_FILE = 'SPLITS_FILE'.freeze
SPLITALGO = 'SPLITALGO'.freeze
NUMREGIONS = 'NUMREGIONS'.freeze
REGION_REPLICATION = 'REGION_REPLICATION'.freeze
REGION_REPLICA_ID = 'REGION_REPLICA_ID'.freeze
CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION
ATTRIBUTES="ATTRIBUTES"
VISIBILITY="VISIBILITY"
AUTHORIZATIONS = "AUTHORIZATIONS"
SKIP_FLUSH = 'SKIP_FLUSH'
CONSISTENCY = "CONSISTENCY"
USER = 'USER'
TABLE = 'TABLE'
NAMESPACE = 'NAMESPACE'
TYPE = 'TYPE'
NONE = 'NONE'
VALUE = 'VALUE'
ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'
CLUSTER_KEY = 'CLUSTER_KEY'
TABLE_CFS = 'TABLE_CFS'
NAMESPACES = 'NAMESPACES'
CONFIG = 'CONFIG'
DATA = 'DATA'
SERVER_NAME = 'SERVER_NAME'
LOCALITY_THRESHOLD = 'LOCALITY_THRESHOLD'
RESTORE_ACL = 'RESTORE_ACL'
FORMATTER = 'FORMATTER'
FORMATTER_CLASS = 'FORMATTER_CLASS'
POLICY = 'POLICY'
REGIONSERVER = 'REGIONSERVER'
ATTRIBUTES = 'ATTRIBUTES'.freeze
VISIBILITY = 'VISIBILITY'.freeze
AUTHORIZATIONS = 'AUTHORIZATIONS'.freeze
SKIP_FLUSH = 'SKIP_FLUSH'.freeze
CONSISTENCY = 'CONSISTENCY'.freeze
USER = 'USER'.freeze
TABLE = 'TABLE'.freeze
NAMESPACE = 'NAMESPACE'.freeze
TYPE = 'TYPE'.freeze
NONE = 'NONE'.freeze
VALUE = 'VALUE'.freeze
ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'.freeze
CLUSTER_KEY = 'CLUSTER_KEY'.freeze
TABLE_CFS = 'TABLE_CFS'.freeze
NAMESPACES = 'NAMESPACES'.freeze
CONFIG = 'CONFIG'.freeze
DATA = 'DATA'.freeze
SERVER_NAME = 'SERVER_NAME'.freeze
LOCALITY_THRESHOLD = 'LOCALITY_THRESHOLD'.freeze
RESTORE_ACL = 'RESTORE_ACL'.freeze
FORMATTER = 'FORMATTER'.freeze
FORMATTER_CLASS = 'FORMATTER_CLASS'.freeze
POLICY = 'POLICY'.freeze
REGIONSERVER = 'REGIONSERVER'.freeze
# Load constants from hbase java API
def self.promote_constants(constants)
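Most of this hunk is rubocop's Style/MutableConstant autocorrect: every string constant gains .freeze, so the shared constant can no longer be mutated in place. A tiny standalone illustration, not part of this commit:

# Illustrative only: why rubocop freezes string constants.
MUTABLE = 'LIMIT'
FROZEN  = 'LIMIT'.freeze

MUTABLE << 'S'        # silently changes the constant's value everywhere it is shared
puts MUTABLE          # => LIMITS

begin
  FROZEN << 'S'       # mutating a frozen string raises instead
rescue RuntimeError => e # FrozenError on newer Rubies, which subclasses RuntimeError
  puts e.message      # => can't modify frozen String
end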
@ -32,28 +32,25 @@ module IRB
# down in IRB didn't seem to work. I think the worst thing that can
# happen is the shell exiting because of failed IRB construction with
# no error (though we're not blanking STDERR)
begin
# Map the '/dev/null' according to the runing platform
# Under Windows platform the 'dev/null' is not fully compliant with unix,
# and the 'NUL' object need to be use instead.
devnull = "/dev/null"
devnull = "NUL" if WINDOZE
f = File.open(devnull, "w")
$stdout = f
super
ensure
f.close()
$stdout = STDOUT
end
# Map the '/dev/null' according to the runing platform
# Under Windows platform the 'dev/null' is not fully compliant with unix,
# and the 'NUL' object need to be use instead.
devnull = '/dev/null'
devnull = 'NUL' if WINDOZE
f = File.open(devnull, 'w')
$stdout = f
super
ensure
f.close
$stdout = STDOUT
end
def output_value
# Suppress output if last_value is 'nil'
# Otherwise, when user types help, get ugly 'nil'
# after all output.
if @context.last_value != nil
super
end
super unless @context.last_value.nil?
end
end
end
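The hunk above drops the begin wrapper because a method body can carry its own ensure clause; the stdout-silencing pattern itself is unchanged. A standalone sketch of that pattern, illustrative only (the platform check here is an assumption, the commit itself uses its own WINDOZE constant):

# Illustrative only: temporarily point $stdout at the null device.
def quietly
  devnull = Gem.win_platform? ? 'NUL' : '/dev/null'
  f = File.open(devnull, 'w')
  $stdout = f
  yield
ensure
  f.close if f
  $stdout = STDOUT
end

quietly { puts 'suppressed banner output' }
puts 'normal output is back'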
@ -29,7 +29,7 @@ module Shell
@@command_groups
end
def self.load_command(name, group, aliases=[])
def self.load_command(name, group, aliases = [])
return if commands[name]
# Register command in the group
@ -39,7 +39,7 @@ module Shell
# Load command
begin
require "shell/commands/#{name}"
klass_name = name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase } # camelize
klass_name = name.to_s.gsub(/(?:^|_)(.)/) { Regexp.last_match(1).upcase } # camelize
commands[name] = eval("Commands::#{klass_name}")
aliases.each do |an_alias|
commands[an_alias] = commands[name]
@ -53,10 +53,10 @@ module Shell
raise ArgumentError, "No :commands for group #{group}" unless opts[:commands]
command_groups[group] = {
:commands => [],
:command_names => opts[:commands],
:full_name => opts[:full_name] || group,
:comment => opts[:comment]
commands: [],
command_names: opts[:commands],
full_name: opts[:full_name] || group,
comment: opts[:comment]
}
all_aliases = opts[:aliases] || {}
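This hash literal, and every load_command_group call further down, is the Style/HashSyntax autocorrect: :key => value becomes key: value for symbol keys, and the resulting hash is identical. A one-line check, illustrative only:

# Illustrative only: both literals build the same hash.
old_style = { :full_name => 'GENERAL', :comment => nil }
new_style = { full_name: 'GENERAL', comment: nil }
puts old_style == new_style # => true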
@ -76,18 +76,18 @@ module Shell
@debug = false
attr_accessor :debug
def initialize(hbase, interactive=true)
def initialize(hbase, interactive = true)
self.hbase = hbase
self.interactive = interactive
end
# Returns Admin class from admin.rb
def admin
@admin ||= hbase.admin()
@admin ||= hbase.admin
end
def hbase_taskmonitor
@hbase_taskmonitor ||= hbase.taskmonitor()
@hbase_taskmonitor ||= hbase.taskmonitor
end
def hbase_table(name)
@ -95,23 +95,23 @@ module Shell
end
def hbase_replication_admin
@hbase_replication_admin ||= hbase.replication_admin()
@hbase_replication_admin ||= hbase.replication_admin
end
def hbase_security_admin
@hbase_security_admin ||= hbase.security_admin()
@hbase_security_admin ||= hbase.security_admin
end
def hbase_visibility_labels_admin
@hbase_visibility_labels_admin ||= hbase.visibility_labels_admin()
@hbase_visibility_labels_admin ||= hbase.visibility_labels_admin
end
def hbase_quotas_admin
@hbase_quotas_admin ||= hbase.quotas_admin()
@hbase_quotas_admin ||= hbase.quotas_admin
end
def hbase_rsgroup_admin
@rsgroup_admin ||= hbase.rsgroup_admin()
@rsgroup_admin ||= hbase.rsgroup_admin
end
def export_commands(where)
@ -140,7 +140,7 @@ module Shell
# Return value is only useful in non-interactive mode, for e.g. tests.
def command(command, *args)
ret = internal_command(command, :command, *args)
if self.interactive
if interactive
return nil
else
return ret
@ -151,8 +151,8 @@ module Shell
# command - name of the command to call
# method_name - name of the method on the command to call. Defaults to just 'command'
# args - to be passed to the named method
def internal_command(command, method_name= :command, *args)
command_instance(command).command_safe(self.debug, method_name, *args)
def internal_command(command, method_name = :command, *args)
command_instance(command).command_safe(debug, method_name, *args)
end
def print_banner
@ -168,12 +168,12 @@ module Shell
puts "Command: #{command}"
puts command_instance(command).help
puts
return nil
nil
end
def help_command(command)
puts command_instance(command).help
return nil
nil
end
def help_group(group_name)
@ -185,7 +185,7 @@ module Shell
puts group[:comment]
puts
end
return nil
nil
end
def help(command = nil)
@ -200,23 +200,23 @@ module Shell
puts
puts 'COMMAND GROUPS:'
::Shell.command_groups.each do |name, group|
puts " Group name: " + name
puts " Commands: " + group[:command_names].sort.join(', ')
puts ' Group name: ' + name
puts ' Commands: ' + group[:command_names].sort.join(', ')
puts
end
unless command
puts 'SHELL USAGE:'
help_footer
end
return nil
nil
end
def help_header
return "HBase Shell, version #{org.apache.hadoop.hbase.util.VersionInfo.getVersion()}, " +
"r#{org.apache.hadoop.hbase.util.VersionInfo.getRevision()}, " +
"#{org.apache.hadoop.hbase.util.VersionInfo.getDate()}" + "\n" +
"Type 'help \"COMMAND\"', (e.g. 'help \"get\"' -- the quotes are necessary) for help on a specific command.\n" +
"Commands are grouped. Type 'help \"COMMAND_GROUP\"', (e.g. 'help \"general\"') for help on a command group."
"HBase Shell, version #{org.apache.hadoop.hbase.util.VersionInfo.getVersion}, " \
"r#{org.apache.hadoop.hbase.util.VersionInfo.getRevision}, " \
"#{org.apache.hadoop.hbase.util.VersionInfo.getDate}" + "\n" \
"Type 'help \"COMMAND\"', (e.g. 'help \"get\"' -- the quotes are necessary) for help on a specific command.\n" \
"Commands are grouped. Type 'help \"COMMAND_GROUP\"', (e.g. 'help \"general\"') for help on a command group."
end
def help_footer
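help_header above drops the explicit return and switches from runtime + concatenation to adjacent string literals joined by a trailing backslash, which the parser concatenates when the file is loaded. A small sketch of the two spellings, illustrative only:

# Illustrative only: both forms produce one string; the second needs no '+' at runtime.
with_plus = 'HBase Shell, ' +
            'version X'
adjacent  = 'HBase Shell, ' \
            'version X'
puts with_plus == adjacent # => true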
@ -253,8 +253,8 @@ require 'shell/commands'
# Load all commands
Shell.load_command_group(
'general',
:full_name => 'GENERAL HBASE SHELL COMMANDS',
:commands => %w[
full_name: 'GENERAL HBASE SHELL COMMANDS',
commands: %w[
status
version
table_help
@ -265,8 +265,8 @@ Shell.load_command_group(
Shell.load_command_group(
'ddl',
:full_name => 'TABLES MANAGEMENT COMMANDS',
:commands => %w[
full_name: 'TABLES MANAGEMENT COMMANDS',
commands: %w[
alter
create
describe
@ -287,15 +287,15 @@ Shell.load_command_group(
locate_region
list_regions
],
:aliases => {
aliases: {
'describe' => ['desc']
}
)
Shell.load_command_group(
'namespace',
:full_name => 'NAMESPACE MANAGEMENT COMMANDS',
:commands => %w[
full_name: 'NAMESPACE MANAGEMENT COMMANDS',
commands: %w[
create_namespace
drop_namespace
alter_namespace
@ -307,8 +307,8 @@ Shell.load_command_group(
Shell.load_command_group(
'dml',
:full_name => 'DATA MANIPULATION COMMANDS',
:commands => %w[
full_name: 'DATA MANIPULATION COMMANDS',
commands: %w[
count
delete
deleteall
@ -326,9 +326,9 @@ Shell.load_command_group(
Shell.load_command_group(
'tools',
:full_name => 'HBASE SURGERY TOOLS',
:comment => "WARNING: Above commands are for 'experts'-only as misuse can damage an install",
:commands => %w[
full_name: 'HBASE SURGERY TOOLS',
comment: "WARNING: Above commands are for 'experts'-only as misuse can damage an install",
commands: %w[
assign
balancer
balance_switch
@ -359,16 +359,16 @@ Shell.load_command_group(
splitormerge_enabled
clear_compaction_queues
],
# TODO remove older hlog_roll command
:aliases => {
# TODO: remove older hlog_roll command
aliases: {
'wal_roll' => ['hlog_roll']
}
)
Shell.load_command_group(
'replication',
:full_name => 'CLUSTER REPLICATION TOOLS',
:commands => %w[
full_name: 'CLUSTER REPLICATION TOOLS',
commands: %w[
add_peer
remove_peer
list_peers
@ -393,8 +393,8 @@ Shell.load_command_group(
Shell.load_command_group(
'snapshots',
:full_name => 'CLUSTER SNAPSHOT TOOLS',
:commands => %w[
full_name: 'CLUSTER SNAPSHOT TOOLS',
commands: %w[
snapshot
clone_snapshot
restore_snapshot
@ -408,8 +408,8 @@ Shell.load_command_group(
Shell.load_command_group(
'configuration',
:full_name => 'ONLINE CONFIGURATION TOOLS',
:commands => %w[
full_name: 'ONLINE CONFIGURATION TOOLS',
commands: %w[
update_config
update_all_config
]
@ -417,8 +417,8 @@ Shell.load_command_group(
Shell.load_command_group(
'quotas',
:full_name => 'CLUSTER QUOTAS TOOLS',
:commands => %w[
full_name: 'CLUSTER QUOTAS TOOLS',
commands: %w[
set_quota
list_quotas
list_quota_table_sizes
@ -429,9 +429,9 @@ Shell.load_command_group(
Shell.load_command_group(
'security',
:full_name => 'SECURITY TOOLS',
:comment => "NOTE: Above commands are only applicable if running with the AccessController coprocessor",
:commands => %w[
full_name: 'SECURITY TOOLS',
comment: 'NOTE: Above commands are only applicable if running with the AccessController coprocessor',
commands: %w[
list_security_capabilities
grant
revoke
@ -441,8 +441,8 @@ Shell.load_command_group(
Shell.load_command_group(
'procedures',
:full_name => 'PROCEDURES & LOCKS MANAGEMENT',
:commands => %w[
full_name: 'PROCEDURES & LOCKS MANAGEMENT',
commands: %w[
abort_procedure
list_procedures
list_locks
@ -451,9 +451,9 @@ Shell.load_command_group(
Shell.load_command_group(
'visibility labels',
:full_name => 'VISIBILITY LABEL TOOLS',
:comment => "NOTE: Above commands are only applicable if running with the VisibilityController coprocessor",
:commands => %w[
full_name: 'VISIBILITY LABEL TOOLS',
comment: 'NOTE: Above commands are only applicable if running with the VisibilityController coprocessor',
commands: %w[
add_labels
list_labels
set_auths
@ -465,10 +465,10 @@ Shell.load_command_group(
Shell.load_command_group(
'rsgroup',
:full_name => 'RSGroups',
:comment => "NOTE: The rsgroup Coprocessor Endpoint must be enabled on the Master else commands fail with:
full_name: 'RSGroups',
comment: "NOTE: The rsgroup Coprocessor Endpoint must be enabled on the Master else commands fail with:
UnknownProtocolException: No registered Master Coprocessor Endpoint found for RSGroupAdminService",
:commands => %w[
commands: %w[
list_rsgroups
get_rsgroup
add_rsgroup
@ -22,12 +22,11 @@ require 'shell/formatter'
module Shell
module Commands
class Command
def initialize(shell)
@shell = shell
end
#wrap an execution of cmd to catch hbase exceptions
# wrap an execution of cmd to catch hbase exceptions
# cmd - command name to execute
# args - arguments to pass to the command
def command_safe(debug, cmd = :command, *args)
@ -35,7 +34,7 @@ module Shell
# See count.rb for example.
@start_time = Time.now
# send is internal ruby method to call 'cmd' with *args
#(everything is a message, so this is just the formal semantics to support that idiom)
# (everything is a message, so this is just the formal semantics to support that idiom)
translate_hbase_exceptions(*args) { send(cmd, *args) }
rescue => e
rootCause = e
@ -57,7 +56,7 @@ module Shell
ensure
# If end_time is not already set by the command, use current time.
@end_time ||= Time.now
formatter.output_str("Took %.4f seconds" % [@end_time - @start_time])
formatter.output_str(format('Took %.4f seconds', @end_time - @start_time))
end
# Convenience functions to get different admins
@ -109,44 +108,40 @@ module Shell
yield
rescue => cause
# let individual command handle exceptions first
if self.respond_to?(:handle_exceptions)
self.handle_exceptions(cause, *args)
end
handle_exceptions(cause, *args) if respond_to?(:handle_exceptions)
# Global HBase exception handling below if not handled by respective command above
if cause.kind_of?(org.apache.hadoop.hbase.TableNotFoundException) then
if cause.is_a?(org.apache.hadoop.hbase.TableNotFoundException)
raise "Unknown table #{args.first}!"
end
if cause.kind_of?(org.apache.hadoop.hbase.UnknownRegionException) then
if cause.is_a?(org.apache.hadoop.hbase.UnknownRegionException)
raise "Unknown region #{args.first}!"
end
if cause.kind_of?(org.apache.hadoop.hbase.NamespaceNotFoundException) then
if cause.is_a?(org.apache.hadoop.hbase.NamespaceNotFoundException)
raise "Unknown namespace #{args.first}!"
end
if cause.kind_of?(org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException) then
if cause.is_a?(org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException)
raise "Unknown snapshot #{args.first}!"
end
if cause.kind_of?(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) then
if cause.is_a?(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException)
exceptions = cause.getCauses
exceptions.each do |exception|
if exception.kind_of?(org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException) then
if exception.is_a?(org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException)
valid_cols = table(args.first).get_all_columns.map { |c| c + '*' }
raise "Unknown column family! Valid column names: #{valid_cols.join(", ")}"
raise "Unknown column family! Valid column names: #{valid_cols.join(', ')}"
end
end
end
if cause.kind_of?(org.apache.hadoop.hbase.TableExistsException) then
if cause.is_a?(org.apache.hadoop.hbase.TableExistsException)
raise "Table already exists: #{args.first}!"
end
# To be safe, here only AccessDeniedException is considered. In future
# we might support more in more generic approach when possible.
if cause.kind_of?(org.apache.hadoop.hbase.security.AccessDeniedException) then
str = java.lang.String.new("#{cause}")
if cause.is_a?(org.apache.hadoop.hbase.security.AccessDeniedException)
str = java.lang.String.new(cause.to_s)
# Error message is merged with stack trace, reference StringUtils.stringifyException
# This is to parse and get the error message from the whole.
strs = str.split("\n")
if strs.size > 0 then
raise "#{strs[0]}"
end
raise (strs[0]).to_s unless strs.empty?
end
# Throw the other exception which hasn't been handled above
@ -21,7 +21,7 @@ module Shell
module Commands
class AbortProcedure < Command
def help
return <<-EOF
<<-EOF
Given a procedure Id (and optional boolean may_interrupt_if_running parameter,
default is true), abort a procedure in hbase. Use with caution. Some procedures
might not be abortable. For experts only.
@ -39,9 +39,9 @@ Examples:
EOF
end
def command(proc_id, may_interrupt_if_running=nil)
def command(proc_id, may_interrupt_if_running = nil)
formatter.row([admin.abort_procedure?(proc_id, may_interrupt_if_running).to_s])
end
end
end
end
end
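Two autocorrects seen here repeat through nearly every command file that follows: the redundant return before the help heredoc disappears (a Ruby method already returns its last expression), and optional-argument defaults gain spaces around =. The implicit return in isolation, illustrative only:

# Illustrative only: a heredoc as the last expression is the method's return value.
def help
  <<-EOF
Usage: example_command 'proc_id'
  EOF
end

puts help # prints the usage text; no explicit 'return' needed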
@ -20,7 +20,7 @@ module Shell
module Commands
class AddLabels < Command
def help
return <<-EOF
<<-EOF
Add a set of visibility labels.
Syntax : add_labels [label1, label2]
@ -19,9 +19,9 @@
module Shell
module Commands
class AddPeer< Command
class AddPeer < Command
def help
return <<-EOF
<<-EOF
A peer can either be another HBase cluster or a custom replication endpoint. In either case an id
must be specified to identify the peer.
@ -22,7 +22,7 @@ module Shell
module Commands
class AddRsgroup < Command
def help
return <<-EOF
<<-EOF
Create a new RegionServer group.
Example:
@ -21,7 +21,7 @@ module Shell
module Commands
class Alter < Command
def help
return <<-EOF
<<-EOF
Alter a table. Tables can be altered without disabling them first.
Altering enabled tables has caused problems
in the past, so use caution and test it before using in production.
@ -21,7 +21,7 @@ module Shell
module Commands
class AlterAsync < Command
def help
return <<-EOF
<<-EOF
Alter column family schema, does not wait for all regions to receive the
schema changes. Pass table name and a dictionary specifying new column
family schema. Dictionaries are described on the main help command output.
@ -21,7 +21,7 @@ module Shell
module Commands
class AlterNamespace < Command
def help
return <<-EOF
<<-EOF
Alter namespace properties.
To add/modify a property:
@ -21,7 +21,7 @@ module Shell
module Commands
class AlterStatus < Command
def help
return <<-EOF
<<-EOF
Get the status of the alter command. Indicates the number of regions of the
table that have received the updated schema
Pass table name.
@ -30,6 +30,7 @@ hbase> alter_status 't1'
hbase> alter_status 'ns1:t1'
EOF
end
def command(table)
admin.alter_status(table)
end
@ -21,7 +21,7 @@ module Shell
module Commands
class Append < Command
def help
return <<-EOF
<<-EOF
Appends a cell 'value' at specified table/row/column coordinates.
hbase> append 't1', 'r1', 'c1', 'value', ATTRIBUTES=>{'mykey'=>'myvalue'}
@ -35,13 +35,13 @@ t to table 't1', the corresponding command would be:
EOF
end
def command(table_name, row, column, value, args={})
def command(table_name, row, column, value, args = {})
table = table(table_name)
@start_time = Time.now
append(table, row, column, value, args)
end
def append(table, row, column, value, args={})
def append(table, row, column, value, args = {})
if current_value = table._append_internal(row, column, value, args)
puts "CURRENT VALUE = #{current_value}"
end
@ -50,5 +50,5 @@ EOF
end
end
#add incr comamnd to Table
::Hbase::Table.add_shell_command("append")
# add incr comamnd to Table
::Hbase::Table.add_shell_command('append')
@ -20,9 +20,9 @@
module Shell
module Commands
class AppendPeerNamespaces< Command
class AppendPeerNamespaces < Command
def help
return <<-EOF
<<-EOF
Append some namespaces to be replicable for the specified peer.
Set a namespace in the peer config means that all tables in this
@ -19,9 +19,9 @@
module Shell
module Commands
class AppendPeerTableCFs< Command
class AppendPeerTableCFs < Command
def help
return <<-EOF
<<-EOF
Append a replicable table-cf config for the specified peer
Examples:
@ -21,7 +21,7 @@ module Shell
module Commands
class Assign < Command
def help
return <<-EOF
<<-EOF
Assign a region. Use with caution. If region already assigned,
this command will do a force reassign. For experts only.
Examples:
@ -19,7 +19,7 @@ module Shell
module Commands
class BalanceRsgroup < Command
def help
return <<-EOF
<<-EOF
Balance a RegionServer group
Example:
@ -33,7 +33,7 @@ EOF
# Returns true if balancer was run, otherwise false.
ret = rsgroup_admin.balance_rs_group(group_name)
if ret
puts "Ran the balancer."
puts 'Ran the balancer.'
else
puts "Couldn't run the balancer."
end
@ -21,7 +21,7 @@ module Shell
module Commands
class BalanceSwitch < Command
def help
return <<-EOF
<<-EOF
Enable/Disable balancer. Returns previous balancer state.
Examples:
@ -31,7 +31,7 @@ EOF
end
def command(enableDisable)
prev_state = admin.balance_switch(enableDisable)? "true" : "false"
prev_state = admin.balance_switch(enableDisable) ? 'true' : 'false'
formatter.row(["Previous balancer state : #{prev_state}"])
prev_state
end
@ -21,9 +21,9 @@ module Shell
module Commands
class Balancer < Command
def help
return <<-EOF
<<-EOF
Trigger the cluster balancer. Returns true if balancer ran and was able to
tell the region servers to unassign all the regions to balance (the re-assignment itself is async).
tell the region servers to unassign all the regions to balance (the re-assignment itself is async).
Otherwise false (Will not run if regions in transition).
Parameter tells master whether we should force balance even if there is region in transition.
@ -37,14 +37,14 @@ Examples:
EOF
end
def command(force=nil)
def command(force = nil)
force_balancer = 'false'
if force == 'force'
force_balancer = 'true'
elsif !force.nil?
raise ArgumentError, "Invalid argument #{force}."
end
formatter.row([admin.balancer(force_balancer)? "true": "false"])
formatter.row([admin.balancer(force_balancer) ? 'true' : 'false'])
end
end
end
@ -21,7 +21,7 @@ module Shell
module Commands
class BalancerEnabled < Command
def help
return <<-EOF
<<-EOF
Query the balancer's state.
Examples:
@ -29,7 +29,7 @@ Examples:
EOF
end
def command()
def command
state = admin.balancer_enabled?
formatter.row([state.to_s])
state
@ -20,7 +20,7 @@ module Shell
module Commands
class CatalogjanitorEnabled < Command
def help
return <<-EOF
<<-EOF
Query for the CatalogJanitor state (enabled/disabled?)
Examples:
@ -28,8 +28,8 @@ Examples:
EOF
end
def command()
formatter.row([admin.catalogjanitor_enabled()? "true" : "false"])
def command
formatter.row([admin.catalogjanitor_enabled ? 'true' : 'false'])
end
end
end
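The same pair of changes appears in most admin wrappers in this commit: empty parentheses are dropped from argument-less method definitions (def command) and calls (catalogjanitor_enabled), and the double-quoted "true"/"false" literals become single-quoted. A compact sketch of paren-less definition and call, illustrative only:

# Illustrative only: defining and calling a zero-argument method without ().
class JanitorStatus
  def enabled
    true
  end
end

puts JanitorStatus.new.enabled ? 'true' : 'false' # => true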
@ -20,15 +20,16 @@ module Shell
module Commands
class CatalogjanitorRun < Command
def help
return <<-EOF
<<-EOF
Catalog janitor command to run the (garbage collection) scan from command line.
hbase> catalogjanitor_run
EOF
end
def command()
admin.catalogjanitor_run()
def command
admin.catalogjanitor_run
end
end
end
@ -20,7 +20,7 @@ module Shell
module Commands
class CatalogjanitorSwitch < Command
def help
return <<-EOF
<<-EOF
Enable/Disable CatalogJanitor. Returns previous CatalogJanitor state.
Examples:
@ -30,7 +30,7 @@ EOF
end
def command(enableDisable)
formatter.row([admin.catalogjanitor_switch(enableDisable)? "true" : "false"])
formatter.row([admin.catalogjanitor_switch(enableDisable) ? 'true' : 'false'])
end
end
end
@ -20,7 +20,7 @@ module Shell
module Commands
class CleanerChoreEnabled < Command
def help
return <<-EOF
<<-EOF
Query for the Cleaner chore state (enabled/disabled?).
Examples:
@ -28,9 +28,9 @@ Examples:
EOF
end
def command()
formatter.row([admin.cleaner_chore_enabled()? "true" : "false"])
def command
formatter.row([admin.cleaner_chore_enabled ? 'true' : 'false'])
end
end
end
end
end
@ -20,16 +20,17 @@ module Shell
module Commands
class CleanerChoreRun < Command
def help
return <<-EOF
<<-EOF
Cleaner chore command for garbage collection of HFiles and WAL files.
hbase> cleaner_chore_run
EOF
end
def command()
admin.cleaner_chore_run()
def command
admin.cleaner_chore_run
end
end
end
end
end
@ -20,7 +20,7 @@ module Shell
module Commands
class CleanerChoreSwitch < Command
def help
return <<-EOF
<<-EOF
Enable/Disable Cleaner chore. Returns previous Cleaner chore state.
Examples:
@ -30,8 +30,8 @@ EOF
end
def command(enableDisable)
formatter.row([admin.cleaner_chore_switch(enableDisable)? "true" : "false"])
formatter.row([admin.cleaner_chore_switch(enableDisable) ? 'true' : 'false'])
end
end
end
end
end
@ -19,7 +19,7 @@ module Shell
module Commands
class ClearAuths < Command
def help
return <<-EOF
<<-EOF
Clear visibility labels from a user or group
Syntax : clear_auths 'user',[label1, label2]
@ -21,7 +21,7 @@ module Shell
module Commands
class ClearCompactionQueues < Command
def help
return <<-EOF
<<-EOF
Clear compacting queues on a regionserver.
The queue_name contains short and long.
short is shortCompactions's queue,long is longCompactions's queue.
@ -38,4 +38,4 @@ module Shell
end
end
end
end
end
@ -20,8 +20,8 @@ module Shell
module Commands
class CloneSnapshot < Command
def help
return <<-EOF
Create a new table by cloning the snapshot content.
<<-EOF
Create a new table by cloning the snapshot content.
There're no copies of data involved.
And writing on the newly created table will not influence the snapshot data.
@ -37,13 +37,13 @@ EOF
end
def command(snapshot_name, table, args = {})
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
raise(ArgumentError, 'Arguments should be a Hash') unless args.is_a?(Hash)
restore_acl = args.delete(RESTORE_ACL) || false
admin.clone_snapshot(snapshot_name, table, restore_acl)
end
def handle_exceptions(cause, *args)
if cause.kind_of?(org.apache.hadoop.hbase.TableExistsException) then
if cause.is_a?(org.apache.hadoop.hbase.TableExistsException)
tableName = args[1]
raise "Table already exists: #{tableName}!"
end
@ -21,20 +21,20 @@ module Shell
module Commands
class CloseRegion < Command
def help
return <<-EOF
<<-EOF
Close a single region. Ask the master to close a region out on the cluster
or if 'SERVER_NAME' is supplied, ask the designated hosting regionserver to
close the region directly. Closing a region, the master expects 'REGIONNAME'
to be a fully qualified region name. When asking the hosting regionserver to
directly close a region, you pass the regions' encoded name only. A region
name looks like this:
TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.
or
Namespace:TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.
The trailing period is part of the regionserver name. A region's encoded name
is the hash at the end of a region name; e.g. 527db22f95c8a9e0116f0cc13c680396
is the hash at the end of a region name; e.g. 527db22f95c8a9e0116f0cc13c680396
(without the period). A 'SERVER_NAME' is its host, port plus startcode. For
example: host187.example.com,60020,1289493121758 (find servername in master ui
or when you do detailed status in shell). This command will end up running
@ -21,7 +21,7 @@ module Shell
module Commands
class Compact < Command
def help
return <<-EOF
<<-EOF
Compact all regions in passed table or pass a region row
to compact an individual region. You can also compact a single column
family within a region.
@ -43,7 +43,7 @@ module Shell
EOF
end
def command(table_or_region_name, family = nil, type = "NORMAL")
def command(table_or_region_name, family = nil, type = 'NORMAL')
admin.compact(table_or_region_name, family, type)
end
end
@ -21,7 +21,7 @@ module Shell
module Commands
class CompactRs < Command
def help
return <<-EOF
<<-EOF
Compact all regions on passed regionserver.
Examples:
Compact all regions on a regionserver:
@ -21,7 +21,7 @@ module Shell
module Commands
class CompactionState < Command
def help
return <<-EOF
<<-EOF
Gets compaction status for a table:
hbase> compaction_state 'ns1:t1'
hbase> compaction_state 't1'
@ -21,7 +21,7 @@ module Shell
module Commands
class Count < Command
def help
return <<-EOF
<<-EOF
Count the number of rows in a table. Return value is the number of rows.
This operation may take a LONG time (Run '$HADOOP_HOME/bin/hadoop jar
hbase.jar rowcount' to run a counting mapreduce job). Current count is shown
@ -58,7 +58,7 @@ EOF
def count(table, params = {})
# If the second parameter is an integer, then it is the old command syntax
params = { 'INTERVAL' => params } if params.kind_of?(Fixnum)
params = { 'INTERVAL' => params } if params.is_a?(Integer)
# Merge params with defaults
params = {
@ -71,14 +71,14 @@ EOF
@start_time = Time.now
formatter.header
count = table._count_internal(params['INTERVAL'].to_i, scan) do |cnt, row|
formatter.row([ "Current count: #{cnt}, row: #{row}" ])
formatter.row(["Current count: #{cnt}, row: #{row}"])
end
formatter.footer(count)
return count
count
end
end
end
end
#Add the method table.count that calls count.count
::Hbase::Table.add_shell_command("count")
# Add the method table.count that calls count.count
::Hbase::Table.add_shell_command('count')
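One change in this hunk goes beyond formatting: params.kind_of?(Fixnum) becomes params.is_a?(Integer). Fixnum was folded into Integer and deprecated in Ruby 2.4 (the constant is gone in 3.2), so matching on Integer keeps the old count 't1', 10 syntax working on newer interpreters. A quick illustration, not part of this commit:

# Illustrative only: Integer covers everything Fixnum used to match.
interval = 10_000
puts interval.is_a?(Integer)  # => true on any supported Ruby
# interval.is_a?(Fixnum)      # deprecation warning on 2.4+, NameError once removed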
@ -21,7 +21,7 @@ module Shell
module Commands
class Create < Command
def help
return <<-EOF
<<-EOF
Creates a table. Pass a table name, and a set of column family
specifications (at least one), and, optionally, table configuration.
Column specification can be a simple string (name), or a dictionary
@ -65,9 +65,9 @@ EOF
def command(table, *args)
admin.create(table, *args)
@end_time = Time.now
puts "Created table " + table.to_s
puts 'Created table ' + table.to_s
#and then return the table just created
# and then return the table just created
table(table)
end
end
@ -21,7 +21,7 @@ module Shell
module Commands
class CreateNamespace < Command
def help
return <<-EOF
<<-EOF
Create namespace; pass namespace name,
and optionally a dictionary of namespace configuration.
Examples:
@ -21,7 +21,7 @@ module Shell
module Commands
class Delete < Command
def help
return <<-EOF
<<-EOF
Put a delete cell value at specified table/row/column and optionally
timestamp coordinates. Deletes must match the deleted cell's
coordinates exactly. When scanning, a delete cell suppresses older
@ -54,5 +54,5 @@ EOF
end
end
#Add the method table.delete that calls delete.delete
::Hbase::Table.add_shell_command("delete")
# Add the method table.delete that calls delete.delete
::Hbase::Table.add_shell_command('delete')
@ -20,7 +20,7 @@ module Shell
module Commands
class DeleteAllSnapshot < Command
def help
return <<-EOF
<<-EOF
Delete all of the snapshots matching the given regex. Examples:
hbase> delete_all_snapshot 's.*'
@ -29,17 +29,17 @@ EOF
end
def command(regex)
formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
formatter.header(['SNAPSHOT', 'TABLE + CREATION TIME'])
list = admin.list_snapshot(regex)
count = list.size
list.each do |snapshot|
creation_time = Time.at(snapshot.getCreationTime() / 1000).to_s
formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
creation_time = Time.at(snapshot.getCreationTime / 1000).to_s
formatter.row([snapshot.getName, snapshot.getTable + ' (' + creation_time + ')'])
end
puts "\nDelete the above #{count} snapshots (y/n)?" unless count == 0
answer = 'n'
answer = gets.chomp unless count == 0
puts "No snapshots matched the regex #{regex.to_s}" if count == 0
puts "No snapshots matched the regex #{regex}" if count == 0
return unless answer =~ /y.*/i
@start_time = Time.now
admin.delete_all_snapshot(regex)
@ -50,10 +50,10 @@ EOF
puts "#{successfullyDeleted} snapshots successfully deleted." unless successfullyDeleted == 0
return if leftOverSnapshotCount == 0
puts "\nFailed to delete the below #{leftOverSnapshotCount} snapshots."
formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
formatter.header(['SNAPSHOT', 'TABLE + CREATION TIME'])
list.each do |snapshot|
creation_time = Time.at(snapshot.getCreationTime() / 1000).to_s
formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
creation_time = Time.at(snapshot.getCreationTime / 1000).to_s
formatter.row([snapshot.getName, snapshot.getTable + ' (' + creation_time + ')'])
end
end
end
@ -20,7 +20,7 @@ module Shell
module Commands
class DeleteSnapshot < Command
def help
return <<-EOF
<<-EOF
Delete a specified snapshot. Examples:
hbase> delete_snapshot 'snapshotName',
@ -20,7 +20,7 @@ module Shell
module Commands
class DeleteTableSnapshots < Command
def help
return <<-EOF
<<-EOF
Delete all of the snapshots matching the given table name regular expression
and snapshot name regular expression.
By default snapshot name regular expression will delete all the snapshots of the
@ -37,18 +37,18 @@ Examples:
EOF
end
def command(tableNameregex, snapshotNameRegex = ".*")
formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
def command(tableNameregex, snapshotNameRegex = '.*')
formatter.header(['SNAPSHOT', 'TABLE + CREATION TIME'])
list = admin.list_table_snapshots(tableNameregex, snapshotNameRegex)
count = list.size
list.each do |snapshot|
creation_time = Time.at(snapshot.getCreationTime() / 1000).to_s
formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
creation_time = Time.at(snapshot.getCreationTime / 1000).to_s
formatter.row([snapshot.getName, snapshot.getTable + ' (' + creation_time + ')'])
end
puts "\nDelete the above #{count} snapshots (y/n)?" unless count == 0
answer = 'n'
answer = gets.chomp unless count == 0
puts "No snapshots matched the table name regular expression #{tableNameregex.to_s} and the snapshot name regular expression #{snapshotNameRegex.to_s}" if count == 0
puts "No snapshots matched the table name regular expression #{tableNameregex} and the snapshot name regular expression #{snapshotNameRegex}" if count == 0
return unless answer =~ /y.*/i
@start_time = Time.now
@ -58,7 +58,7 @@ EOF
puts "Successfully deleted snapshot: #{deleteSnapshot.getName}"
puts "\n"
rescue RuntimeError
puts "Failed to delete snapshot: #{deleteSnapshot.getName}, due to below exception,\n" + $!
puts "Failed to delete snapshot: #{deleteSnapshot.getName}, due to below exception,\n" + $ERROR_INFO
puts "\n"
end
end
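$ERROR_INFO above is the readable alias for $!, the exception currently being rescued; the alias is defined by Ruby's bundled English library, so it only resolves if require 'English' has been pulled in somewhere on the load path. A standalone sketch, illustrative only:

# Illustrative only: English-library alias for the current exception.
require 'English'

begin
  raise 'snapshot delete failed'
rescue RuntimeError
  puts $ERROR_INFO.message      # same object as $!
  puts $ERROR_INFO.equal?($!)   # => true
end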
@ -21,7 +21,7 @@ module Shell
module Commands
class Deleteall < Command
def help
return <<-EOF
<<-EOF
Delete all cells in a given row; pass a table name, row, and optionally
a column and timestamp. Deleteall also support deleting a row range using a
row key prefix. Examples:
@ -64,5 +64,5 @@ EOF
end
end
#Add the method table.deleteall that calls deleteall.deleteall
::Hbase::Table.add_shell_command("deleteall")
# Add the method table.deleteall that calls deleteall.deleteall
::Hbase::Table.add_shell_command('deleteall')
@ -21,7 +21,7 @@ module Shell
module Commands
class Describe < Command
def help
return <<-EOF
<<-EOF
Describe the named table. For example:
hbase> describe 't1'
hbase> describe 'ns1:t1'
@ -35,13 +35,13 @@ EOF
def command(table)
column_families = admin.get_column_families(table)
formatter.header(["Table " + table.to_s + " is " + if admin.enabled?(table) then "ENABLED" else "DISABLED" end])
formatter.header(['Table ' + table.to_s + ' is ' + (admin.enabled?(table) ? 'ENABLED' : 'DISABLED')])
formatter.row([table.to_s + admin.get_table_attributes(table)], true)
formatter.header(["COLUMN FAMILIES DESCRIPTION"])
formatter.header(['COLUMN FAMILIES DESCRIPTION'])
column_families.each do |column_family|
formatter.row([ column_family.to_s ], true)
formatter.row([column_family.to_s], true)
end
formatter.footer()
formatter.footer
end
end
end
@ -21,7 +21,7 @@ module Shell
module Commands
class DescribeNamespace < Command
def help
return <<-EOF
<<-EOF
Describe the named namespace. For example:
hbase> describe_namespace 'ns1'
EOF
@ -30,8 +30,8 @@ EOF
def command(namespace)
desc = admin.describe_namespace(namespace)
formatter.header([ "DESCRIPTION" ], [ 64 ])
formatter.row([ desc ], true, [ 64 ])
formatter.header(['DESCRIPTION'], [64])
formatter.row([desc], true, [64])
end
end
end
@ -21,7 +21,7 @@ module Shell
module Commands
class Disable < Command
def help
return <<-EOF
<<-EOF
Start disable of named table:
hbase> disable 't1'
hbase> disable 'ns1:t1'
@ -21,7 +21,7 @@ module Shell
module Commands
class DisableAll < Command
def help
return <<-EOF
<<-EOF
Disable all of tables matching the given regex:
hbase> disable_all 't.*'
@ -34,16 +34,16 @@ EOF
list = admin.list(regex)
count = list.size
list.each do |table|
formatter.row([ table ])
formatter.row([table])
end
puts "\nDisable the above #{count} tables (y/n)?" unless count == 0
answer = 'n'
answer = gets.chomp unless count == 0
puts "No tables matched the regex #{regex.to_s}" if count == 0
puts "No tables matched the regex #{regex}" if count == 0
return unless answer =~ /y.*/i
failed = admin.disable_all(regex)
puts "#{count - failed.size} tables successfully disabled"
puts "#{failed.size} tables not disabled due to an exception: #{failed.join ','}" unless failed.size == 0
puts "#{failed.size} tables not disabled due to an exception: #{failed.join ','}" unless failed.empty?
end
end
end
@ -19,9 +19,9 @@
module Shell
module Commands
class DisablePeer< Command
class DisablePeer < Command
def help
return <<-EOF
<<-EOF
Stops the replication stream to the specified cluster, but still
keeps track of new edits to replicate.
@ -18,9 +18,9 @@
module Shell
module Commands
class DisableTableReplication< Command
class DisableTableReplication < Command
def help
return <<-EOF
<<-EOF
Disable a table's replication switch.
Examples:
@ -21,7 +21,7 @@ module Shell
module Commands
class Drop < Command
def help
return <<-EOF
<<-EOF
Drop the named table. Table must first be disabled:
hbase> drop 't1'
hbase> drop 'ns1:t1'
@ -21,7 +21,7 @@ module Shell
module Commands
class DropAll < Command
def help
return <<-EOF
<<-EOF
Drop all of the tables matching the given regex:
hbase> drop_all 't.*'
@ -34,16 +34,16 @@ EOF
list = admin.list(regex)
count = list.size
list.each do |table|
formatter.row([ table ])
formatter.row([table])
end
puts "\nDrop the above #{count} tables (y/n)?" unless count == 0
answer = 'n'
answer = gets.chomp unless count == 0
puts "No tables matched the regex #{regex.to_s}" if count == 0
puts "No tables matched the regex #{regex}" if count == 0
return unless answer =~ /y.*/i
failed = admin.drop_all(regex)
puts "#{count - failed.size} tables successfully dropped"
puts "#{failed.size} tables not dropped due to an exception: #{failed.join ','}" unless failed.size == 0
puts "#{failed.size} tables not dropped due to an exception: #{failed.join ','}" unless failed.empty?
end
end
end
@ -21,7 +21,7 @@ module Shell
module Commands
class DropNamespace < Command
def help
return <<-EOF
<<-EOF
Drop the named namespace. The namespace must be empty.
EOF
end
@ -21,7 +21,7 @@ module Shell
module Commands
class Enable < Command
def help
return <<-EOF
<<-EOF
Start enable of named table:
hbase> enable 't1'
hbase> enable 'ns1:t1'
@ -21,7 +21,7 @@ module Shell
module Commands
class EnableAll < Command
def help
return <<-EOF
<<-EOF
Enable all of the tables matching the given regex:
hbase> enable_all 't.*'
@ -34,16 +34,16 @@ EOF
list = admin.list(regex)
count = list.size
list.each do |table|
formatter.row([ table ])
formatter.row([table])
end
puts "\nEnable the above #{count} tables (y/n)?" unless count == 0
answer = 'n'
answer = gets.chomp unless count == 0
puts "No tables matched the regex #{regex.to_s}" if count == 0
puts "No tables matched the regex #{regex}" if count == 0
return unless answer =~ /y.*/i
failed = admin.enable_all(regex)
puts "#{count - failed.size} tables successfully enabled"
puts "#{failed.size} tables not enabled due to an exception: #{failed.join ','}" unless failed.size == 0
puts "#{failed.size} tables not enabled due to an exception: #{failed.join ','}" unless failed.empty?
end
end
end
@ -19,9 +19,9 @@
module Shell
module Commands
class EnablePeer< Command
class EnablePeer < Command
def help
return <<-EOF
<<-EOF
Restarts the replication to the specified peer cluster,
continuing from where it was disabled.
@ -18,9 +18,9 @@
module Shell
module Commands
class EnableTableReplication< Command
class EnableTableReplication < Command
def help
return <<-EOF
<<-EOF
Enable a table's replication switch.
Examples:
@ -21,7 +21,7 @@ module Shell
module Commands
class Exists < Command
def help
return <<-EOF
<<-EOF
Does the named table exist?
hbase> exists 't1'
hbase> exists 'ns1:t1'
@ -31,8 +31,8 @@ EOF
def command(table)
exists = admin.exists?(table.to_s)
formatter.row([
"Table #{table} " + (exists ? "does exist" : "does not exist")
])
"Table #{table} " + (exists ? 'does exist' : 'does not exist')
])
exists
end
end
@ -21,7 +21,7 @@ module Shell
module Commands
class Flush < Command
def help
return <<-EOF
<<-EOF
Flush all regions in passed table or pass a region row to
flush an individual region. For example:
@ -21,7 +21,7 @@ module Shell
module Commands
class Get < Command
def help
return <<-EOF
<<-EOF
Get row or cell contents; pass table name, row, and optionally
a dictionary of column(s), timestamp, timerange and versions. Examples:
@ -44,14 +44,14 @@ a dictionary of column(s), timestamp, timerange and versions. Examples:
Besides the default 'toStringBinary' format, 'get' also supports custom formatting by
column. A user can define a FORMATTER by adding it to the column name in the get
specification. The FORMATTER can be stipulated:
specification. The FORMATTER can be stipulated:
1. either as a org.apache.hadoop.hbase.util.Bytes method name (e.g, toInt, toString)
2. or as a custom class followed by method name: e.g. 'c(MyFormatterClass).format'.
Example formatting cf:qualifier1 and cf:qualifier2 both as Integers:
Example formatting cf:qualifier1 and cf:qualifier2 both as Integers:
hbase> get 't1', 'r1' {COLUMN => ['cf:qualifier1:toInt',
'cf:qualifier2:c(org.apache.hadoop.hbase.util.Bytes).toInt'] }
'cf:qualifier2:c(org.apache.hadoop.hbase.util.Bytes).toInt'] }
Note that you can specify a FORMATTER by column only (cf:qualifier). You can set a
formatter for all columns (including, all key parts) using the "FORMATTER"
@ -60,7 +60,7 @@ and "FORMATTER_CLASS" options. The default "FORMATTER_CLASS" is
hbase> get 't1', 'r1', {FORMATTER => 'toString'}
hbase> get 't1', 'r1', {FORMATTER_CLASS => 'org.apache.hadoop.hbase.util.Bytes', FORMATTER => 'toString'}
The same commands also can be run on a reference to a table (obtained via get_table or
create_table). Suppose you had a reference t to table 't1', the corresponding commands
would be:
@ -87,10 +87,10 @@ EOF
def get(table, row, *args)
@start_time = Time.now
formatter.header(["COLUMN", "CELL"])
formatter.header(%w[COLUMN CELL])
count, is_stale = table._get_internal(row, *args) do |column, value|
formatter.row([ column, value ])
formatter.row([column, value])
end
formatter.footer(count, is_stale)
@ -99,5 +99,5 @@ EOF
end
end
#add get command to table
# add get command to table
::Hbase::Table.add_shell_command('get')
@ -19,7 +19,7 @@ module Shell
module Commands
class GetAuths < Command
def help
return <<-EOF
<<-EOF
Get the visibility labels set for a particular user or group
Syntax : get_auths 'user'
@ -33,7 +33,7 @@ EOF
def command(user)
list = visibility_labels_admin.get_auths(user)
list.each do |auths|
formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(auths.toByteArray)])
formatter.row([org.apache.hadoop.hbase.util.Bytes.toStringBinary(auths.toByteArray)])
end
list
end
@ -21,7 +21,7 @@ module Shell
module Commands
class GetCounter < Command
def help
return <<-EOF
<<-EOF
Return a counter cell value at specified table/row/column coordinates.
A counter cell should be managed with atomic increment functions on HBase
and the data should be binary encoded (as long value). Example:
@ -44,7 +44,7 @@ EOF
if cnt = table._get_counter_internal(row, column)
puts "COUNTER VALUE = #{cnt}"
else
puts "No counter found at specified coordinates"
puts 'No counter found at specified coordinates'
end
end
end
@ -19,7 +19,7 @@ module Shell
module Commands
class GetPeerConfig < Command
def help
return <<-EOF
<<-EOF
Outputs the cluster key, replication endpoint class (if present), and any replication configuration parameters
EOF
end
@ -35,19 +35,14 @@ module Shell
cluster_key = peer_config.get_cluster_key
endpoint = peer_config.get_replication_endpoint_impl
unless cluster_key.nil?
formatter.row(["Cluster Key", cluster_key])
end
unless endpoint.nil?
formatter.row(["Replication Endpoint", endpoint])
end
formatter.row(['Cluster Key', cluster_key]) unless cluster_key.nil?
formatter.row(['Replication Endpoint', endpoint]) unless endpoint.nil?
unless peer_config.get_configuration.nil?
peer_config.get_configuration.each do |config_entry|
formatter.row(config_entry)
end
end
end
end
end
end
end
@ -19,7 +19,7 @@ module Shell
module Commands
class GetRsgroup < Command
def help
return <<-EOF
<<-EOF
Get a RegionServer group's information.
Example:
@ -34,7 +34,7 @@ EOF
rsgroup_admin.get_rsgroup(group_name) do |s|
formatter.row([s])
end
formatter.footer()
formatter.footer
end
end
end
@ -19,7 +19,7 @@ module Shell
module Commands
class GetServerRsgroup < Command
def help
return <<-EOF
<<-EOF
Get the group name the given RegionServer is a member of.
Example:
@ -20,7 +20,7 @@ module Shell
module Commands
class GetSplits < Command
def help
return <<-EOF
<<-EOF
Get the splits of the named table:
hbase> get_splits 't1'
hbase> get_splits 'ns1:t1'
@ -37,10 +37,10 @@ EOF
end
def get_splits(table)
table._get_splits_internal()
table._get_splits_internal
end
end
end
end
::Hbase::Table.add_shell_command("get_splits")
::Hbase::Table.add_shell_command('get_splits')
@ -20,7 +20,7 @@ module Shell
module Commands
class GetTable < Command
def help
return <<-EOF
<<-EOF
Get the given table name and return it as an actual object to
be manipulated by the user. See table.help for more information
on how to use the table.
@ -37,7 +37,7 @@ which will then print the help for that table.
EOF
end
def command(table, *args)
def command(table, *_args)
table(table)
end
end
@ -19,7 +19,7 @@ module Shell
module Commands
class GetTableRsgroup < Command
def help
return <<-EOF
<<-EOF
Get the RegionServer group name the given table is a member of.
Example:
@ -31,7 +31,7 @@ EOF
def command(table)
group_name =
rsgroup_admin.get_rsgroup_of_table(table).getName
rsgroup_admin.get_rsgroup_of_table(table).getName
formatter.row([group_name])
formatter.footer(1)
end
@ -20,15 +20,15 @@ module Shell
module Commands
class Grant < Command
def help
return <<-EOF
<<-EOF
Grant users specific rights.
Syntax : grant <user>, <permissions> [, <@namespace> [, <table> [, <column family> [, <column qualifier>]]]
permissions is either zero or more letters from the set "RWXCA".
READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A')
Note: Groups and users are granted access in the same way, but groups are prefixed with an '@'
character. In the same way, tables and namespaces are specified, but namespaces are
Note: Groups and users are granted access in the same way, but groups are prefixed with an '@'
character. In the same way, tables and namespaces are specified, but namespaces are
prefixed with an '@' character.
For example:
@ -42,12 +42,11 @@ EOF
end
def command(*args)
# command form is ambiguous at first argument
table_name = user = args[0]
raise(ArgumentError, "First argument should be a String") unless user.kind_of?(String)
raise(ArgumentError, 'First argument should be a String') unless user.is_a?(String)
if args[1].kind_of?(String)
if args[1].is_a?(String)
# Original form of the command
# user in args[0]
@ -57,27 +56,30 @@ EOF
# qualifier in args[4] or nil
permissions = args[1]
raise(ArgumentError, "Permissions are not of String type") unless permissions.kind_of?(
String)
raise(ArgumentError, 'Permissions are not of String type') unless permissions.is_a?(
String
)
table_name = family = qualifier = nil
table_name = args[2] # will be nil if unset
if not table_name.nil?
raise(ArgumentError, "Table name is not of String type") unless table_name.kind_of?(
String)
family = args[3] # will be nil if unset
if not family.nil?
raise(ArgumentError, "Family is not of String type") unless family.kind_of?(String)
qualifier = args[4] # will be nil if unset
if not qualifier.nil?
raise(ArgumentError, "Qualifier is not of String type") unless qualifier.kind_of?(
String)
unless table_name.nil?
raise(ArgumentError, 'Table name is not of String type') unless table_name.is_a?(
String
)
family = args[3] # will be nil if unset
unless family.nil?
raise(ArgumentError, 'Family is not of String type') unless family.is_a?(String)
qualifier = args[4] # will be nil if unset
unless qualifier.nil?
raise(ArgumentError, 'Qualifier is not of String type') unless qualifier.is_a?(
String
)
end
end
end
@start_time = Time.now
security_admin.grant(user, permissions, table_name, family, qualifier)
elsif args[1].kind_of?(Hash)
elsif args[1].is_a?(Hash)
# New form of the command, a cell ACL update
# table_name in args[0], a string
@ -86,9 +88,9 @@ EOF
# Useful for feature testing and debugging.
permissions = args[1]
raise(ArgumentError, "Permissions are not of Hash type") unless permissions.kind_of?(Hash)
raise(ArgumentError, 'Permissions are not of Hash type') unless permissions.is_a?(Hash)
scan = args[2]
raise(ArgumentError, "Scanner specification is not a Hash") unless scan.kind_of?(Hash)
raise(ArgumentError, 'Scanner specification is not a Hash') unless scan.is_a?(Hash)
t = table(table_name)
@start_time = Time.now
@ -108,9 +110,8 @@ EOF
formatter.footer(count)
else
raise(ArgumentError, "Second argument should be a String or Hash")
raise(ArgumentError, 'Second argument should be a String or Hash')
end
end
end
end
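The two argument forms parsed above correspond to calls roughly like the following; the user, table, and scanner-spec values are illustrative only:

  # Original form: user, permission string, then optional table, family, qualifier
  hbase> grant 'bob', 'RW', 'ns1:t1', 'cf1', 'q1'
  # Namespace variant: namespaces take an '@' prefix
  hbase> grant 'bob', 'RWXCA', '@ns1'
  # Cell-ACL form: table name, then a permissions Hash and a scanner specification Hash
  hbase> grant 't1', { 'bob' => 'R' }, { COLUMNS => 'cf1' }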

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class Incr < Command
def help
return <<-EOF
<<-EOF
Increments a cell 'value' at specified table/row/column coordinates.
To increment a cell value in table 'ns1:t1' or 't1' at row 'r1' under column
'c1' by 1 (can be omitted) or 10 do:
@ -48,16 +48,16 @@ EOF
incr(table(table), row, column, value, args)
end
def incr(table, row, column, value = nil, args={})
def incr(table, row, column, value = nil, args = {})
if cnt = table._incr_internal(row, column, value, args)
puts "COUNTER VALUE = #{cnt}"
else
puts "No counter found at specified coordinates"
puts 'No counter found at specified coordinates'
end
end
end
end
end
#add incr comamnd to Table
::Hbase::Table.add_shell_command("incr")
# add incr command to Table
::Hbase::Table.add_shell_command('incr')
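Mirroring the help text above, usage looks like this (t is a table reference obtained via get_table):

  hbase> incr 'ns1:t1', 'r1', 'c1'        # increment by 1 (value omitted)
  hbase> incr 't1', 'r1', 'c1', 10        # increment by 10
  hbase> t.incr 'r1', 'c1', 10            # same call on a table reference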

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class IsDisabled < Command
def help
return <<-EOF
<<-EOF
Is named table disabled? For example:
hbase> is_disabled 't1'
hbase> is_disabled 'ns1:t1'
@ -29,7 +29,7 @@ EOF
end
def command(table)
formatter.row([admin.disabled?(table)? "true" : "false"])
formatter.row([admin.disabled?(table) ? 'true' : 'false'])
end
end
end

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class IsEnabled < Command
def help
return <<-EOF
<<-EOF
Is named table enabled? For example:
hbase> is_enabled 't1'
hbase> is_enabled 'ns1:t1'
@ -30,7 +30,7 @@ EOF
def command(table)
enabled = admin.enabled?(table)
formatter.row([enabled ? "true" : "false"])
formatter.row([enabled ? 'true' : 'false'])
enabled
end
end

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class List < Command
def help
return <<-EOF
<<-EOF
List all user tables in hbase. Optional regular expression parameter could
be used to filter the output. Examples:
@ -32,16 +32,16 @@ be used to filter the output. Examples:
EOF
end
def command(regex = ".*")
formatter.header([ "TABLE" ])
def command(regex = '.*')
formatter.header(['TABLE'])
list = admin.list(regex)
list.each do |table|
formatter.row([ table ])
formatter.row([table])
end
formatter.footer(list.size)
return list
list
end
end
end

View File

@ -19,7 +19,7 @@ module Shell
module Commands
class ListLabels < Command
def help
return <<-EOF
<<-EOF
List the visibility labels defined in the system.
Optional regular expression parameter could be used to filter the labels being returned.
Syntax : list_labels
@ -31,10 +31,10 @@ For example:
EOF
end
def command(regex = ".*")
def command(regex = '.*')
list = visibility_labels_admin.list_labels(regex)
list.each do |label|
formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(label.toByteArray)])
formatter.row([org.apache.hadoop.hbase.util.Bytes.toStringBinary(label.toByteArray)])
end
end
end

View File

@ -21,15 +21,15 @@ module Shell
module Commands
class ListLocks < Command
def help
return <<-EOF
<<-EOF
List all locks in hbase. Examples:
hbase> list_locks
EOF
end
def command()
list = admin.list_locks()
def command
list = admin.list_locks
list.each do |lock|
formatter.output_strln("#{lock.resourceType}(#{lock.resourceName})")
@ -42,17 +42,17 @@ EOF
end
if lock.waitingProcedures.any?
formatter.output_strln("Waiting procedures:")
formatter.header([ "Lock type", "Procedure Id" ])
formatter.output_strln('Waiting procedures:')
formatter.header(['Lock type', 'Procedure Id'])
lock.waitingProcedures.each do |waitingProcedure|
formatter.row([ waitingProcedure.lockType.to_s, waitingProcedure.procedure.procId.to_s ]);
formatter.row([waitingProcedure.lockType.to_s, waitingProcedure.procedure.procId.to_s])
end
formatter.footer(lock.waitingProcedures.size)
end
formatter.output_strln("");
formatter.output_strln('')
end
end
end

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class ListNamespace < Command
def help
return <<-EOF
<<-EOF
List all namespaces in hbase. Optional regular expression parameter could
be used to filter the output. Examples:
@ -30,12 +30,12 @@ be used to filter the output. Examples:
EOF
end
def command(regex = ".*")
formatter.header([ "NAMESPACE" ])
def command(regex = '.*')
formatter.header(['NAMESPACE'])
list = admin.list_namespace(regex)
list.each do |table|
formatter.row([ table ])
formatter.row([table])
end
formatter.footer(list.size)

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class ListNamespaceTables < Command
def help
return <<-EOF
<<-EOF
List all tables that are members of the namespace.
Examples:
@ -30,11 +30,11 @@ EOF
end
def command(namespace)
formatter.header([ "TABLE" ])
formatter.header(['TABLE'])
list = admin.list_namespace_tables(namespace)
list.each do |table|
formatter.row([ table ])
formatter.row([table])
end
formatter.footer(list.size)

View File

@ -19,7 +19,7 @@ module Shell
module Commands
class ListPeerConfigs < Command
def help
return <<-EOF
<<-EOF
No-argument method that outputs the replication peer configuration for each peer defined on this cluster.
EOF
end
@ -30,9 +30,9 @@ module Shell
peer_configs.each do |peer_config_entry|
peer_id = peer_config_entry[0]
peer_config = peer_config_entry[1]
formatter.row(["PeerId", peer_id])
formatter.row(['PeerId', peer_id])
GetPeerConfig.new(@shell).format_peer_config(peer_config)
formatter.row([" "])
formatter.row([' '])
end
end
peer_configs

View File

@ -20,33 +20,33 @@
module Shell
module Commands
class ListPeers< Command
class ListPeers < Command
def help
return <<-EOF
<<-EOF
List all replication peer clusters.
hbase> list_peers
EOF
end
def command()
def command
peers = replication_admin.list_peers
formatter.header(["PEER_ID", "CLUSTER_KEY", "ENDPOINT_CLASSNAME",
"STATE", "NAMESPACES", "TABLE_CFS", "BANDWIDTH"])
formatter.header(%w[PEER_ID CLUSTER_KEY ENDPOINT_CLASSNAME
STATE NAMESPACES TABLE_CFS BANDWIDTH])
peers.each do |peer|
id = peer.getPeerId
state = peer.isEnabled ? "ENABLED" : "DISABLED"
state = peer.isEnabled ? 'ENABLED' : 'DISABLED'
config = peer.getPeerConfig
namespaces = replication_admin.show_peer_namespaces(config)
tableCFs = replication_admin.show_peer_tableCFs(id)
formatter.row([ id, config.getClusterKey,
config.getReplicationEndpointImpl, state, namespaces, tableCFs,
config.getBandwidth ])
formatter.row([id, config.getClusterKey,
config.getReplicationEndpointImpl, state, namespaces, tableCFs,
config.getBandwidth])
end
formatter.footer()
formatter.footer
peers
end
end

View File

@ -21,21 +21,21 @@ module Shell
module Commands
class ListProcedures < Command
def help
return <<-EOF
<<-EOF
List all procedures in hbase. For example:
hbase> list_procedures
EOF
end
def command()
formatter.header([ "Id", "Name", "State", "Submitted_Time", "Last_Update" ])
def command
formatter.header(%w[Id Name State Submitted_Time Last_Update])
list = admin.list_procedures()
list = admin.list_procedures
list.each do |proc|
submitted_time = Time.at(proc.getSubmittedTime / 1000).to_s
last_update = Time.at(proc.getLastUpdate / 1000).to_s
formatter.row([ proc.getProcId, proc.getProcName, proc.getProcState, submitted_time, last_update ])
formatter.row([proc.getProcId, proc.getProcName, proc.getProcState, submitted_time, last_update])
end
formatter.footer(list.size)

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class ListQuotaSnapshots < Command
def help
return <<-EOF
<<-EOF
Lists the current space quota snapshots with optional selection criteria.
Snapshots encapsulate relevant information to space quotas such as space
use, configured limits, and quota violation details. This command is
@ -48,15 +48,15 @@ EOF
desired_table = args[TABLE]
desired_namespace = args[NAMESPACE]
desired_regionserver = args[REGIONSERVER]
formatter.header(["TABLE", "USAGE", "LIMIT", "IN_VIOLATION", "POLICY"])
formatter.header(%w[TABLE USAGE LIMIT IN_VIOLATION POLICY])
count = 0
quotas_admin.get_quota_snapshots(desired_regionserver).each do |table_name,snapshot|
quotas_admin.get_quota_snapshots(desired_regionserver).each do |table_name, snapshot|
# Skip this snapshot if it's for a table/namespace the user did not ask for
next unless accept? table_name, desired_table, desired_namespace
status = snapshot.getQuotaStatus()
status = snapshot.getQuotaStatus
policy = get_policy(status)
formatter.row([table_name.to_s, snapshot.getUsage().to_s, snapshot.getLimit().to_s,
status.isInViolation().to_s, policy])
formatter.row([table_name.to_s, snapshot.getUsage.to_s, snapshot.getLimit.to_s,
status.isInViolation.to_s, policy])
count += 1
end
formatter.footer(count)
@ -64,20 +64,20 @@ EOF
def get_policy(status)
# Unwrap the violation policy if it exists
if status.isInViolation()
status.getPolicy().name()
if status.isInViolation
status.getPolicy.name
else
"None"
'None'
end
end
def accept?(table_name, desired_table=nil, desired_namespace=nil)
def accept?(table_name, desired_table = nil, desired_namespace = nil)
# Check the table name if given one
if desired_table and table_name.getQualifierAsString() != desired_table
if desired_table && table_name.getQualifierAsString != desired_table
return false
end
# Check the namespace if given one
if desired_namespace and table_name.getNamespaceAsString() != desired_namespace
if desired_namespace && table_name.getNamespaceAsString != desired_namespace
return false
end
true
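Given the argument keys read above (TABLE, NAMESPACE, REGIONSERVER), invocation looks roughly like this; the table, namespace, and server name values are illustrative:

  hbase> list_quota_snapshots
  hbase> list_quota_snapshots TABLE => 't1'
  hbase> list_quota_snapshots NAMESPACE => 'ns1'
  hbase> list_quota_snapshots REGIONSERVER => 'server1.example.com,16020,1483482894742'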

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class ListQuotaTableSizes < Command
def help
return <<-EOF
<<-EOF
Lists the computed size of each table in the cluster as computed by
all RegionServers. This is the raw information that the Master uses to
make decisions about space quotas. Most times, using `list_quota_snapshots`
@ -33,10 +33,10 @@ For example:
EOF
end
def command(args = {})
formatter.header(["TABLE", "SIZE"])
def command(_args = {})
formatter.header(%w[TABLE SIZE])
count = 0
quotas_admin.get_master_table_sizes().each do |tableName,size|
quotas_admin.get_master_table_sizes.each do |tableName, size|
formatter.row([tableName.to_s, size.to_s])
count += 1
end

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class ListQuotas < Command
def help
return <<-EOF
<<-EOF
List the quota settings added to the system.
You can filter the result based on USER, TABLE, or NAMESPACE.
@ -37,11 +37,11 @@ EOF
end
def command(args = {})
formatter.header(["OWNER", "QUOTAS"])
formatter.header(%w[OWNER QUOTAS])
#actually do the scanning
# actually do the scanning
count = quotas_admin.list_quotas(args) do |row, cells|
formatter.row([ row, cells ])
formatter.row([row, cells])
end
formatter.footer(count)
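Per the USER, TABLE, and NAMESPACE filters named in this help text, typical calls would look roughly like the following (the patterns are illustrative):

  hbase> list_quotas
  hbase> list_quotas USER => 'bob.*'
  hbase> list_quotas USER => 'bob.*', TABLE => 't1'
  hbase> list_quotas NAMESPACE => 'ns.*'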

View File

@ -21,8 +21,7 @@ module Shell
module Commands
class ListRegions < Command
def help
return<<EOF
return <<EOF
List all regions for a particular table as an array and also filter them by server name (optional) as prefix
and maximum locality (optional). By default, it will return all the regions for the table with any locality.
The command displays server name, region name, start key, end key, size of the region in MB, number of requests
@ -39,37 +38,37 @@ module Shell
hbase> list_regions 'table_name', '', ['SERVER_NAME', 'start_key']
EOF
return
nil
end
def command(table_name, options = nil, cols = nil)
if options.nil?
options = {}
elsif not options.is_a? Hash
elsif !options.is_a? Hash
# When options isn't a hash, assume it's the server name
# and create the hash internally
options = {SERVER_NAME => options}
options = { SERVER_NAME => options }
end
size_hash = Hash.new
size_hash = {}
if cols.nil?
size_hash = { "SERVER_NAME" => 12, "REGION_NAME" => 12, "START_KEY" => 10, "END_KEY" => 10, "SIZE" => 5, "REQ" => 5, "LOCALITY" => 10 }
size_hash = { 'SERVER_NAME' => 12, 'REGION_NAME' => 12, 'START_KEY' => 10, 'END_KEY' => 10, 'SIZE' => 5, 'REQ' => 5, 'LOCALITY' => 10 }
elsif cols.is_a?(Array)
cols.each do |col|
if col.upcase.eql?("SERVER_NAME")
size_hash.store("SERVER_NAME", 12)
elsif col.upcase.eql?("REGION_NAME")
size_hash.store("REGION_NAME", 12)
elsif col.upcase.eql?("START_KEY")
size_hash.store("START_KEY", 10)
elsif col.upcase.eql?("END_KEY")
size_hash.store("END_KEY", 10)
elsif col.upcase.eql?("SIZE")
size_hash.store("SIZE", 5)
elsif col.upcase.eql?("REQ")
size_hash.store("REQ", 5)
elsif col.upcase.eql?("LOCALITY")
size_hash.store("LOCALITY", 10)
if col.casecmp('SERVER_NAME').zero?
size_hash.store('SERVER_NAME', 12)
elsif col.casecmp('REGION_NAME').zero?
size_hash.store('REGION_NAME', 12)
elsif col.casecmp('START_KEY').zero?
size_hash.store('START_KEY', 10)
elsif col.casecmp('END_KEY').zero?
size_hash.store('END_KEY', 10)
elsif col.casecmp('SIZE').zero?
size_hash.store('SIZE', 5)
elsif col.casecmp('REQ').zero?
size_hash.store('REQ', 5)
elsif col.casecmp('LOCALITY').zero?
size_hash.store('LOCALITY', 10)
else
raise "#{col} is not a valid column. Possible values are SERVER_NAME, REGION_NAME, START_KEY, END_KEY, SIZE, REQ, LOCALITY."
end
@ -78,12 +77,12 @@ EOF
raise "#{cols} must be an array of strings. Possible values are SERVER_NAME, REGION_NAME, START_KEY, END_KEY, SIZE, REQ, LOCALITY."
end
admin_instance = admin.instance_variable_get("@admin")
conn_instance = admin_instance.getConnection()
cluster_status = admin_instance.getClusterStatus()
admin_instance = admin.instance_variable_get('@admin')
conn_instance = admin_instance.getConnection
cluster_status = admin_instance.getClusterStatus
hregion_locator_instance = conn_instance.getRegionLocator(TableName.valueOf(table_name))
hregion_locator_list = hregion_locator_instance.getAllRegionLocations().to_a
results = Array.new
hregion_locator_list = hregion_locator_instance.getAllRegionLocations.to_a
results = []
desired_server_name = options[SERVER_NAME]
begin
@ -92,7 +91,7 @@ EOF
# A locality threshold of "1.0" would be all regions (cannot have greater than 1 locality)
# Regions which have a `dataLocality` less-than-or-equal to this value are accepted
locality_threshold = 1.0
if options.has_key? LOCALITY_THRESHOLD
if options.key? LOCALITY_THRESHOLD
value = options[LOCALITY_THRESHOLD]
# Value validation. Must be a Float, and must be between [0, 1.0]
raise "#{LOCALITY_THRESHOLD} must be a float value" unless value.is_a? Float
@ -101,88 +100,86 @@ EOF
end
regions.each do |hregion|
hregion_info = hregion.getRegionInfo()
server_name = hregion.getServerName()
region_load_map = cluster_status.getLoad(server_name).getRegionsLoad()
region_load = region_load_map.get(hregion_info.getRegionName())
hregion_info = hregion.getRegionInfo
server_name = hregion.getServerName
region_load_map = cluster_status.getLoad(server_name).getRegionsLoad
region_load = region_load_map.get(hregion_info.getRegionName)
# Ignore regions which exceed our locality threshold
if accept_region_for_locality? region_load.getDataLocality(), locality_threshold
result_hash = Hash.new
next unless accept_region_for_locality? region_load.getDataLocality, locality_threshold
result_hash = {}
if size_hash.key?("SERVER_NAME")
result_hash.store("SERVER_NAME", server_name.toString().strip)
size_hash["SERVER_NAME"] = [size_hash["SERVER_NAME"], server_name.toString().strip.length].max
end
if size_hash.key?("REGION_NAME")
result_hash.store("REGION_NAME", hregion_info.getRegionNameAsString().strip)
size_hash["REGION_NAME"] = [size_hash["REGION_NAME"], hregion_info.getRegionNameAsString().length].max
end
if size_hash.key?("START_KEY")
startKey = Bytes.toStringBinary(hregion_info.getStartKey()).strip
result_hash.store("START_KEY", startKey)
size_hash["START_KEY"] = [size_hash["START_KEY"], startKey.length].max
end
if size_hash.key?("END_KEY")
endKey = Bytes.toStringBinary(hregion_info.getEndKey()).strip
result_hash.store("END_KEY", endKey)
size_hash["END_KEY"] = [size_hash["END_KEY"], endKey.length].max
end
if size_hash.key?("SIZE")
region_store_file_size = region_load.getStorefileSizeMB().to_s.strip
result_hash.store("SIZE", region_store_file_size)
size_hash["SIZE"] = [size_hash["SIZE"], region_store_file_size.length].max
end
if size_hash.key?("REQ")
region_requests = region_load.getRequestsCount().to_s.strip
result_hash.store("REQ", region_requests)
size_hash["REQ"] = [size_hash["REQ"], region_requests.length].max
end
if size_hash.key?("LOCALITY")
locality = region_load.getDataLocality().to_s.strip
result_hash.store("LOCALITY", locality)
size_hash["LOCALITY"] = [size_hash["LOCALITY"], locality.length].max
end
results << result_hash
if size_hash.key?('SERVER_NAME')
result_hash.store('SERVER_NAME', server_name.toString.strip)
size_hash['SERVER_NAME'] = [size_hash['SERVER_NAME'], server_name.toString.strip.length].max
end
if size_hash.key?('REGION_NAME')
result_hash.store('REGION_NAME', hregion_info.getRegionNameAsString.strip)
size_hash['REGION_NAME'] = [size_hash['REGION_NAME'], hregion_info.getRegionNameAsString.length].max
end
if size_hash.key?('START_KEY')
startKey = Bytes.toStringBinary(hregion_info.getStartKey).strip
result_hash.store('START_KEY', startKey)
size_hash['START_KEY'] = [size_hash['START_KEY'], startKey.length].max
end
if size_hash.key?('END_KEY')
endKey = Bytes.toStringBinary(hregion_info.getEndKey).strip
result_hash.store('END_KEY', endKey)
size_hash['END_KEY'] = [size_hash['END_KEY'], endKey.length].max
end
if size_hash.key?('SIZE')
region_store_file_size = region_load.getStorefileSizeMB.to_s.strip
result_hash.store('SIZE', region_store_file_size)
size_hash['SIZE'] = [size_hash['SIZE'], region_store_file_size.length].max
end
if size_hash.key?('REQ')
region_requests = region_load.getRequestsCount.to_s.strip
result_hash.store('REQ', region_requests)
size_hash['REQ'] = [size_hash['REQ'], region_requests.length].max
end
if size_hash.key?('LOCALITY')
locality = region_load.getDataLocality.to_s.strip
result_hash.store('LOCALITY', locality)
size_hash['LOCALITY'] = [size_hash['LOCALITY'], locality.length].max
end
results << result_hash
end
ensure
hregion_locator_instance.close()
hregion_locator_instance.close
end
@end_time = Time.now
size_hash.each do | param, length |
size_hash.each do |param, length|
printf(" %#{length}s |", param)
end
printf("\n")
size_hash.each do | param, length |
str = "-" * length
size_hash.each do |_param, length|
str = '-' * length
printf(" %#{length}s |", str)
end
printf("\n")
results.each do | result |
size_hash.each do | param, length |
results.each do |result|
size_hash.each do |param, length|
printf(" %#{length}s |", result[param])
end
printf("\n")
end
printf(" %d rows\n", results.size)
end
def valid_locality_threshold?(value)
value >= 0 and value <= 1.0
value >= 0 && value <= 1.0
end
def get_regions_for_table_and_server(table_name, conn, server_name)
@ -191,16 +188,16 @@ EOF
def get_regions_for_server(regions_for_table, server_name)
regions_for_table.select do |hregion|
accept_server_name? server_name, hregion.getServerName().toString()
accept_server_name? server_name, hregion.getServerName.toString
end
end
def get_regions_for_table(table_name, conn)
conn.getRegionLocator(TableName.valueOf(table_name)).getAllRegionLocations().to_a
conn.getRegionLocator(TableName.valueOf(table_name)).getAllRegionLocations.to_a
end
def accept_server_name?(desired_server_name, actual_server_name)
desired_server_name.nil? or actual_server_name.start_with? desired_server_name
desired_server_name.nil? || actual_server_name.start_with?(desired_server_name)
end
def accept_region_for_locality?(actual_locality, locality_threshold)

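Pulling together the options handled in the command above (a SERVER_NAME prefix, a LOCALITY_THRESHOLD float, and an optional column list), usage looks roughly like this; the table and server names are illustrative:

  hbase> list_regions 't1'
  hbase> list_regions 't1', 'server1.example.com'                 # server-name prefix filter
  hbase> list_regions 't1', { LOCALITY_THRESHOLD => 0.8 }
  hbase> list_regions 't1', {}, ['SERVER_NAME', 'LOCALITY']       # restrict displayed columns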
View File

@ -20,9 +20,9 @@
module Shell
module Commands
class ListReplicatedTables< Command
class ListReplicatedTables < Command
def help
return <<-EOF
<<-EOF
List all the tables and column families replicated from this cluster
hbase> list_replicated_tables
@ -30,25 +30,25 @@ List all the tables and column families replicated from this cluster
EOF
end
def command(regex = ".*")
formatter.header([ "TABLE:COLUMNFAMILY", "ReplicationType" ], [ 32 ])
def command(regex = '.*')
formatter.header(['TABLE:COLUMNFAMILY', 'ReplicationType'], [32])
list = replication_admin.list_replicated_tables(regex)
list.each do |e|
map = e.getColumnFamilyMap()
map = e.getColumnFamilyMap
map.each do |cf|
if cf[1] == org.apache.hadoop.hbase.HConstants::REPLICATION_SCOPE_LOCAL
replicateType = "LOCAL"
replicateType = 'LOCAL'
elsif cf[1] == org.apache.hadoop.hbase.HConstants::REPLICATION_SCOPE_GLOBAL
replicateType = "GLOBAL"
replicateType = 'GLOBAL'
elsif cf[1] == org.apache.hadoop.hbase.HConstants::REPLICATION_SCOPE_SERIAL
replicateType = "SERIAL"
replicateType = 'SERIAL'
else
replicateType = "UNKNOWN"
replicateType = 'UNKNOWN'
end
formatter.row([e.getTable().getNameAsString() + ":" + cf[0], replicateType], true, [32])
formatter.row([e.getTable.getNameAsString + ':' + cf[0], replicateType], true, [32])
end
end
formatter.footer()
formatter.footer
end
end
end

View File

@ -19,7 +19,7 @@ module Shell
module Commands
class ListRsgroups < Command
def help
return <<-EOF
<<-EOF
List all RegionServer groups. Optional regular expression parameter can
be used to filter the output.

View File

@ -19,7 +19,7 @@ module Shell
module Commands
class ListSecurityCapabilities < Command
def help
return <<-EOF
<<-EOF
List supported security capabilities
Example:
@ -27,20 +27,18 @@ Example:
EOF
end
def command()
begin
list = admin.get_security_capabilities
list.each do |s|
puts s.getName
end
return list.map { |s| s.getName() }
rescue Exception => e
if e.to_s.include? "UnsupportedOperationException"
puts "ERROR: Master does not support getSecurityCapabilities"
return []
end
raise e
def command
list = admin.get_security_capabilities
list.each do |s|
puts s.getName
end
return list.map(&:getName)
rescue Exception => e
if e.to_s.include? 'UnsupportedOperationException'
puts 'ERROR: Master does not support getSecurityCapabilities'
return []
end
raise e
end
end
end

View File

@ -21,17 +21,17 @@ module Shell
module Commands
class ListSnapshotSizes < Command
def help
return <<-EOF
<<-EOF
Lists the size of every HBase snapshot given the space quota size computation
algorithms. An HBase snapshot only "owns" the size of a file when the table
from which the snapshot was created no longer refers to that file.
EOF
end
def command(args = {})
formatter.header(["SNAPSHOT", "SIZE"])
def command(_args = {})
formatter.header(%w[SNAPSHOT SIZE])
count = 0
quotas_admin.list_snapshot_sizes().each do |snapshot,size|
quotas_admin.list_snapshot_sizes.each do |snapshot, size|
formatter.row([snapshot.to_s, size.to_s])
count += 1
end

View File

@ -22,7 +22,7 @@ module Shell
module Commands
class ListSnapshots < Command
def help
return <<-EOF
<<-EOF
List all snapshots taken (by printing the names and relative information).
Optional regular expression parameter could be used to filter the output
by snapshot name.
@ -33,17 +33,17 @@ Examples:
EOF
end
def command(regex = ".*")
formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
def command(regex = '.*')
formatter.header(['SNAPSHOT', 'TABLE + CREATION TIME'])
list = admin.list_snapshot(regex)
list.each do |snapshot|
creation_time = Time.at(snapshot.getCreationTime() / 1000).to_s
formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
creation_time = Time.at(snapshot.getCreationTime / 1000).to_s
formatter.row([snapshot.getName, snapshot.getTable + ' (' + creation_time + ')'])
end
formatter.footer(list.size)
return list.map { |s| s.getName() }
list.map(&:getName)
end
end
end
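As the help text notes, the command accepts an optional name regex and, per the final expression above, returns the matching snapshot names as an array; a small sketch with illustrative snapshot names:

  hbase> list_snapshots
  hbase> list_snapshots 'backup-.*'
  hbase> names = list_snapshots            # capture the returned array of names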

View File

@ -22,7 +22,7 @@ module Shell
module Commands
class ListTableSnapshots < Command
def help
return <<-EOF
<<-EOF
List all completed snapshots matching the table name regular expression and the
snapshot name regular expression (by printing the names and relative information).
Optional snapshot name regular expression parameter could be used to filter the output
@ -38,17 +38,17 @@ Examples:
EOF
end
def command(tableNameRegex, snapshotNameRegex = ".*")
formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
def command(tableNameRegex, snapshotNameRegex = '.*')
formatter.header(['SNAPSHOT', 'TABLE + CREATION TIME'])
list = admin.list_table_snapshots(tableNameRegex, snapshotNameRegex)
list.each do |snapshot|
creation_time = Time.at(snapshot.getCreationTime() / 1000).to_s
formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
creation_time = Time.at(snapshot.getCreationTime / 1000).to_s
formatter.row([snapshot.getName, snapshot.getTable + ' (' + creation_time + ')'])
end
formatter.footer(list.size)
return list.map { |s| s.getName() }
list.map(&:getName)
end
end
end

View File

@ -22,7 +22,7 @@ module Shell
module Commands
class LocateRegion < Command
def help
return <<-EOF
<<-EOF
Locate the region given a table name and a row-key
hbase> locate_region 'tableName', 'key0'
@ -31,10 +31,10 @@ EOF
def command(table, row_key)
region_location = admin.locate_region(table, row_key)
hri = region_location.getRegionInfo()
hri = region_location.getRegionInfo
formatter.header([ "HOST", "REGION" ])
formatter.row([region_location.getHostnamePort(), hri.toString()])
formatter.header(%w[HOST REGION])
formatter.row([region_location.getHostnamePort, hri.toString])
formatter.footer(1)
region_location
end

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class MajorCompact < Command
def help
return <<-EOF
<<-EOF
Run major compaction on passed table or pass a region row
to major compact an individual region. To compact a single
column family within a region specify the region name
@ -43,7 +43,7 @@ module Shell
EOF
end
def command(table_or_region_name, family = nil, type = "NORMAL")
def command(table_or_region_name, family = nil, type = 'NORMAL')
admin.major_compact(table_or_region_name, family, type)
end
end
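Matching the signature above (family and type are optional, with type defaulting to 'NORMAL'), usage looks roughly like this; the table, region, and family names are illustrative:

  hbase> major_compact 'ns1:t1'              # whole table
  hbase> major_compact 't1', 'cf1'           # single column family
  hbase> major_compact 'r1'                  # an individual region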

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class MergeRegion < Command
def help
return <<-EOF
<<-EOF
Merge two regions. Passing 'true' as the optional third parameter will force
a merge ('force' merges regardless else merge will fail unless passed
adjacent regions. 'force' is for expert use only).

View File

@ -21,7 +21,7 @@ module Shell
module Commands
class Move < Command
def help
return <<-EOF
<<-EOF
Move a region. Optionally specify target regionserver else we choose one
at random. NOTE: You pass the encoded region name, not the region name so
this command is a little different to the others. The encoded region name

Some files were not shown because too many files have changed in this diff.