HBASE-22783 shell with removal of status and load classes

Signed-off-by: stack <stack@apache.org>
Signed-off-by: Andrew Purtell <apurtell@apache.org>
Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
This commit is contained in:
Viraj Jasani 2019-08-06 22:50:22 +05:30 committed by Jan Hentschel
parent 9d8271624e
commit f02f741f99
6 changed files with 69 additions and 54 deletions

View File

@@ -49,7 +49,7 @@ optparse.parse!
# Return array of servernames where servername is hostname+port+startcode
# comma-delimited
def getServers(admin)
serverInfos = admin.getClusterStatus.getServers
serverInfos = admin.getClusterMetrics.getLiveServerMetrics.keySet
servers = []
serverInfos.each do |server|
servers << server.getServerName

View File

@@ -132,7 +132,7 @@ meta_count += 1 if $tablename.nil?
$TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
loop do
if $tablename.nil?
server_count = admin.getClusterStatus.getRegionsCount
server_count = admin.getClusterMetrics.getRegionCount
else
connection = ConnectionFactory.createConnection(config)
server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size

View File

@@ -802,18 +802,19 @@ module Hbase
end
def status(format, type)
status = org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics)
cluster_metrics = @admin.getClusterMetrics
if format == 'detailed'
puts(format('version %s', status.getHBaseVersion))
puts(format('version %s', cluster_metrics.getHBaseVersion))
# Put regions in transition first because usually empty
puts(format('%d regionsInTransition', status.getRegionStatesInTransition.size))
for v in status.getRegionStatesInTransition
puts(format('%d regionsInTransition', cluster_metrics.getRegionStatesInTransition.size))
for v in cluster_metrics.getRegionStatesInTransition
puts(format(' %s', v))
end
master = status.getMaster
puts(format('active master: %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
puts(format('%d backup masters', status.getBackupMastersSize))
for server in status.getBackupMasters
master = cluster_metrics.getMasterName
puts(format('active master: %s:%d %d', master.getHostname, master.getPort,
master.getStartcode))
puts(format('%d backup masters', cluster_metrics.getBackupMasterNames.size))
for server in cluster_metrics.getBackupMasterNames
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
end
@@ -821,24 +822,25 @@ module Hbase
unless master_coprocs.nil?
puts(format('master coprocessors: %s', master_coprocs))
end
puts(format('%d live servers', status.getServersSize))
for server in status.getServers
puts(format('%d live servers', cluster_metrics.getLiveServerMetrics.size))
for server in cluster_metrics.getLiveServerMetrics.keySet
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
puts(format(' %s', status.getLoad(server).toString))
for name, region in status.getLoad(server).getRegionsLoad
puts(format(' %s', cluster_metrics.getLiveServerMetrics.get(server).toString))
for name, region in cluster_metrics.getLiveServerMetrics.get(server).getRegionMetrics
puts(format(' %s', region.getNameAsString.dump))
puts(format(' %s', region.toString))
end
end
puts(format('%d dead servers', status.getDeadServersSize))
for server in status.getDeadServerNames
puts(format('%d dead servers', cluster_metrics.getDeadServerNames.size))
for server in cluster_metrics.getDeadServerNames
puts(format(' %s', server))
end
elsif format == 'replication'
puts(format('version %<version>s', version: status.getHBaseVersion))
puts(format('%<servers>d live servers', servers: status.getServersSize))
status.getServers.each do |server_status|
sl = status.getLoad(server_status)
puts(format('version %<version>s', version: cluster_metrics.getHBaseVersion))
puts(format('%<servers>d live servers',
servers: cluster_metrics.getLiveServerMetrics.size))
cluster_metrics.getLiveServerMetrics.keySet.each do |server_name|
sl = cluster_metrics.getLiveServerMetrics.get(server_name)
r_sink_string = ' SINK:'
r_source_string = ' SOURCE:'
r_load_sink = sl.getReplicationLoadSink
@@ -851,7 +853,7 @@ module Hbase
.getTimestampsOfLastAppliedOp).toString
r_load_source_map = sl.getReplicationLoadSourceMap
build_source_string(r_load_source_map, r_source_string)
puts(format(' %<host>s:', host: server_status.getHostname))
puts(format(' %<host>s:', host: server_name.getHostname))
if type.casecmp('SOURCE').zero?
puts(format('%<source>s', source: r_source_string))
elsif type.casecmp('SINK').zero?
@@ -864,26 +866,30 @@ module Hbase
elsif format == 'simple'
load = 0
regions = 0
master = status.getMaster
puts(format('active master: %s:%d %d', master.getHostname, master.getPort, master.getStartcode))
puts(format('%d backup masters', status.getBackupMastersSize))
for server in status.getBackupMasters
master = cluster_metrics.getMasterName
puts(format('active master: %s:%d %d', master.getHostname, master.getPort,
master.getStartcode))
puts(format('%d backup masters', cluster_metrics.getBackupMasterNames.size))
for server in cluster_metrics.getBackupMasterNames
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
end
puts(format('%d live servers', status.getServersSize))
for server in status.getServers
puts(format('%d live servers', cluster_metrics.getLiveServerMetrics.size))
for server in cluster_metrics.getLiveServerMetrics.keySet
puts(format(' %s:%d %d', server.getHostname, server.getPort, server.getStartcode))
puts(format(' %s', status.getLoad(server).toString))
load += status.getLoad(server).getNumberOfRequests
regions += status.getLoad(server).getNumberOfRegions
puts(format(' %s', cluster_metrics.getLiveServerMetrics.get(server).toString))
load += cluster_metrics.getLiveServerMetrics.get(server).getRequestCountPerSecond
regions += cluster_metrics.getLiveServerMetrics.get(server).getRegionMetrics.size
end
puts(format('%d dead servers', status.getDeadServers))
for server in status.getDeadServerNames
puts(format('%d dead servers', cluster_metrics.getDeadServerNames.size))
for server in cluster_metrics.getDeadServerNames
puts(format(' %s', server))
end
puts(format('Aggregate load: %d, regions: %d', load, regions))
else
puts "1 active master, #{status.getBackupMastersSize} backup masters, #{status.getServersSize} servers, #{status.getDeadServers} dead, #{format('%.4f', status.getAverageLoad)} average load"
puts "1 active master, #{cluster_metrics.getBackupMasterNames.size} backup masters,
#{cluster_metrics.getLiveServerMetrics.size} servers,
#{cluster_metrics.getDeadServerNames.size} dead,
#{format('%.4f', cluster_metrics.getAverageLoad)} average load"
end
end
@@ -1176,15 +1182,23 @@ module Hbase
end
#----------------------------------------------------------------------------------------------
# Returns the ClusterStatus of the cluster
def getClusterStatus
org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics)
# Returns the whole ClusterMetrics containing details:
#
# hbase version
# cluster id
# primary/backup master(s)
# master's coprocessors
# live/dead regionservers
# balancer
# regions in transition
def getClusterMetrics
@admin.getClusterMetrics
end
#----------------------------------------------------------------------------------------------
# Returns a list of regionservers
def getRegionServers
org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getServers.map { |serverName| serverName }
@admin.getClusterMetrics.getLiveServerMetrics.keySet.map { |server_name| server_name }
end
#----------------------------------------------------------------------------------------------
@@ -1447,7 +1461,7 @@ module Hbase
#----------------------------------------------------------------------------------------------
# List live region servers
def list_liveservers
org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getServers.to_a
@admin.getClusterMetrics.getLiveServerMetrics.keySet.to_a
end
#---------------------------------------------------------------------------

View File

@@ -79,7 +79,7 @@ module Hbase
java_import 'java.io.InputStreamReader'
java_import 'org.apache.hbase.thirdparty.com.google.gson.JsonParser'
infoport = org.apache.hadoop.hbase.ClusterStatus.new(@admin.getClusterMetrics).getLoad(host).getInfoServerPort.to_s
infoport = @admin.getClusterMetrics.getLiveServerMetrics.get(host).getInfoServerPort.to_s
# Note: This condition use constants from hbase-server
# if (!@admin.getConfiguration().getBoolean(org.apache.hadoop.hbase.http.ServerConfigurationKeys::HBASE_SSL_ENABLED_KEY,

View File

@@ -81,7 +81,7 @@ EOF
admin_instance = admin.instance_variable_get('@admin')
conn_instance = admin_instance.getConnection
cluster_status = org.apache.hadoop.hbase.ClusterStatus.new(admin_instance.getClusterMetrics)
cluster_metrics = admin_instance.getClusterMetrics
hregion_locator_instance = conn_instance.getRegionLocator(TableName.valueOf(table_name))
hregion_locator_list = hregion_locator_instance.getAllRegionLocations.to_a
results = []
@@ -104,22 +104,23 @@ EOF
regions.each do |hregion|
hregion_info = hregion.getRegion
server_name = hregion.getServerName
server_load = cluster_status.getLoad(server_name)
if server_load.nil?
region_load_map = java.util.HashMap.new
server_metrics_map = cluster_metrics.getLiveServerMetrics
server_metrics = server_metrics_map.get(server_name)
if server_metrics.nil?
region_metrics_map = java.util.HashMap.new
else
region_load_map = server_load.getRegionsLoad
region_metrics_map = server_metrics.getRegionMetrics
end
region_name = hregion_info.getRegionNameAsString
region_load = region_load_map.get(hregion_info.getRegionName)
region_metrics = region_metrics_map.get(hregion_info.getRegionName)
if region_load.nil?
if region_metrics.nil?
puts "Can not find all details for region: " \
"#{region_name.strip} ," \
" it may be disabled or in transition\n"
else
# Ignore regions which exceed our locality threshold
next unless accept_region_for_locality? region_load.getDataLocality,
next unless accept_region_for_locality? region_metrics.getDataLocality,
locality_threshold
end
result_hash = {}
@@ -147,30 +148,30 @@ EOF
end
if size_hash.key?('SIZE')
if region_load.nil?
if region_metrics.nil?
region_store_file_size = ''
else
region_store_file_size = region_load.getStorefileSizeMB.to_s.strip
region_store_file_size = region_metrics.getStoreFileSize.to_s.strip
end
result_hash.store('SIZE', region_store_file_size)
size_hash['SIZE'] = [size_hash['SIZE'], region_store_file_size.length].max
end
if size_hash.key?('REQ')
if region_load.nil?
if region_metrics.nil?
region_requests = ''
else
region_requests = region_load.getRequestsCount.to_s.strip
region_requests = region_metrics.getRequestCount.to_s.strip
end
result_hash.store('REQ', region_requests)
size_hash['REQ'] = [size_hash['REQ'], region_requests.length].max
end
if size_hash.key?('LOCALITY')
if region_load.nil?
if region_metrics.nil?
locality = ''
else
locality = region_load.getDataLocality.to_s.strip
locality = region_metrics.getDataLocality.to_s.strip
end
result_hash.store('LOCALITY', locality)
size_hash['LOCALITY'] = [size_hash['LOCALITY'], locality.length].max

View File

@@ -29,7 +29,7 @@ Examples:
end
def command
rit = admin.getClusterStatus.getRegionStatesInTransition
rit = admin.getClusterMetrics.getRegionStatesInTransition
rit.each do |v|
formatter.row([v.toDescriptiveString])
end