HBASE-18238 rubocop autocorrect for bin/

This commit is contained in:
Mike Drob 2017-07-19 12:05:26 -05:00
parent 096dac2e83
commit ea8fa59a4c
7 changed files with 132 additions and 143 deletions

View File

@ -16,7 +16,7 @@
# limitations under the License.
#
# Add or remove servers from draining mode via zookeeper
# Add or remove servers from draining mode via zookeeper
require 'optparse'
include Java
@ -29,13 +29,13 @@ java_import org.apache.commons.logging.Log
java_import org.apache.commons.logging.LogFactory
# Name of this script
NAME = "draining_servers"
NAME = 'draining_servers'.freeze
# Do command-line parsing
options = {}
optparse = OptionParser.new do |opts|
opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
opts.separator 'Add remove or list servers in draining mode. Can accept either hostname to drain all region servers' +
opts.separator 'Add remove or list servers in draining mode. Can accept either hostname to drain all region servers' \
'in that host, a host:port pair or a host,port,startCode triplet. More than one server can be given separated by space'
opts.on('-h', '--help', 'Display usage information') do
puts opts
@ -51,117 +51,117 @@ optparse.parse!
# Return array of servernames where servername is hostname+port+startcode
# comma-delimited
def getServers(admin)
serverInfos = admin.getClusterStatus().getServers()
serverInfos = admin.getClusterStatus.getServers
servers = []
for server in serverInfos
servers << server.getServerName()
servers << server.getServerName
end
return servers
servers
end
def getServerNames(hostOrServers, config)
ret = []
connection = ConnectionFactory.createConnection(config)
for hostOrServer in hostOrServers
# check whether it is already serverName. No need to connect to cluster
parts = hostOrServer.split(',')
if parts.size() == 3
if parts.size == 3
ret << hostOrServer
else
admin = connection.getAdmin() if not admin
else
admin = connection.getAdmin unless admin
servers = getServers(admin)
hostOrServer = hostOrServer.gsub(/:/, ",")
for server in servers
hostOrServer = hostOrServer.tr(':', ',')
for server in servers
ret << server if server.start_with?(hostOrServer)
end
end
end
admin.close() if admin
connection.close()
return ret
admin.close if admin
connection.close
ret
end
def addServers(options, hostOrServers)
config = HBaseConfiguration.create()
def addServers(_options, hostOrServers)
config = HBaseConfiguration.create
servers = getServerNames(hostOrServers, config)
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 'draining_servers', nil)
parentZnode = zkw.znodePaths.drainingZNode
begin
for server in servers
node = ZKUtil.joinZNode(parentZnode, server)
ZKUtil.createAndFailSilent(zkw, node)
end
ensure
zkw.close()
zkw.close
end
end
def removeServers(options, hostOrServers)
config = HBaseConfiguration.create()
def removeServers(_options, hostOrServers)
config = HBaseConfiguration.create
servers = getServerNames(hostOrServers, config)
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 'draining_servers', nil)
parentZnode = zkw.znodePaths.drainingZNode
begin
for server in servers
node = ZKUtil.joinZNode(parentZnode, server)
ZKUtil.deleteNodeFailSilent(zkw, node)
end
ensure
zkw.close()
zkw.close
end
end
# list servers in draining mode
def listServers(options)
config = HBaseConfiguration.create()
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
def listServers(_options)
config = HBaseConfiguration.create
zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 'draining_servers', nil)
parentZnode = zkw.znodePaths.drainingZNode
servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
servers.each {|server| puts server}
servers.each { |server| puts server }
end
hostOrServers = ARGV[1..ARGV.size()]
hostOrServers = ARGV[1..ARGV.size]
# Create a logger and disable the DEBUG-level annoying client logging
def configureLogging(options)
apacheLogger = LogFactory.getLog(NAME)
# Configure log4j to not spew so much
unless (options[:debug])
logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
unless options[:debug]
logger = org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase')
logger.setLevel(org.apache.log4j.Level::WARN)
logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
logger = org.apache.log4j.Logger.getLogger('org.apache.zookeeper')
logger.setLevel(org.apache.log4j.Level::WARN)
end
return apacheLogger
apacheLogger
end
# Create a logger and save it to ruby global
$LOG = configureLogging(options)
case ARGV[0]
when 'add'
if ARGV.length < 2
puts optparse
exit 1
end
addServers(options, hostOrServers)
when 'remove'
if ARGV.length < 2
puts optparse
exit 1
end
removeServers(options, hostOrServers)
when 'list'
listServers(options)
else
when 'add'
if ARGV.length < 2
puts optparse
exit 3
exit 1
end
addServers(options, hostOrServers)
when 'remove'
if ARGV.length < 2
puts optparse
exit 1
end
removeServers(options, hostOrServers)
when 'list'
listServers(options)
else
puts optparse
exit 3
end

View File

@ -17,7 +17,7 @@
# Prints the hostname of the machine running the active master.
include Java
include Java
java_import org.apache.hadoop.hbase.HBaseConfiguration
java_import org.apache.hadoop.hbase.ServerName
java_import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher
@ -32,7 +32,7 @@ config = HBaseConfiguration.create
zk = ZooKeeperWatcher.new(config, 'get-active-master', nil)
begin
puts MasterAddressTracker.getMasterAddress(zk).getHostname()
puts MasterAddressTracker.getMasterAddress(zk).getHostname
ensure
zk.close()
zk.close
end

View File

@ -72,13 +72,13 @@ log_level = org.apache.log4j.Level::ERROR
interactive = true
for arg in ARGV
if arg =~ /^--format=(.+)/i
format = $1
format = Regexp.last_match(1)
if format =~ /^html$/i
raise NoMethodError.new("Not yet implemented")
raise NoMethodError, 'Not yet implemented'
elsif format =~ /^console$/i
# This is default
else
raise ArgumentError.new("Unsupported format " + arg)
raise ArgumentError, 'Unsupported format ' + arg
end
found.push(arg)
elsif arg == '-h' || arg == '--help'
@ -89,7 +89,7 @@ for arg in ARGV
$fullBackTrace = true
@shell_debug = true
found.push(arg)
puts "Setting DEBUG log level..."
puts 'Setting DEBUG log level...'
elsif arg == '-n' || arg == '--noninteractive'
interactive = false
found.push(arg)
@ -106,13 +106,11 @@ end
# Delete all processed args
found.each { |arg| ARGV.delete(arg) }
# Make sure debug flag gets back to IRB
if @shell_debug
ARGV.unshift('-d')
end
ARGV.unshift('-d') if @shell_debug
# Set logging level to avoid verboseness
org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
# Require HBase now after setting log levels
require 'hbase_constants'
@ -155,8 +153,8 @@ def debug
conf.back_trace_limit = 100
log_level = org.apache.log4j.Level::DEBUG
end
org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
debug?
end
@ -176,23 +174,23 @@ if interactive
# Output a banner message that tells users where to go for help
@shell.print_banner
require "irb"
require 'irb'
require 'irb/hirb'
module IRB
def self.start(ap_path = nil)
$0 = File::basename(ap_path, ".rb") if ap_path
$0 = File.basename(ap_path, '.rb') if ap_path
IRB.setup(ap_path)
@CONF[:IRB_NAME] = 'hbase'
@CONF[:AP_NAME] = 'hbase'
@CONF[:BACK_TRACE_LIMIT] = 0 unless $fullBackTrace
if @CONF[:SCRIPT]
hirb = HIRB.new(nil, @CONF[:SCRIPT])
else
hirb = HIRB.new
end
hirb = if @CONF[:SCRIPT]
HIRB.new(nil, @CONF[:SCRIPT])
else
HIRB.new
end
@CONF[:IRB_RC].call(hirb.context) if @CONF[:IRB_RC]
@CONF[:MAIN_CONTEXT] = hirb.context
@ -211,9 +209,9 @@ else
# in order to maintain compatibility with previous behavior where
# a user could pass in script2run and then still pipe commands on
# stdin.
require "irb/ruby-lex"
require "irb/workspace"
workspace = IRB::WorkSpace.new(binding())
require 'irb/ruby-lex'
require 'irb/workspace'
workspace = IRB::WorkSpace.new(binding)
scanner = RubyLex.new
# RubyLex claims to take an IO but really wants an InputMethod
@ -226,7 +224,7 @@ else
scanner.set_input(STDIN)
scanner.each_top_level_statement do |statement, linenum|
puts(workspace.evaluate(nil, statement, 'stdin', linenum))
puts(workspace.evaluate(nil, statement, 'stdin', linenum))
end
# XXX We're catching Exception on purpose, because we want to include
# unwrapped java exceptions, syntax errors, eval failures, etc.
@ -234,8 +232,8 @@ else
message = exception.to_s
# exception unwrapping in shell means we'll have to handle Java exceptions
# as a special case in order to format them properly.
if exception.kind_of? java.lang.Exception
$stderr.puts "java exception"
if exception.is_a? java.lang.Exception
$stderr.puts 'java exception'
message = exception.get_message
end
# Include the 'ERROR' string to try to make transition easier for scripts that

View File

@ -20,5 +20,5 @@
# not move a new region until successful confirm of region loading in new
# location. Presumes balancer is disabled when we run (not harmful if its
# on but this script and balancer will end up fighting each other).
$BIN=File.dirname(__FILE__)
$BIN = File.dirname(__FILE__)
exec "#{$BIN}/hbase org.apache.hadoop.hbase.util.RegionMover #{ARGV.join(' ')}"

View File

@ -22,11 +22,10 @@
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main region_status.rb [wait] [--table <table_name>]
require 'optparse'
usage = 'Usage : ./hbase org.jruby.Main region_status.rb [wait]' +
'[--table <table_name>]\n'
usage = 'Usage : ./hbase org.jruby.Main region_status.rb [wait]' \
'[--table <table_name>]\n'
OptionParser.new do |o|
o.banner = usage
o.on('-t', '--table TABLENAME', 'Only process TABLENAME') do |tablename|
@ -37,12 +36,11 @@ OptionParser.new do |o|
end
SHOULD_WAIT = ARGV[0] == 'wait'
if ARGV[0] and not SHOULD_WAIT
if ARGV[0] && !SHOULD_WAIT
print usage
exit 1
end
require 'java'
java_import org.apache.hadoop.hbase.HBaseConfiguration
@ -61,17 +59,17 @@ java_import org.apache.hadoop.hbase.client.ConnectionFactory
# disable debug logging on this script for clarity
log_level = org.apache.log4j.Level::ERROR
org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
config = HBaseConfiguration.create
config.set 'fs.defaultFS', config.get(HConstants::HBASE_DIR)
connection = ConnectionFactory.createConnection(config)
# wait until the master is running
admin = nil
while true
loop do
begin
admin = connection.getAdmin()
admin = connection.getAdmin
break
rescue MasterNotRunningException => e
print 'Waiting for master to start...\n'
@ -99,7 +97,7 @@ REGION_INFO = 'regioninfo'.to_java_bytes
scan.addColumn INFO, REGION_INFO
table = nil
iter = nil
while true
loop do
begin
table = connection.getTable(TableName.valueOf('hbase:meta'))
scanner = table.getScanner(scan)
@ -112,14 +110,14 @@ while true
end
while iter.hasNext
result = iter.next
rowid = Bytes.toString(result.getRow())
rowid = Bytes.toString(result.getRow)
rowidStr = java.lang.String.new(rowid)
if not $tablename.nil? and not rowidStr.startsWith(tableNameMetaPrefix)
if !$tablename.nil? && !rowidStr.startsWith(tableNameMetaPrefix)
# Gone too far, break
break
end
region = MetaTableAccessor::getHRegionInfo(result)
if not region.isOffline
region = MetaTableAccessor.getHRegionInfo(result)
unless region.isOffline
# only include regions that should be online
meta_count += 1
end
@ -127,30 +125,26 @@ end
scanner.close
# If we're trying to see the status of all HBase tables, we need to include the
# hbase:meta table, that is not included in our scan
if $tablename.nil?
meta_count += 1
end
meta_count += 1 if $tablename.nil?
# query the master to see how many regions are on region servers
if not $tablename.nil?
$TableName = TableName.valueOf($tablename.to_java_bytes)
end
while true
$TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
loop do
if $tablename.nil?
server_count = admin.getClusterStatus().getRegionsCount()
server_count = admin.getClusterStatus.getRegionsCount
else
connection = ConnectionFactory::createConnection(config);
server_count = MetaTableAccessor::allTableRegions(connection, $TableName).size()
connection = ConnectionFactory.createConnection(config)
server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size
end
print "Region Status: #{server_count} / #{meta_count}\n"
if SHOULD_WAIT and server_count < meta_count
#continue this loop until server & meta count match
if SHOULD_WAIT && server_count < meta_count
# continue this loop until server & meta count match
sleep 10
else
break
end
end
admin.close()
connection.close()
admin.close
connection.close
exit server_count == meta_count ? 0 : 1

View File

@ -34,20 +34,20 @@ java_import org.apache.hadoop.hbase.client.ConnectionFactory
java_import org.apache.hadoop.hbase.client.HBaseAdmin
# Name of this script
NAME = "copy_tables_desc"
NAME = 'copy_tables_desc'.freeze
# Print usage for this script
def usage
puts 'Usage: %s.rb master_zookeeper.quorum.peers:clientport:znode_parent slave_zookeeper.quorum.peers:clientport:znode_parent [table1,table2,table3,...]' % NAME
puts format('Usage: %s.rb master_zookeeper.quorum.peers:clientport:znode_parent slave_zookeeper.quorum.peers:clientport:znode_parent [table1,table2,table3,...]', NAME)
exit!
end
def copy (src, dst, table)
def copy(src, dst, table)
# verify if table exists in source cluster
begin
t = src.getTableDescriptor(TableName.valueOf(table))
rescue org.apache.hadoop.hbase.TableNotFoundException
puts "Source table \"%s\" doesn't exist, skipping." % table
puts format("Source table \"%s\" doesn't exist, skipping.", table)
return
end
@ -55,45 +55,42 @@ def copy (src, dst, table)
begin
dst.createTable(t)
rescue org.apache.hadoop.hbase.TableExistsException
puts "Destination table \"%s\" exists in remote cluster, skipping." % table
puts format('Destination table "%s" exists in remote cluster, skipping.', table)
return
end
puts "Schema for table \"%s\" was successfully copied to remote cluster." % table
puts format('Schema for table "%s" was successfully copied to remote cluster.', table)
end
if ARGV.size < 2 || ARGV.size > 3
usage
end
usage if ARGV.size < 2 || ARGV.size > 3
LOG = LogFactory.getLog(NAME)
parts1 = ARGV[0].split(":")
parts1 = ARGV[0].split(':')
parts2 = ARGV[1].split(":")
parts2 = ARGV[1].split(':')
parts3 = ARGV[2].split(",") unless ARGV[2].nil?
parts3 = ARGV[2].split(',') unless ARGV[2].nil?
c1 = HBaseConfiguration.create()
c1 = HBaseConfiguration.create
c1.set(HConstants::ZOOKEEPER_QUORUM, parts1[0])
c1.set("hbase.zookeeper.property.clientPort", parts1[1])
c1.set('hbase.zookeeper.property.clientPort', parts1[1])
c1.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts1[2])
connection1 = ConnectionFactory.createConnection(c1)
admin1 = connection1.getAdmin()
admin1 = connection1.getAdmin
c2 = HBaseConfiguration.create()
c2 = HBaseConfiguration.create
c2.set(HConstants::ZOOKEEPER_QUORUM, parts2[0])
c2.set("hbase.zookeeper.property.clientPort", parts2[1])
c2.set('hbase.zookeeper.property.clientPort', parts2[1])
c2.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts2[2])
connection2 = ConnectionFactory.createConnection(c2)
admin2 = connection2.getAdmin()
admin2 = connection2.getAdmin
if parts3.nil?
admin1.listTableNames().each do |t|
copy(admin1, admin2, t.nameAsString())
admin1.listTableNames.each do |t|
copy(admin1, admin2, t.nameAsString)
end
else
parts3.each do |t|
@ -101,7 +98,7 @@ else
end
end
admin1.close()
admin2.close()
connection1.close()
connection2.close()
admin1.close
admin2.close
connection1.close
connection2.close

View File

@ -27,24 +27,24 @@ java_import org.apache.hadoop.hbase.HBaseConfiguration
java_import org.apache.hadoop.hbase.client.HBaseAdmin
java_import org.apache.hadoop.hbase.client.ConnectionFactory
def usage(msg=nil)
def usage(msg = nil)
$stderr.puts 'Usage: shutdown_regionserver.rb <host:port>..'
$stderr.puts
$stderr.puts 'Stops the specified regionservers via RPC'
$stderr.puts 'Error: %s' % msg if msg
$stderr.puts format('Error: %s', msg) if msg
abort
end
usage if ARGV.length < 1
usage if ARGV.empty?
ARGV.each do |x|
usage 'Invalid host:port: %s' % x unless x.include? ':'
usage format('Invalid host:port: %s', x) unless x.include? ':'
end
config = HBaseConfiguration.create()
config = HBaseConfiguration.create
connection = ConnectionFactory.createConnection(config)
begin
admin = connection.getAdmin()
admin = connection.getAdmin
rescue
abort "Error: Couldn't instantiate HBaseAdmin"
end
@ -52,5 +52,5 @@ end
ARGV.each do |hostport|
admin.stopRegionServer(hostport)
end
admin.close()
connection.close()
admin.close
connection.close