HBASE-18238 rubocop autocorrect for bin/

Mike Drob 2017-07-19 12:05:26 -05:00
parent cf050de917
commit 4e9961b4fc
7 changed files with 132 additions and 143 deletions
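The diffs below are mechanical RuboCop style corrections; no behavioural change is intended. As a rough sketch of the recurring rewrites (single-quoted string literals, frozen string constants, no parentheses on zero-argument calls, implicit returns, `unless` in place of `if not`), the fragment below is hypothetical: the names `GREETING`, `greet` and `client` are made up, and it assumes an autocorrect run along the lines of `rubocop --auto-correct bin/` (the exact invocation and RuboCop configuration are not part of this commit).

# before autocorrect (illustrative only)
GREETING = "hello"
def greet(client)
  puts GREETING if not client.nil?
  return client.getName()
end

# after autocorrect
GREETING = 'hello'.freeze
def greet(client)
  puts GREETING unless client.nil?
  client.getName
end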


@@ -29,13 +29,13 @@ java_import org.apache.commons.logging.Log
 java_import org.apache.commons.logging.LogFactory
 # Name of this script
-NAME = "draining_servers"
+NAME = 'draining_servers'.freeze
 # Do command-line parsing
 options = {}
 optparse = OptionParser.new do |opts|
   opts.banner = "Usage: ./hbase org.jruby.Main #{NAME}.rb [options] add|remove|list <hostname>|<host:port>|<servername> ..."
-  opts.separator 'Add remove or list servers in draining mode. Can accept either hostname to drain all region servers' +
+  opts.separator 'Add remove or list servers in draining mode. Can accept either hostname to drain all region servers' \
   'in that host, a host:port pair or a host,port,startCode triplet. More than one server can be given separated by space'
   opts.on('-h', '--help', 'Display usage information') do
     puts opts
@@ -51,12 +51,12 @@ optparse.parse!
 # Return array of servernames where servername is hostname+port+startcode
 # comma-delimited
 def getServers(admin)
-  serverInfos = admin.getClusterStatus().getServers()
+  serverInfos = admin.getClusterStatus.getServers
   servers = []
   for server in serverInfos
-    servers << server.getServerName()
+    servers << server.getServerName
   end
-  return servers
+  servers
 end
 def getServerNames(hostOrServers, config)
@@ -66,29 +66,29 @@ def getServerNames(hostOrServers, config)
   for hostOrServer in hostOrServers
     # check whether it is already serverName. No need to connect to cluster
     parts = hostOrServer.split(',')
-    if parts.size() == 3
+    if parts.size == 3
       ret << hostOrServer
     else
-      admin = connection.getAdmin() if not admin
+      admin = connection.getAdmin unless admin
       servers = getServers(admin)
-      hostOrServer = hostOrServer.gsub(/:/, ",")
+      hostOrServer = hostOrServer.tr(':', ',')
       for server in servers
         ret << server if server.start_with?(hostOrServer)
       end
     end
   end
-  admin.close() if admin
-  connection.close()
-  return ret
+  admin.close if admin
+  connection.close
+  ret
 end
-def addServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
+def addServers(_options, hostOrServers)
+  config = HBaseConfiguration.create
   servers = getServerNames(hostOrServers, config)
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 'draining_servers', nil)
   parentZnode = zkw.znodePaths.drainingZNode
   begin
@@ -97,15 +97,15 @@ def addServers(options, hostOrServers)
       ZKUtil.createAndFailSilent(zkw, node)
     end
   ensure
-    zkw.close()
+    zkw.close
   end
 end
-def removeServers(options, hostOrServers)
-  config = HBaseConfiguration.create()
+def removeServers(_options, hostOrServers)
+  config = HBaseConfiguration.create
   servers = getServerNames(hostOrServers, config)
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 'draining_servers', nil)
   parentZnode = zkw.znodePaths.drainingZNode
   begin
@@ -114,54 +114,54 @@ def removeServers(options, hostOrServers)
       ZKUtil.deleteNodeFailSilent(zkw, node)
     end
   ensure
-    zkw.close()
+    zkw.close
   end
 end
 # list servers in draining mode
-def listServers(options)
-  config = HBaseConfiguration.create()
-  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, "draining_servers", nil)
+def listServers(_options)
+  config = HBaseConfiguration.create
+  zkw = org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher.new(config, 'draining_servers', nil)
   parentZnode = zkw.znodePaths.drainingZNode
   servers = ZKUtil.listChildrenNoWatch(zkw, parentZnode)
-  servers.each {|server| puts server}
+  servers.each { |server| puts server }
 end
-hostOrServers = ARGV[1..ARGV.size()]
+hostOrServers = ARGV[1..ARGV.size]
 # Create a logger and disable the DEBUG-level annoying client logging
 def configureLogging(options)
   apacheLogger = LogFactory.getLog(NAME)
   # Configure log4j to not spew so much
-  unless (options[:debug])
-    logger = org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase")
+  unless options[:debug]
+    logger = org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase')
     logger.setLevel(org.apache.log4j.Level::WARN)
-    logger = org.apache.log4j.Logger.getLogger("org.apache.zookeeper")
+    logger = org.apache.log4j.Logger.getLogger('org.apache.zookeeper')
     logger.setLevel(org.apache.log4j.Level::WARN)
   end
-  return apacheLogger
+  apacheLogger
 end
 # Create a logger and save it to ruby global
 $LOG = configureLogging(options)
 case ARGV[0]
 when 'add'
   if ARGV.length < 2
     puts optparse
     exit 1
   end
   addServers(options, hostOrServers)
 when 'remove'
   if ARGV.length < 2
     puts optparse
     exit 1
   end
   removeServers(options, hostOrServers)
 when 'list'
   listServers(options)
 else
   puts optparse
   exit 3
 end

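One rewrite above swaps `gsub(/:/, ",")` for `tr(':', ',')`; for single-character replacement the two are equivalent, so the hostname-to-servername matching behaves the same. A quick illustrative check in plain Ruby (the host value is made up):

'host.example.com:16020'.gsub(/:/, ',') # => "host.example.com,16020"
'host.example.com:16020'.tr(':', ',')   # => "host.example.com,16020"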

@@ -32,7 +32,7 @@ config = HBaseConfiguration.create
 zk = ZooKeeperWatcher.new(config, 'get-active-master', nil)
 begin
-  puts MasterAddressTracker.getMasterAddress(zk).getHostname()
+  puts MasterAddressTracker.getMasterAddress(zk).getHostname
 ensure
-  zk.close()
+  zk.close
 end


@@ -72,13 +72,13 @@ log_level = org.apache.log4j.Level::ERROR
 interactive = true
 for arg in ARGV
   if arg =~ /^--format=(.+)/i
-    format = $1
+    format = Regexp.last_match(1)
     if format =~ /^html$/i
-      raise NoMethodError.new("Not yet implemented")
+      raise NoMethodError, 'Not yet implemented'
     elsif format =~ /^console$/i
       # This is default
     else
-      raise ArgumentError.new("Unsupported format " + arg)
+      raise ArgumentError, 'Unsupported format ' + arg
     end
     found.push(arg)
   elsif arg == '-h' || arg == '--help'
@@ -89,7 +89,7 @@ for arg in ARGV
     $fullBackTrace = true
     @shell_debug = true
     found.push(arg)
-    puts "Setting DEBUG log level..."
+    puts 'Setting DEBUG log level...'
   elsif arg == '-n' || arg == '--noninteractive'
     interactive = false
     found.push(arg)
@@ -106,13 +106,11 @@ end
 # Delete all processed args
 found.each { |arg| ARGV.delete(arg) }
 # Make sure debug flag gets back to IRB
-if @shell_debug
-  ARGV.unshift('-d')
-end
+ARGV.unshift('-d') if @shell_debug
 # Set logging level to avoid verboseness
-org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
-org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
 # Require HBase now after setting log levels
 require 'hbase_constants'
@@ -155,8 +153,8 @@ def debug
     conf.back_trace_limit = 100
     log_level = org.apache.log4j.Level::DEBUG
   end
-  org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
-  org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
+  org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+  org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
   debug?
 end
@@ -176,22 +174,22 @@ if interactive
   # Output a banner message that tells users where to go for help
   @shell.print_banner
-  require "irb"
+  require 'irb'
   require 'irb/hirb'
   module IRB
     def self.start(ap_path = nil)
-      $0 = File::basename(ap_path, ".rb") if ap_path
+      $0 = File.basename(ap_path, '.rb') if ap_path
       IRB.setup(ap_path)
       @CONF[:IRB_NAME] = 'hbase'
       @CONF[:AP_NAME] = 'hbase'
       @CONF[:BACK_TRACE_LIMIT] = 0 unless $fullBackTrace
-      if @CONF[:SCRIPT]
-        hirb = HIRB.new(nil, @CONF[:SCRIPT])
-      else
-        hirb = HIRB.new
-      end
+      hirb = if @CONF[:SCRIPT]
+               HIRB.new(nil, @CONF[:SCRIPT])
+             else
+               HIRB.new
+             end
       @CONF[:IRB_RC].call(hirb.context) if @CONF[:IRB_RC]
@@ -211,9 +209,9 @@ else
   # in order to maintain compatibility with previous behavior where
   # a user could pass in script2run and then still pipe commands on
   # stdin.
-  require "irb/ruby-lex"
-  require "irb/workspace"
-  workspace = IRB::WorkSpace.new(binding())
+  require 'irb/ruby-lex'
+  require 'irb/workspace'
+  workspace = IRB::WorkSpace.new(binding)
   scanner = RubyLex.new
   # RubyLex claims to take an IO but really wants an InputMethod
@@ -234,8 +232,8 @@ else
     message = exception.to_s
     # exception unwrapping in shell means we'll have to handle Java exceptions
     # as a special case in order to format them properly.
-    if exception.kind_of? java.lang.Exception
-      $stderr.puts "java exception"
+    if exception.is_a? java.lang.Exception
+      $stderr.puts 'java exception'
       message = exception.get_message
     end
     # Include the 'ERROR' string to try to make transition easier for scripts that

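The shell-startup hunk above rewrites the `HIRB` construction so the whole `if/else` expression is assigned once instead of assigning inside each branch; the two forms are equivalent because `if` is an expression in Ruby. A minimal standalone sketch, with `flag` and `mode` as made-up names:

flag = true

# assignment inside each branch
if flag
  mode = 'interactive'
else
  mode = 'batch'
end

# single assignment of the expression's value (the autocorrected shape)
mode = if flag
         'interactive'
       else
         'batch'
       end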

@@ -20,5 +20,5 @@
 # not move a new region until successful confirm of region loading in new
 # location. Presumes balancer is disabled when we run (not harmful if its
 # on but this script and balancer will end up fighting each other).
-$BIN=File.dirname(__FILE__)
+$BIN = File.dirname(__FILE__)
 exec "#{$BIN}/hbase org.apache.hadoop.hbase.util.RegionMover #{ARGV.join(' ')}"


@@ -22,10 +22,9 @@
 #
 # ${HBASE_HOME}/bin/hbase org.jruby.Main region_status.rb [wait] [--table <table_name>]
 require 'optparse'
-usage = 'Usage : ./hbase org.jruby.Main region_status.rb [wait]' +
+usage = 'Usage : ./hbase org.jruby.Main region_status.rb [wait]' \
   '[--table <table_name>]\n'
 OptionParser.new do |o|
   o.banner = usage
@@ -37,12 +36,11 @@ OptionParser.new do |o|
 end
 SHOULD_WAIT = ARGV[0] == 'wait'
-if ARGV[0] and not SHOULD_WAIT
+if ARGV[0] && !SHOULD_WAIT
   print usage
   exit 1
 end
 require 'java'
 java_import org.apache.hadoop.hbase.HBaseConfiguration
@@ -61,17 +59,17 @@ java_import org.apache.hadoop.hbase.client.ConnectionFactory
 # disable debug logging on this script for clarity
 log_level = org.apache.log4j.Level::ERROR
-org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
-org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
+org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)
 config = HBaseConfiguration.create
 config.set 'fs.defaultFS', config.get(HConstants::HBASE_DIR)
 connection = ConnectionFactory.createConnection(config)
 # wait until the master is running
 admin = nil
-while true
+loop do
   begin
-    admin = connection.getAdmin()
+    admin = connection.getAdmin
     break
   rescue MasterNotRunningException => e
     print 'Waiting for master to start...\n'
@@ -99,7 +97,7 @@ REGION_INFO = 'regioninfo'.to_java_bytes
 scan.addColumn INFO, REGION_INFO
 table = nil
 iter = nil
-while true
+loop do
   begin
     table = connection.getTable(TableName.valueOf('hbase:meta'))
     scanner = table.getScanner(scan)
@@ -112,14 +110,14 @@ while true
 end
 while iter.hasNext
   result = iter.next
-  rowid = Bytes.toString(result.getRow())
+  rowid = Bytes.toString(result.getRow)
   rowidStr = java.lang.String.new(rowid)
-  if not $tablename.nil? and not rowidStr.startsWith(tableNameMetaPrefix)
+  if !$tablename.nil? && !rowidStr.startsWith(tableNameMetaPrefix)
     # Gone too far, break
     break
   end
-  region = MetaTableAccessor::getHRegionInfo(result)
-  if not region.isOffline
+  region = MetaTableAccessor.getHRegionInfo(result)
+  unless region.isOffline
     # only include regions that should be online
     meta_count += 1
   end
@@ -127,30 +125,26 @@ end
 scanner.close
 # If we're trying to see the status of all HBase tables, we need to include the
 # hbase:meta table, that is not included in our scan
-if $tablename.nil?
-  meta_count += 1
-end
+meta_count += 1 if $tablename.nil?
 # query the master to see how many regions are on region servers
-if not $tablename.nil?
-  $TableName = TableName.valueOf($tablename.to_java_bytes)
-end
-while true
+$TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
+loop do
   if $tablename.nil?
-    server_count = admin.getClusterStatus().getRegionsCount()
+    server_count = admin.getClusterStatus.getRegionsCount
   else
-    connection = ConnectionFactory::createConnection(config);
-    server_count = MetaTableAccessor::allTableRegions(connection, $TableName).size()
+    connection = ConnectionFactory.createConnection(config)
+    server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size
   end
   print "Region Status: #{server_count} / #{meta_count}\n"
-  if SHOULD_WAIT and server_count < meta_count
-    #continue this loop until server & meta count match
+  if SHOULD_WAIT && server_count < meta_count
+    # continue this loop until server & meta count match
    sleep 10
   else
     break
   end
 end
-admin.close()
-connection.close()
+admin.close
+connection.close
 exit server_count == meta_count ? 0 : 1

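The `while true` loops above become `loop do` blocks; both run until `break`, so the retry behaviour is unchanged. One difference worth noting is that a `loop do` block opens a new scope, so variables first assigned inside it are not visible afterwards; anything read after the loop (such as `admin` here) has to exist beforehand, which this script already ensures with `admin = nil`. A minimal sketch with made-up names:

result = nil  # pre-declare so the value survives the block
attempts = 0
loop do
  attempts += 1
  result = attempts * 2
  break if attempts >= 3
end
puts result # => 6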

@@ -34,20 +34,20 @@ java_import org.apache.hadoop.hbase.client.ConnectionFactory
 java_import org.apache.hadoop.hbase.client.HBaseAdmin
 # Name of this script
-NAME = "copy_tables_desc"
+NAME = 'copy_tables_desc'.freeze
 # Print usage for this script
 def usage
-  puts 'Usage: %s.rb master_zookeeper.quorum.peers:clientport:znode_parent slave_zookeeper.quorum.peers:clientport:znode_parent [table1,table2,table3,...]' % NAME
+  puts format('Usage: %s.rb master_zookeeper.quorum.peers:clientport:znode_parent slave_zookeeper.quorum.peers:clientport:znode_parent [table1,table2,table3,...]', NAME)
   exit!
 end
-def copy (src, dst, table)
+def copy(src, dst, table)
   # verify if table exists in source cluster
   begin
     t = src.getTableDescriptor(TableName.valueOf(table))
   rescue org.apache.hadoop.hbase.TableNotFoundException
-    puts "Source table \"%s\" doesn't exist, skipping." % table
+    puts format("Source table \"%s\" doesn't exist, skipping.", table)
     return
   end
@@ -55,45 +55,42 @@ def copy (src, dst, table)
   begin
     dst.createTable(t)
   rescue org.apache.hadoop.hbase.TableExistsException
-    puts "Destination table \"%s\" exists in remote cluster, skipping." % table
+    puts format('Destination table "%s" exists in remote cluster, skipping.', table)
     return
   end
-  puts "Schema for table \"%s\" was succesfully copied to remote cluster." % table
+  puts format('Schema for table "%s" was succesfully copied to remote cluster.', table)
 end
-if ARGV.size < 2 || ARGV.size > 3
-  usage
-end
+usage if ARGV.size < 2 || ARGV.size > 3
 LOG = LogFactory.getLog(NAME)
-parts1 = ARGV[0].split(":")
-parts2 = ARGV[1].split(":")
-parts3 = ARGV[2].split(",") unless ARGV[2].nil?
-c1 = HBaseConfiguration.create()
+parts1 = ARGV[0].split(':')
+parts2 = ARGV[1].split(':')
+parts3 = ARGV[2].split(',') unless ARGV[2].nil?
+c1 = HBaseConfiguration.create
 c1.set(HConstants::ZOOKEEPER_QUORUM, parts1[0])
-c1.set("hbase.zookeeper.property.clientPort", parts1[1])
+c1.set('hbase.zookeeper.property.clientPort', parts1[1])
 c1.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts1[2])
 connection1 = ConnectionFactory.createConnection(c1)
-admin1 = connection1.getAdmin()
-c2 = HBaseConfiguration.create()
+admin1 = connection1.getAdmin
+c2 = HBaseConfiguration.create
 c2.set(HConstants::ZOOKEEPER_QUORUM, parts2[0])
-c2.set("hbase.zookeeper.property.clientPort", parts2[1])
+c2.set('hbase.zookeeper.property.clientPort', parts2[1])
 c2.set(HConstants::ZOOKEEPER_ZNODE_PARENT, parts2[2])
 connection2 = ConnectionFactory.createConnection(c2)
-admin2 = connection2.getAdmin()
+admin2 = connection2.getAdmin
 if parts3.nil?
-  admin1.listTableNames().each do |t|
-    copy(admin1, admin2, t.nameAsString())
+  admin1.listTableNames.each do |t|
+    copy(admin1, admin2, t.nameAsString)
   end
 else
   parts3.each do |t|
@@ -101,7 +98,7 @@ else
   end
 end
-admin1.close()
-admin2.close()
-connection1.close()
-connection2.close()
+admin1.close
+admin2.close
+connection1.close
+connection2.close

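The `String#%` calls above become `format(...)`; with the same template and arguments, Kernel#format produces identical output, so the usage and log messages are unchanged. Illustrative check (the template is shortened here for brevity):

format('Usage: %s.rb <args>', 'copy_tables_desc') # => "Usage: copy_tables_desc.rb <args>"
'Usage: %s.rb <args>' % 'copy_tables_desc'        # => "Usage: copy_tables_desc.rb <args>"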

@@ -27,24 +27,24 @@ java_import org.apache.hadoop.hbase.HBaseConfiguration
 java_import org.apache.hadoop.hbase.client.HBaseAdmin
 java_import org.apache.hadoop.hbase.client.ConnectionFactory
-def usage(msg=nil)
+def usage(msg = nil)
   $stderr.puts 'Usage: shutdown_regionserver.rb <host:port>..'
   $stderr.puts
   $stderr.puts 'Stops the specified regionservers via RPC'
-  $stderr.puts 'Error: %s' % msg if msg
+  $stderr.puts format('Error: %s', msg) if msg
   abort
 end
-usage if ARGV.length < 1
+usage if ARGV.empty?
 ARGV.each do |x|
-  usage 'Invalid host:port: %s' % x unless x.include? ':'
+  usage format('Invalid host:port: %s', x) unless x.include? ':'
 end
-config = HBaseConfiguration.create()
+config = HBaseConfiguration.create
 connection = ConnectionFactory.createConnection(config)
 begin
-  admin = connection.getAdmin()
+  admin = connection.getAdmin
 rescue
   abort "Error: Couldn't instantiate HBaseAdmin"
 end
@@ -52,5 +52,5 @@ end
 ARGV.each do |hostport|
   admin.stopRegionServer(hostport)
 end
-admin.close()
-connection.close()
+admin.close
+connection.close