# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# View the current status of all regions on an HBase cluster. This is
# predominantly used to determine whether all the regions in META have been
# onlined yet on startup.
#
# To use this script, run:
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main region_status.rb [wait] [--table <table_name>]
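#
# For example, to block until every region of a table named 'mytable'
# (an illustrative table name) has been assigned:
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main region_status.rb wait --table mytable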

require 'optparse'

usage = 'Usage : ./hbase org.jruby.Main region_status.rb [wait] ' \
        "[--table <table_name>]\n"
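# Parse command-line options; --table limits the check to a single table.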
OptionParser.new do |o|
  o.banner = usage
  o.on('-t', '--table TABLENAME', 'Only process TABLENAME') do |tablename|
    $tablename = tablename
  end
  o.on('-h', '--help', 'Display help message') { puts o; exit }
  o.parse!
end

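# A bare 'wait' argument makes the script poll until the region counts match.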
SHOULD_WAIT = ARGV[0] == 'wait'
if ARGV[0] && !SHOULD_WAIT
  print usage
  exit 1
end

require 'java'

java_import org.apache.hadoop.hbase.HBaseConfiguration
java_import org.apache.hadoop.hbase.TableName
java_import org.apache.hadoop.hbase.HConstants
java_import org.apache.hadoop.hbase.MasterNotRunningException
java_import org.apache.hadoop.hbase.client.HBaseAdmin
java_import org.apache.hadoop.hbase.client.Table
java_import org.apache.hadoop.hbase.client.Scan
java_import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
java_import org.apache.hadoop.hbase.util.Bytes
java_import org.apache.hadoop.hbase.HRegionInfo
java_import org.apache.hadoop.hbase.MetaTableAccessor
java_import org.apache.hadoop.hbase.HTableDescriptor
java_import org.apache.hadoop.hbase.client.ConnectionFactory

# disable debug logging on this script for clarity
log_level = org.apache.log4j.Level::ERROR
org.apache.log4j.Logger.getLogger('org.apache.zookeeper').setLevel(log_level)
org.apache.log4j.Logger.getLogger('org.apache.hadoop.hbase').setLevel(log_level)

config = HBaseConfiguration.create
config.set 'fs.defaultFS', config.get(HConstants::HBASE_DIR)
connection = ConnectionFactory.createConnection(config)
# wait until the master is running
admin = nil
loop do
  begin
    admin = connection.getAdmin
    break
  rescue MasterNotRunningException => e
    print "Waiting for master to start...\n"
    sleep 1
  end
end
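# Running totals: regions listed in META vs. regions currently deployed on region servers.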
meta_count = 0
server_count = 0

# scan META to see how many regions we should have
if $tablename.nil?
  scan = Scan.new
else
  tableNameMetaPrefix = $tablename + HConstants::META_ROW_DELIMITER.chr
  scan = Scan.new(
    (tableNameMetaPrefix + HConstants::META_ROW_DELIMITER.chr).to_java_bytes
  )
end
scan.setCacheBlocks(false)
scan.setCaching(10)
scan.setFilter(FirstKeyOnlyFilter.new)
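# Only the info:regioninfo column is requested, and the filter above returns a
# single cell per row, which is all we need to count regions.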
INFO = 'info'.to_java_bytes
REGION_INFO = 'regioninfo'.to_java_bytes
scan.addColumn INFO, REGION_INFO
table = nil
iter = nil
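# Retry the META scan until it succeeds; META itself may not be online yet.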
loop do
  begin
    table = connection.getTable(TableName.valueOf('hbase:meta'))
    scanner = table.getScanner(scan)
    iter = scanner.iterator
    break
  rescue java.io.IOException => ioe
    print "Exception trying to scan META: #{ioe}\n"
    sleep 1
  end
end
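# Count every region recorded in META, skipping regions marked offline.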
while iter.hasNext
  result = iter.next
  rowid = Bytes.toString(result.getRow)
  rowidStr = java.lang.String.new(rowid)
  if !$tablename.nil? && !rowidStr.startsWith(tableNameMetaPrefix)
    # Gone too far, break
    break
  end

  region = MetaTableAccessor.getHRegionInfo(result)
  unless region.isOffline
    # only include regions that should be online
    meta_count += 1
  end
end
scanner.close
# If we're trying to see the status of all HBase tables, we need to include the
# hbase:meta table, which is not included in our scan
meta_count += 1 if $tablename.nil?

# query the master to see how many regions are on region servers
$TableName = TableName.valueOf($tablename.to_java_bytes) unless $tablename.nil?
loop do
  if $tablename.nil?
    server_count = admin.getClusterMetrics.getRegionCount
  else
    connection = ConnectionFactory.createConnection(config)
    server_count = MetaTableAccessor.allTableRegions(connection, $TableName).size
  end
  print "Region Status: #{server_count} / #{meta_count}\n"
  if SHOULD_WAIT && server_count < meta_count
    # continue this loop until server & meta count match
    sleep 10
  else
    break
  end
end
admin.close
connection.close

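# Exit 0 only if every region listed in META is reported as deployed.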
exit server_count == meta_count ? 0 : 1