HBASE-4715 Remove stale broken .rb scripts from bin dir

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1196166 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-11-01 17:54:20 +00:00
parent f637b4d4e8
commit fcc01d275a
6 changed files with 1 addition and 517 deletions


@@ -66,6 +66,7 @@ Release 0.92.0 - Unreleased
HBASE-4503 Purge deprecated HBaseClusterTestCase
HBASE-4374 Up default regions size from 256M to 1G
HBASE-4648 Bytes.toBigDecimal() doesn't use offset (Brian Keller via Lars H)
HBASE-4715 Remove stale broken .rb scripts from bin dir
BUG FIXES
HBASE-3280 YouAreDeadException being swallowed in HRS getMaster


@@ -1,82 +0,0 @@
#
# Copyright 2009 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Script adds a table back to a running HBase instance.
# Currently only works if the table data is already in place.
#
# To see usage for this script, run:
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main addtable.rb
#
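# A hypothetical invocation (the .regioninfo path below is made up; point it
# at the region's actual .regioninfo file under hbase.rootdir):
#
#   ${HBASE_HOME}/bin/hbase org.jruby.Main addtable.rb hdfs://namenode:8020/hbase/MyTable/1028785192/.regioninfo
#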
include Java
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.regionserver.HRegion
import org.apache.hadoop.hbase.HRegionInfo
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.util.FSUtils
import org.apache.hadoop.hbase.util.Writables
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem
import org.apache.commons.logging.LogFactory
# Name of this script
NAME = "addtable"
# Print usage for this script
def usage
  puts 'Usage: %s.rb <PATH_TO_REGIONINFO>' % NAME
  exit!
end
# Get configuration to use.
c = HBaseConfiguration.new()
# Set hadoop filesystem configuration using the hbase.rootdir.
# Otherwise, we'll always use localhost even though the hbase.rootdir
# might be pointing at an hdfs location.
c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
fs = FileSystem.get(c)
# Get a logger and a metautils instance.
LOG = LogFactory.getLog(NAME)
# Check arguments
if ARGV.size != 1
  usage
end
# Get cmdline args.
regioninfo = fs.makeQualified(Path.new(java.lang.String.new(ARGV[0])))
if not fs.exists(regioninfo)
  raise IOError.new("regioninfo " + regioninfo.toString() + " doesn't exist!")
end
is = fs.open(regioninfo)
hri = HRegionInfo.new()
hri.readFields(is)
is.close()
# Open the .META. catalog table and add the region's HRegionInfo to it.
metaTable = HTable.new(c, HConstants::META_TABLE_NAME)
p = Put.new(hri.getRegionName())
p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
metaTable.put(p)
LOG.info("Added to catalog: " + hri.toString())


@@ -1,160 +0,0 @@
# Script looks at the HBase .META. table, verifying that its content is coherent.
#
# To see usage for this script, run:
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main check_meta.rb --help
#
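# For example, a report-and-repair pass might look like this (the --fix
# option is parsed below in this script):
#
#   ${HBASE_HOME}/bin/hbase org.jruby.Main check_meta.rb --fix
#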
# Copyright 2010 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
include Java
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.hbase.util.VersionInfo
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.util.FSUtils
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.util.Writables
import org.apache.hadoop.hbase.HRegionInfo
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.client.Put
# Name of this script
NAME = 'check_meta'
# Print usage for this script
def usage
  puts 'Usage: %s.rb [--fix]' % NAME
  puts '  --fix  Try to fix up meta issues'
  puts 'Script checks consistency of the .META. table. It reports if .META. has missing entries.'
  puts 'If you pass "--fix", it will try looking in the filesystem for the dropped region and, if it'
  puts 'finds a likely candidate, it will try plugging the .META. hole.'
  exit!
end
def isFixup
  # Are we to do fixup during this run?
  usage if ARGV.size > 1
  fixup = nil
  if ARGV.size == 1
    usage unless ARGV[0].downcase.match('--fix.*')
    fixup = 1
  end
  return fixup
end
def getConfiguration
  hbase_twenty = VersionInfo.getVersion().match('0\.20\..*')
  # Get configuration to use.
  if hbase_twenty
    c = HBaseConfiguration.new()
  else
    c = HBaseConfiguration.create()
  end
  # Set hadoop filesystem configuration using the hbase.rootdir.
  # Otherwise, we'll always use localhost even though the hbase.rootdir
  # might be pointing at an hdfs location. Set both the old and the new fs keys.
  c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
  c.set("fs.defaultFS", c.get(HConstants::HBASE_DIR))
  return c
end
def fixup(leftEdge, rightEdge, metatable, fs, rootdir)
  plugged = nil
  # Try and fix the passed holes in meta.
  tabledir = HTableDescriptor::getTableDir(rootdir, leftEdge.getTableDesc().getName())
  statuses = fs.listStatus(tabledir)
  for status in statuses
    next unless status.isDir()
    next if status.getPath().getName() == "compaction.dir"
    regioninfofile = Path.new(status.getPath(), ".regioninfo")
    unless fs.exists(regioninfofile)
      LOG.warn("Missing .regioninfo: " + regioninfofile.toString())
      next
    end
    is = fs.open(regioninfofile)
    hri = HRegionInfo.new()
    hri.readFields(is)
    is.close()
    next unless Bytes.equals(leftEdge.getEndKey(), hri.getStartKey())
    # TODO: Check against right edge to make sure this addition does not overflow right edge.
    # TODO: Check that the schema matches both left and right edges schemas.
    p = Put.new(hri.getRegionName())
    p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
    metatable.put(p)
    LOG.info("Plugged hole in .META. at: " + hri.toString())
    plugged = true
  end
  return plugged
end
fixup = isFixup()
# Get configuration
conf = getConfiguration()
# Filesystem
fs = FileSystem.get(conf)
# Rootdir
rootdir = FSUtils.getRootDir(conf)
# Get a logger and a metautils instance.
LOG = LogFactory.getLog(NAME)
# Scan the .META. looking for holes
metatable = HTable.new(conf, HConstants::META_TABLE_NAME)
scan = Scan.new()
scanner = metatable.getScanner(scan)
oldHRI = nil
bad = nil
while (result = scanner.next())
  rowid = Bytes.toString(result.getRow())
  bytes = result.getValue(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER)
  hri = Writables.getHRegionInfo(bytes)
  if oldHRI
    if oldHRI.isOffline() && Bytes.equals(oldHRI.getStartKey(), hri.getStartKey())
      # Presume offlined parent
    elsif Bytes.equals(oldHRI.getEndKey(), hri.getStartKey())
      # Start key of next matches end key of previous
    else
      LOG.warn("hole after " + oldHRI.toString())
      if fixup
        bad = 1 unless fixup(oldHRI, hri, metatable, fs, rootdir)
      else
        bad = 1
      end
    end
  end
  oldHRI = hri
end
scanner.close()
if bad
  LOG.info(".META. has holes")
else
  LOG.info(".META. is healthy")
end
# Exit 0 if meta is good, else non-zero.
exit(bad ? 1 : 0)


@@ -1,20 +0,0 @@
#
# Copyright 2009 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
puts 'DISABLED!!!! Use completebulkload instead. See tail of http://hbase.apache.org/bulk-loads.html'
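# A sketch of the replacement workflow, assuming the 0.92-era bulk load
# entry point (the "completebulkload" tool is implemented by
# org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles; verify the class
# name against your release):
#
#   ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles <hdfs-path-to-hfiles> <tablename>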


@@ -1,168 +0,0 @@
#
# Copyright 2009 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Script that renames a table in HBase. As written, it will not work in the
# rare case where there is more than one region in the .META. table (you'd
# have to have a really massive HBase install). It updates the HBase
# .META. and moves the directories in the filesystem. Use at your own
# risk. Before running you must DISABLE YOUR TABLE:
#
# hbase> disable "YOUR_TABLE_NAME"
#
# Enable the new table after the script completes.
#
# hbase> enable "NEW_TABLE_NAME"
#
# To see usage for this script, run:
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main rename_table.rb
#
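# For example, renaming "oldtable" to "newtable" (hypothetical table names):
#
#   hbase> disable "oldtable"
#   ${HBASE_HOME}/bin/hbase org.jruby.Main rename_table.rb oldtable newtable
#   hbase> enable "newtable"
#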
include Java
import org.apache.hadoop.hbase.util.MetaUtils
import org.apache.hadoop.hbase.util.FSUtils
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.util.Writables
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.HRegionInfo
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.regionserver.HRegion
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem
import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory
import java.util.TreeMap
# Name of this script
NAME = "rename_table"
# Print usage for this script
def usage
  puts 'Usage: %s.rb <OLD_NAME> <NEW_NAME>' % NAME
  exit!
end
# Ensure the passed 'dir' exists and is a directory, else raise an exception
def isDirExists(fs, dir)
  raise IOError.new("Does not exist: " + dir.toString()) unless fs.exists(dir)
  raise IOError.new("Not a directory: " + dir.toString()) unless fs.isDirectory(dir)
end
# Returns true if the region belongs to the passed table
def isTableRegion(tableName, hri)
  return Bytes.equals(hri.getTableDesc().getName(), tableName)
end
# Create new HRI based off passed 'oldHRI'
def createHRI(tableName, oldHRI)
  htd = oldHRI.getTableDesc()
  newHtd = HTableDescriptor.new(tableName)
  for family in htd.getFamilies()
    newHtd.addFamily(family)
  end
  return HRegionInfo.new(newHtd, oldHRI.getStartKey(), oldHRI.getEndKey(),
                         oldHRI.isSplit())
end
# Check arguments
if ARGV.size != 2
  usage
end
# Check good table names were passed.
oldTableName = ARGV[0]
newTableName = ARGV[1]
# Get configuration to use.
c = HBaseConfiguration.create()
# Set hadoop filesystem configuration using the hbase.rootdir.
# Otherwise, we'll always use localhost even though the hbase.rootdir
# might be pointing at an hdfs location.
c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
fs = FileSystem.get(c)
# If the new table directory does not exist, create it. Keep going if it
# already exists, because maybe we are rerunning the script after it failed
# the first time. Otherwise we are overwriting a pre-existing table.
rootdir = FSUtils.getRootDir(c)
oldTableDir = fs.makeQualified(Path.new(rootdir, Path.new(oldTableName)))
isDirExists(fs, oldTableDir)
newTableDir = fs.makeQualified(Path.new(rootdir, newTableName))
if !fs.exists(newTableDir)
  fs.mkdirs(newTableDir)
end
# Get a logger and a metautils instance.
LOG = LogFactory.getLog(NAME)
# Run through the meta table moving region mentions from old to new table name.
metaTable = HTable.new(c, HConstants::META_TABLE_NAME)
# TODO: Start the scan at the old table offset in .META.
scan = Scan.new()
scanner = metaTable.getScanner(scan)
while (result = scanner.next())
  rowid = Bytes.toString(result.getRow())
  oldHRI = Writables.getHRegionInfo(result.getValue(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER))
  if !oldHRI
    raise IOError.new("HRegionInfo is null for " + rowid)
  end
  next unless isTableRegion(oldTableName.to_java_bytes, oldHRI)
  puts oldHRI.toString()
  oldRDir = Path.new(oldTableDir, Path.new(oldHRI.getEncodedName().to_s))
  if !fs.exists(oldRDir)
    LOG.warn(oldRDir.toString() + " does not exist -- region " +
             oldHRI.getRegionNameAsString())
  else
    # Now make a new HRegionInfo to add to .META. for the new region.
    newHRI = createHRI(newTableName, oldHRI)
    puts newHRI.toString()
    newRDir = Path.new(newTableDir, Path.new(newHRI.getEncodedName().to_s))
    # Move the region in the filesystem
    LOG.info("Renaming " + oldRDir.toString() + " as " + newRDir.toString())
    fs.rename(oldRDir, newRDir)
    # Remove the old region from meta
    LOG.info("Removing " + rowid + " from .META.")
    d = Delete.new(result.getRow())
    metaTable.delete(d)
    # Create the 'new' region
    newR = HRegion.new(newTableDir, nil, fs, c, newHRI, nil)
    # Add the new row. NOTE: Presumption is that there is only one .META. region.
    # If not, we need to do the work to figure the proper region to add this new region to.
    LOG.info("Adding to meta: " + newR.toString())
    bytes = Writables.getBytes(newR.getRegionInfo())
    p = Put.new(newR.getRegionName())
    p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, bytes)
    metaTable.put(p)
    # Finally, update the .regioninfo under the new region location so it has the new name.
    regioninfofile = Path.new(newR.getRegionDir(), HRegion::REGIONINFO_FILE)
    fs.delete(regioninfofile, true)
    out = fs.create(regioninfofile)
    newR.getRegionInfo().write(out)
    out.close()
  end
end
scanner.close()
fs.delete(oldTableDir, true)
LOG.info("DONE")


@@ -1,87 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script must be used on a live cluster in order to fix .META.'s
# MEMSTORE_SIZE back to 64MB instead of the 16KB that was configured
# in the 0.20 era. This is only required if .META. was created at that time.
#
# After running this script, HBase needs to be restarted.
#
# To see usage for this script, run:
#
# ${HBASE_HOME}/bin/hbase org.jruby.Main set_meta_memstore_size.rb
#
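# A hypothetical post-run sanity check from the shell (after the restart),
# to confirm the catalog table's new flush size; the exact describe output
# varies by release:
#
#   hbase> describe '.META.'
#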
include Java
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.HRegionInfo
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.Delete
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.util.FSUtils
import org.apache.hadoop.hbase.util.Writables
import org.apache.hadoop.fs.Path
import org.apache.hadoop.fs.FileSystem
import org.apache.commons.logging.LogFactory
# Name of this script
NAME = "set_meta_memstore_size"
# Print usage for this script
def usage
  puts 'Usage: %s.rb' % NAME
  exit!
end
# Get configuration to use.
c = HBaseConfiguration.create()
# Set hadoop filesystem configuration using the hbase.rootdir.
# Otherwise, we'll always use localhost even though the hbase.rootdir
# might be pointing at an hdfs location.
c.set("fs.default.name", c.get(HConstants::HBASE_DIR))
fs = FileSystem.get(c)
# Get a logger and a metautils instance.
LOG = LogFactory.getLog(NAME)
# Check arguments
if ARGV.size > 0
  usage
end
# Scan the -ROOT- table for the .META. region rows and rewrite each
# HRegionInfo with a 64MB memstore flush size.
rootTable = HTable.new(c, HConstants::ROOT_TABLE_NAME)
scan = Scan.new()
scan.addColumn(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER)
scanner = rootTable.getScanner(scan)
while (result = scanner.next())
  rowid = Bytes.toString(result.getRow())
  LOG.info("Setting memstore to 64MB on: " + rowid)
  hriValue = result.getValue(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER)
  hri = Writables.getHRegionInfo(hriValue)
  htd = hri.getTableDesc()
  htd.setMemStoreFlushSize(64 * 1024 * 1024)
  p = Put.new(result.getRow())
  p.add(HConstants::CATALOG_FAMILY, HConstants::REGIONINFO_QUALIFIER, Writables.getBytes(hri))
  rootTable.put(p)
end
scanner.close()