HBASE-487 Replace hql w/ a hbase-friendly jirb or jython shell

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@668880 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2008-06-17 23:58:05 +00:00
parent 542bff9b3b
commit 00cd1d2ca3
14 changed files with 445 additions and 127 deletions

View File

@ -1,7 +1,8 @@
# Results formatter # Results formatter
module Formatter module Formatter
class Formatter
# Base abstract class for results formatting. # Base abstract class for results formatting.
class Formatter
# Takes an output stream and a print width.
def initialize(o, w = 80) def initialize(o, w = 80)
raise TypeError.new("Type %s of parameter %s is not IO" % [o.class, o]) \ raise TypeError.new("Type %s of parameter %s is not IO" % [o.class, o]) \
unless o.instance_of? IO unless o.instance_of? IO
@ -10,16 +11,25 @@ module Formatter
@rowCount = 0 @rowCount = 0
end end
attr_reader :rowCount
def header(args = []) def header(args = [])
row(args) if args.length > 0 row(args, false) if args.length > 0
@rowCount = 0 @rowCount = 0
end end
def row(args = []) # Output a row.
# Inset is whether or not to offset row by a space.
def row(args = [], inset = true)
if not args or args.length == 0 if not args or args.length == 0
# Print out nothing # Print out nothing
return return
end end
if args.class == String
output(@maxWidth, args)
puts
return
end
# TODO: Look at the type. Is it RowResult? # TODO: Look at the type. Is it RowResult?
if args.length == 1 if args.length == 1
splits = split(@maxWidth, dump(args[0])) splits = split(@maxWidth, dump(args[0]))
@ -35,8 +45,15 @@ module Formatter
biggest = (splits2.length > splits1.length)? splits2.length: splits1.length biggest = (splits2.length > splits1.length)? splits2.length: splits1.length
index = 0 index = 0
while index < biggest while index < biggest
if inset
# Inset by one space if inset is set.
@out.print(" ") @out.print(" ")
end
output(col1width, splits1[index]) output(col1width, splits1[index])
if not inset
# Add extra space so second column lines up w/ second column output
@out.print(" ")
end
@out.print(" ") @out.print(" ")
output(col2width, splits2[index]) output(col2width, splits2[index])
index += 1 index += 1
@ -68,6 +85,9 @@ module Formatter
def dump(str) def dump(str)
# Remove double-quotes added by 'dump'. # Remove double-quotes added by 'dump'.
if str.instance_of? Fixnum
return
end
return str.dump.slice(1, str.length) return str.dump.slice(1, str.length)
end end
@ -82,7 +102,7 @@ module Formatter
return return
end end
# Only output elapsed time and row count if startTime passed # Only output elapsed time and row count if startTime passed
@out.puts("%d row(s) in %s seconds" % [@rowCount, Time.now - startTime]) @out.puts("%d row(s) in %.4f seconds" % [@rowCount, Time.now - startTime])
end end
end end

View File

@ -1,5 +1,33 @@
# HBase ruby classes # HBase ruby classes.
# Has wrapper classes for org.apache.hadoop.hbase.client.HBaseAdmin
# and for org.apache.hadoop.hbase.client.HTable. Classes take
# Formatters on construction and outputs any results using
# Formatter methods. These classes are only really for use by
# the hirb.rb HBase Shell script; they don't make much sense elsewhere.
# For example, the exists method on Admin class prints to the formatter
# whether the table exists and returns nil regardless.
include Java
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.io.BatchUpdate
import org.apache.hadoop.hbase.io.RowResult
import org.apache.hadoop.hbase.io.Cell
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.HColumnDescriptor
import org.apache.hadoop.hbase.HTableDescriptor
module HBase module HBase
# Keys recognized in the argument hashes passed to the shell commands
# (get, scan, etc.). Values are the literal strings users type.
COLUMN = "COLUMN"
COLUMNS = "COLUMNS"
TIMESTAMP = "TIMESTAMP"
# NAME and VERSIONS reuse the HConstants definitions so descriptors and
# the shell agree on key spelling.
NAME = HConstants::NAME
VERSIONS = HConstants::VERSIONS
# Scanner option keys.
STOPROW = "STOPROW"
STARTROW = "STARTROW"
LIMIT = "LIMIT"
# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
class Admin class Admin
def initialize(configuration, formatter) def initialize(configuration, formatter)
@admin = HBaseAdmin.new(configuration) @admin = HBaseAdmin.new(configuration)
@ -26,7 +54,7 @@ module HBase
end end
end end
if not found if not found
raise new ArgumentError.new("Failed to find table named " + tableName) raise ArgumentError.new("Failed to find table named " + tableName)
end end
@formatter.footer(now) @formatter.footer(now)
end end
@ -34,7 +62,8 @@ module HBase
def exists(tableName) def exists(tableName)
now = Time.now now = Time.now
@formatter.header() @formatter.header()
@formatter.row([@admin.tableExists(tableName).to_s]) e = @admin.tableExists(tableName)
@formatter.row([e.to_s])
@formatter.footer(now) @formatter.footer(now)
end end
@ -61,6 +90,7 @@ module HBase
@formatter.footer(now) @formatter.footer(now)
end end
# Pass tablename and an array of Hashes
def create(tableName, args) def create(tableName, args)
now = Time.now now = Time.now
# Pass table name and an array of Hashes. Later, test the last # Pass table name and an array of Hashes. Later, test the last
@ -71,10 +101,14 @@ module HBase
# hash specifications. TODO: Add table options handling. # hash specifications. TODO: Add table options handling.
htd = HTableDescriptor.new(tableName) htd = HTableDescriptor.new(tableName)
for arg in args for arg in args
if arg.instance_of? String
htd.addFamily(HColumnDescriptor.new(makeColumnName(arg)))
else
raise TypeError.new(arg.class.to_s + " of " + arg.to_s + " is not of Hash type") \ raise TypeError.new(arg.class.to_s + " of " + arg.to_s + " is not of Hash type") \
unless arg.instance_of? Hash unless arg.instance_of? Hash
htd.addFamily(hcd(arg)) htd.addFamily(hcd(arg))
end end
end
@admin.createTable(htd) @admin.createTable(htd)
@formatter.header() @formatter.header()
@formatter.footer(now) @formatter.footer(now)
@ -90,6 +124,18 @@ module HBase
@formatter.footer(now) @formatter.footer(now)
end end
# Make a legal column family name from the passed String.
# A family name must end in a colon; append one when it is missing.
# Fix: the original appended with '<<', mutating the caller's string
# as a side effect; this version leaves the argument untouched.
# @param arg [String] candidate column family name
# @return [String] name guaranteed to end in ':'
def makeColumnName(arg)
  # If a colon is already present it is either in the right place,
  # or an exception will come up out of the addFamily downstream.
  return arg if arg.index(':')
  arg + ':'
end
def hcd(arg) def hcd(arg)
# Return a new HColumnDescriptor made of passed args # Return a new HColumnDescriptor made of passed args
# TODO: This is brittle code. # TODO: This is brittle code.
@ -102,27 +148,204 @@ module HBase
name = arg[NAME] name = arg[NAME]
raise ArgumentError.new("Column family " + arg + " must have a name") \ raise ArgumentError.new("Column family " + arg + " must have a name") \
unless name unless name
# Check the family name for colon. Add it if missing. name = makeColumnName(name)
index = name.index(':')
if not index
# Add a colon. If already a colon, its in the right place,
# or an exception will come up out of the addFamily
name << ':'
end
# TODO: What encoding are Strings in jruby? # TODO: What encoding are Strings in jruby?
return HColumnDescriptor.new(name.to_java_bytes, return HColumnDescriptor.new(name.to_java_bytes,
# JRuby uses longs for ints. Need to convert. Also constants are String # JRuby uses longs for ints. Need to convert. Also constants are String
arg[MAX_VERSIONS]? arg[MAX_VERSIONS]: HColumnDescriptor::DEFAULT_MAX_VERSIONS, arg[VERSIONS]? arg[VERSIONS]: HColumnDescriptor::DEFAULT_VERSIONS,
arg[COMPRESSION]? HColumnDescriptor::CompressionType::valueOf(arg[COMPRESSION]): arg[HColumnDescriptor::COMPRESSION]? HColumnDescriptor::CompressionType::valueOf(arg[HColumnDescriptor::COMPRESSION]):
HColumnDescriptor::DEFAULT_COMPRESSION, HColumnDescriptor::DEFAULT_COMPRESSION,
arg[IN_MEMORY]? arg[IN_MEMORY]: HColumnDescriptor::DEFAULT_IN_MEMORY, arg[HColumnDescriptor::IN_MEMORY]? arg[HColumnDescriptor::IN_MEMORY]: HColumnDescriptor::DEFAULT_IN_MEMORY,
arg[BLOCKCACHE]? arg[BLOCKCACHE]: HColumnDescriptor::DEFAULT_BLOCKCACHE, arg[HColumnDescriptor::BLOCKCACHE]? arg[HColumnDescriptor::BLOCKCACHE]: HColumnDescriptor::DEFAULT_BLOCKCACHE,
arg[MAX_LENGTH]? arg[MAX_LENGTH]: HColumnDescriptor::DEFAULT_MAX_LENGTH, arg[HColumnDescriptor::LENGTH]? arg[HColumnDescriptor::LENGTH]: HColumnDescriptor::DEFAULT_LENGTH,
arg[TTL]? arg[TTL]: HColumnDescriptor::DEFAULT_TTL, arg[HColumnDescriptor::TTL]? arg[HColumnDescriptor::TTL]: HColumnDescriptor::DEFAULT_TTL,
arg[BLOOMFILTER]? arg[BLOOMFILTER]: HColumnDescriptor::DEFAULT_BLOOMFILTER) arg[HColumnDescriptor::BLOOMFILTER]? arg[HColumnDescriptor::BLOOMFILTER]: HColumnDescriptor::DEFAULT_BLOOMFILTER)
end end
end end
# Wrapper for org.apache.hadoop.hbase.client.HTable
class Table class Table
# Open the named HTable and remember the formatter used to print
# all results from this wrapper's commands.
# configuration: an HBaseConfiguration instance.
# tableName: name of the table to operate on.
# formatter: Formatter instance all output is written through.
def initialize(configuration, tableName, formatter)
@table = HTable.new(configuration, tableName)
@formatter = formatter
end
# Delete the cell at the given row/column coordinates, optionally at a
# specific timestamp.
# Fix: the original signature was (row, args) but the body referenced
# undefined locals 'timestamp' and 'column', so every call raised
# NameError. Take the intended parameters explicitly, mirroring put().
def delete(row, column, timestamp = nil)
  now = Time.now
  # Pin the BatchUpdate to a timestamp only when one was supplied.
  bu = timestamp ? BatchUpdate.new(row, timestamp) : BatchUpdate.new(row)
  bu.delete(column)
  @table.commit(bu)
  @formatter.header()
  @formatter.footer(now)
end
# Delete all cells in a row; optionally restrict the delete to a single
# column and/or to cells at or before the given timestamp.
def deleteall(row, column = nil, timestamp = HConstants::LATEST_TIMESTAMP)
  started_at = Time.now
  @table.deleteAll(row, column, timestamp)
  @formatter.header()
  @formatter.footer(started_at)
end
# Delete all cells in the given column family of a row, at or before
# the given timestamp.
# Fix: the timestamp parameter was accepted but never forwarded; pass
# it through to HTable#deleteFamily, whose signature (changed in this
# same commit) now requires a timestamp argument.
def deletefc(row, column_family, timestamp = HConstants::LATEST_TIMESTAMP)
  now = Time.now
  @table.deleteFamily(row, column_family, timestamp)
  @formatter.header()
  @formatter.footer(now)
end
# Scan the table over the passed array of column names. Optional args
# hash recognizes the LIMIT, FILTER, STARTROW, STOPROW and TIMESTAMP
# keys. Prints one formatter row per cell.
# Fixes: use the module-level key constants (declared alongside COLUMN
# et al.) instead of re-spelling the string literals, and drop the
# unused 'cell' local.
def scan(columns, args = {})
  now = Time.now
  if not columns or columns.length < 1
    raise ArgumentError.new("Must supply an array of columns to scan")
  end
  cs = columns.to_java(java.lang.String)
  limit = -1
  if args == nil or args.length <= 0
    # No options: scan everything from the start of the table.
    s = @table.getScanner(cs)
  else
    limit = args[LIMIT] || -1
    # No module constant declared for FILTER; keep the literal key.
    filter = args["FILTER"] || nil
    startrow = args[STARTROW] || ""
    stoprow = args[STOPROW] || nil
    timestamp = args[TIMESTAMP] || HConstants::LATEST_TIMESTAMP
    if stoprow
      # NOTE(review): the stoprow overload takes no filter — confirm
      # FILTER and STOPROW are meant to be mutually exclusive.
      s = @table.getScanner(cs, startrow, stoprow, timestamp)
    else
      s = @table.getScanner(cs, startrow, timestamp, filter)
    end
  end
  count = 0
  @formatter.header(["Row", "Column+Cell"])
  i = s.iterator()
  while i.hasNext()
    r = i.next()
    row = String.from_java_bytes r.getRow()
    for k, v in r
      column = String.from_java_bytes k
      @formatter.row([row, "column=%s, %s" % [column, v.toString()]])
    end
    count += 1
    # LIMIT counts rows, not cells.
    if limit != -1 and count >= limit
      break
    end
  end
  @formatter.footer(now)
end
# Put a cell 'value' at the given row/column coordinates, optionally
# at a specific timestamp.
# Fix: the original built BatchUpdate.new(row) on BOTH branches, so a
# supplied timestamp was silently dropped; pass it to the constructor.
def put(row, column, value, timestamp = nil)
  now = Time.now
  bu = timestamp ? BatchUpdate.new(row, timestamp) : BatchUpdate.new(row)
  bu.put(column, value.to_java_bytes)
  @table.commit(bu)
  @formatter.header()
  @formatter.footer(now)
end
# Get a row or specific cells from the table; pass the row key and
# optionally a hash with COLUMN (or COLUMNS), TIMESTAMP and VERSIONS
# keys. Results (RowResult or Cells) are printed via the formatter.
# Fix: dropped the dead locals 'h', 'v' and 'ts' that were assigned in
# the print path but never used; logic and output are unchanged.
def get(row, args = {})
  now = Time.now
  result = nil
  if args == nil or args.length == 0
    # Bare row get: all columns at latest timestamp.
    result = @table.getRow(row.to_java_bytes)
  else
    # Its a hash.
    columns = args[COLUMN]
    if columns == nil
      # Maybe they used the COLUMNS key
      columns = args[COLUMNS]
    end
    if columns == nil
      # May have passed TIMESTAMP and row only; wants all columns from ts.
      ts = args[TIMESTAMP]
      if not ts
        raise ArgumentError.new("Failed parse of " + args + ", " + args.class)
      end
      result = @table.getRow(row.to_java_bytes, ts)
    else
      # Columns are non-nil
      if columns.class == String
        # Single column
        result = @table.get(row, columns,
          args[TIMESTAMP]? args[TIMESTAMP]: HConstants::LATEST_TIMESTAMP,
          args[VERSIONS]? args[VERSIONS]: 1)
      elsif columns.class == Array
        result = @table.getRow(row, columns.to_java(:string),
          args[TIMESTAMP]? args[TIMESTAMP]: HConstants::LATEST_TIMESTAMP)
      else
        raise ArgumentError.new("Failed parse column argument type " +
          args + ", " + args.class)
      end
    end
  end
  # Print out results. Result can be Cell or RowResult.
  if result.instance_of? RowResult
    @formatter.header(["Column", "Cell"])
    if result
      for column, cell in result
        @formatter.row([(String.from_java_bytes column), cell.toString()])
      end
    end
  else
    # Presume Cells
    @formatter.header()
    if result
      for c in result
        @formatter.row([c.toString()])
      end
    end
  end
  @formatter.footer(now)
end
end
# Do a bit of testing.
# To run this test, do: ./bin/hbase org.jruby.Main bin/HBase.rb
# Runs only when this file is executed directly, not when required.
# Requires a live HBase cluster reachable with default configuration.
if $0 == __FILE__
# Add this directory to LOAD_PATH; presumption is that Formatter module
# sits beside this one. Then load it up.
$LOAD_PATH.unshift File.dirname($PROGRAM_NAME)
require 'Formatter'
# Make a console formatter
# NOTE(review): Formatter::Console is not defined in this file — it is
# expected to come from the sibling Formatter module; confirm it exists.
formatter = Formatter::Console.new(STDOUT)
# Now add in java and hbase classes
configuration = HBaseConfiguration.new()
admin = Admin.new(configuration, formatter)
# Create a table; drop old one if exists first.
TESTTABLE = "HBase_rb_testtable"
begin
admin.disable(TESTTABLE)
admin.drop(TESTTABLE)
rescue org.apache.hadoop.hbase.TableNotFoundException
# Just suppress not found exception
end
admin.create(TESTTABLE, [{NAME => 'x', VERSIONS => 5}])
# Presume it exists. If it doesn't, next items will fail.
table = Table.new(configuration, TESTTABLE, formatter)
# Write ten cells into column family 'x'.
for i in 1..10
table.put('x', 'x:%d' % i, 'x%d' % i)
end
# Single-cell get should report exactly one row through the formatter.
table.get('x', {COLUMN => 'x:1'})
if formatter.rowCount() != 1
raise IOError.new("Failed first put")
end
table.scan(['x:'])
# Clean up the test table.
admin.disable(TESTTABLE)
admin.drop(TESTTABLE)
end end
end end

View File

@ -8,17 +8,11 @@
# has to time out. # has to time out.
# TODO: Add support for listing and manipulating catalog tables, etc. # TODO: Add support for listing and manipulating catalog tables, etc.
# TODO: Fix 'irb: warn: can't alias help from irb_help.' in banner message # TODO: Fix 'irb: warn: can't alias help from irb_help.' in banner message
# TODO: Encoding; need to know how to go from ruby String to UTF-8 bytes
# Run the java magic include and import basic HBase types that will help ease # Run the java magic include and import basic HBase types that will help ease
# hbase hacking. # hbase hacking.
include Java include Java
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.HTable
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.HColumnDescriptor
import org.apache.hadoop.hbase.HConstants
import org.apache.hadoop.hbase.HTableDescriptor
import org.apache.hadoop.hbase.io.BatchUpdate
# Some goodies for hirb. Should these be left up to the user's discretion? # Some goodies for hirb. Should these be left up to the user's discretion?
require 'irb/completion' require 'irb/completion'
@ -63,8 +57,12 @@ for arg in found
end end
# Setup the HBase module. Create a configuration. If a master, set it. # Setup the HBase module. Create a configuration. If a master, set it.
@configuration = HBaseConfiguration.new() # Turn off retries in hbase and ipc. Human doesn't want to wait on N retries.
@configuration = org.apache.hadoop.hbase.HBaseConfiguration.new()
@configuration.set("hbase.master", master) if master @configuration.set("hbase.master", master) if master
@configuration.setInt("hbase.client.retries.number", 3)
@configuration.setInt("ipc.client.connect.max.retries", 3)
# Do lazy create of admin because if we are pointed at bad master, it will hang # Do lazy create of admin because if we are pointed at bad master, it will hang
# shell on startup trying to connect. # shell on startup trying to connect.
@admin = nil @admin = nil
@ -81,8 +79,9 @@ def promoteConstants(constants)
end end
end end
end end
promoteConstants(HColumnDescriptor.constants) promoteConstants(org.apache.hadoop.hbase.HColumnDescriptor.constants)
promoteConstants(HTableDescriptor.constants) promoteConstants(org.apache.hadoop.hbase.HTableDescriptor.constants)
promoteConstants(HBase.constants)
# Start of the hbase shell commands. # Start of the hbase shell commands.
@ -96,48 +95,86 @@ def help
# TODO: Add help to the commands themselves rather than keep it distinct # TODO: Add help to the commands themselves rather than keep it distinct
h = <<HERE h = <<HERE
HBASE SHELL COMMANDS: HBASE SHELL COMMANDS:
alter Alter column family schema in a table. Pass table name and a alter Alter column family schema; pass table name and a dictionary
dictionary specifying the new column family schema. Dictionaries specifying new column family schema. Dictionaries are described
are described below in the GENERAL NOTES section. Dictionary must below in the GENERAL NOTES section. Dictionary must include name
include name of column family to alter. For example, to change of column family to alter. For example, to change the 'f1' column
the 'f1' column family in table 't1' to have a MAX_VERSIONS of 5, family in table 't1' from defaults to instead keep a maximum of 5
do: cell VERSIONS, do:
hbase> alter 't1', {NAME => 'f1', MAX_VERSIONS => 5} hbase> alter 't1', {NAME => 'f1', VERSIONS => 5}
create Create table; pass a table name, a dictionary of specifications per create Create table; pass table name, a dictionary of specifications per
column family, and optionally, named parameters of table options. column family, and optionally a dictionary of table configuration.
Dictionaries are described below in the GENERAL NOTES section. Named Dictionaries are described below in the GENERAL NOTES section.
parameters are like dictionary elements with uppercase names For example, to create a table named 't1' with a single family named
(constants) as keys and a '=>' key/value delimiter. Parameters are 'f1' with an alternate maximum number of cells, type:
comma-delimited. For example, to create a table named 't1' with an
alternate maximum region size and a single family named 'f1' with an
alternate maximum number of cells and 'record' compression, type:
hbase> create 't1' {NAME => 'f1', MAX_VERSIONS => 5, \ hbase> create 't1' {NAME => 'f1', VERSIONS => 5}
COMPRESSION => 'RECORD'}, REGION_SIZE => 1024
For compression types, pass one of 'NONE', 'RECORD', or 'BLOCK' describe Describe the named table: e.g. "hbase> describe 't1'"
delete Put a delete cell value at specified table/row/column and optionally
timestamp coordinates. Deletes must match the deleted cell's
coordinates exactly. When scanning, a delete cell suppresses older
versions. Takes arguments like 'put' described below
deleteall Delete all cells; pass a table name, row and optionally, a column
and timestamp
deletefc Delete all in the named column family. Pass table name and family
describe Describe the named table. Outputs the table and family descriptors
drop Drop the named table. Table must first be disabled drop Drop the named table. Table must first be disabled
disable Disable the named table: e.g. "disable 't1'<RETURN>"
disable Disable the named table: e.g. "hbase> disable 't1'"
enable Enable the named table enable Enable the named table
exists Does the named table exist? e.g. "exists 't1'<RETURN>"
exit Exit the shell exists Does the named table exist? e.g. "hbase> exists 't1'"
list List all tables
exit Type "hbase> exit" to leave the HBase Shell
get Get row or cell contents; pass table name, row, and optionally
a dictionary of column(s), timestamp and versions. Examples:
hbase> get 't1', 'r1'
hbase> get 't1', 'r1', {COLUMN => 'c1'}
hbase> get 't1', 'r1', {COLUMN => ['c1', 'c2', 'c3']}
hbase> get 't1', 'r1', {TIMESTAMP => ts1, VERSIONS => 4}
list List all tables in hbase
put Put a cell value at specified table/row/column and optionally
timestamp coordinates. To put a cell value into table 't1' at
row 'r1' under column 'c1' marked with the time 'ts1', do:
hbase> put 't1', 'r1', 'c1', ts1
scan Scan a table; pass table name and an array of column names.
Optionally, pass a dictionary of options that includes one or more
of the following: LIMIT, FILTER, STARTROW, STOPROW, and TIMESTAMP.
For example, to scan column 'c1', and 'c2', in table 't1' returning
10 rows only:
hbase> scan 't1', ['c1', 'c2'], {LIMIT => 10}
version Output this HBase version version Output this HBase version
GENERAL NOTES: GENERAL NOTES:
Quote all names in the hbase shell such as table and column names. Don't Quote all names in the hbase shell such as table and column names. Don't
forget commas delimiting command parameters. Dictionaries of configuration used forget commas delimit command parameters. Type <RETURN> after entering a
in the creation and alteration of tables are ruby-style Hashes. They look like command to run it. Dictionaries of configuration used in the creation and
this: { 'key1' => 'value1', 'key2' => 'value2', ...}. They are opened and alteration of tables are ruby Hashes. They look like this:
closed with curley-braces. Key/values are delimited by the '=>' character
combination. Usually keys are predefined constants such as NAME, MAX_VERSIONS, {'key1' => 'value1', 'key2' => 'value2', ...}
COMPRESSION, MAX_LENGTH, TTL, etc. Constants do not need to be quoted. Type
They are opened and closed with curley-braces. Key/values are delimited by
the '=>' character combination. Usually keys are predefined constants such as
NAME, VERSIONS, COMPRESSION, etc. Constants do not need to be quoted. Type
'Object.constants' to see a (messy) list of all constants in the environment. 'Object.constants' to see a (messy) list of all constants in the environment.
See http://wiki.apache.org/hadoop/Hbase/Shell for more on the HBase Shell.
This HBase shell is the JRuby IRB with the above HBase-specific commands added.
For more on the HBase Shell, see http://wiki.apache.org/hadoop/Hbase/Shell
HERE HERE
puts h puts h
end end
@ -156,16 +193,21 @@ def admin()
@admin @admin
end end
def create(table_name, *args) def table(table)
admin().create(table_name, args) # Create new one each time
HBase::Table.new(@configuration, table, @formatter)
end end
def drop(table_name) def create(table, *args)
admin().drop(table_name) admin().create(table, args)
end end
def alter(table_name, args) def drop(table)
admin().alter(table_name, args) admin().drop(table)
end
def alter(table, args)
admin().alter(table, args)
end end
# Administration # Administration
@ -174,38 +216,48 @@ def list
admin().list() admin().list()
end end
def describe(table_name) def describe(table)
admin().describe(table_name) admin().describe(table)
end end
def enable(table_name) def enable(table)
admin().enable(table_name) admin().enable(table)
end end
def disable(table_name) def disable(table)
admin().disable(table_name) admin().disable(table)
end end
def exists(table_name) def exists(table)
admin().exists(table_name) admin().exists(table)
end end
# CRUD # CRUD
def get(table_name, row_key, *args) def get(table, row, args = {})
puts "Not implemented yet" table(table).get(row, args)
end end
def put(table_name, row_key, *args) def put(table, row, column, value, timestamp = nil)
puts "Not implemented yet" table(table).put(row, column, value, timestamp)
end end
def scan(table_name, start_key, end_key, *args) def scan(table, columns, args = {})
puts "Not implemented yet" table(table).scan(columns, args)
end end
def delete(table_name, row_key, *args) def delete(table, row, *args)
puts "Not implemented yet" table(table).get(row, args)
end
# Shell command: delete all cells in a row of the named table,
# optionally restricted to one column and/or a timestamp.
# Fix: the original dispatched to Table#get, so nothing was ever
# deleted; route to Table#deleteall.
def deleteall(table, row, column = nil,
    timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP)
  table(table).deleteall(row, column, timestamp)
end
# Shell command: delete all cells in the given column family of a row
# in the named table, at or before the given timestamp.
# Fix: the original dispatched to Table#get, so nothing was ever
# deleted; route to Table#deletefc.
def deletefc(table, row, column_family,
    timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP)
  table(table).deletefc(row, column_family, timestamp)
end
# Output a banner message that tells users where to go for help # Output a banner message that tells users where to go for help
@ -226,8 +278,8 @@ module IRB
# @EXTEND_COMMANDS.each{|x| puts x if x[0] == :irb_help} # @EXTEND_COMMANDS.each{|x| puts x if x[0] == :irb_help}
end end
# Subclass of IRB so can intercept methods
class HIRB < Irb class HIRB < Irb
# Subclass irb so can intercept methods
def output_value def output_value
# Suppress output if last_value is 'nil' # Suppress output if last_value is 'nil'

View File

@ -57,12 +57,10 @@ public class HColumnDescriptor implements WritableComparable {
} }
// Defines for jruby/shell // Defines for jruby/shell
public static final String NAME = "NAME";
public static final String MAX_VERSIONS = "MAX_VERSIONS";
public static final String COMPRESSION = "COMPRESSION"; public static final String COMPRESSION = "COMPRESSION";
public static final String IN_MEMORY = "IN_MEMORY"; public static final String IN_MEMORY = "IN_MEMORY";
public static final String BLOCKCACHE = "BLOCKCACHE"; public static final String BLOCKCACHE = "BLOCKCACHE";
public static final String MAX_LENGTH = "MAX_LENGTH"; public static final String LENGTH = "LENGTH";
public static final String TTL = "TTL"; public static final String TTL = "TTL";
public static final String BLOOMFILTER = "BLOOMFILTER"; public static final String BLOOMFILTER = "BLOOMFILTER";
public static final String FOREVER = "FOREVER"; public static final String FOREVER = "FOREVER";
@ -76,12 +74,12 @@ public class HColumnDescriptor implements WritableComparable {
/** /**
* Default number of versions of a record to keep. * Default number of versions of a record to keep.
*/ */
public static final int DEFAULT_MAX_VERSIONS = 3; public static final int DEFAULT_VERSIONS = 3;
/** /**
* Default maximum cell length. * Default maximum cell length.
*/ */
public static final int DEFAULT_MAX_LENGTH = Integer.MAX_VALUE; public static final int DEFAULT_LENGTH = Integer.MAX_VALUE;
/** /**
* Default setting for whether to serve from memory or not. * Default setting for whether to serve from memory or not.
@ -93,11 +91,6 @@ public class HColumnDescriptor implements WritableComparable {
*/ */
public static final boolean DEFAULT_BLOCKCACHE = false; public static final boolean DEFAULT_BLOCKCACHE = false;
/**
* Default maximum length of cell contents.
*/
public static final int DEFAULT_MAX_VALUE_LENGTH = Integer.MAX_VALUE;
/** /**
* Default time to live of cell contents. * Default time to live of cell contents.
*/ */
@ -111,7 +104,7 @@ public class HColumnDescriptor implements WritableComparable {
// Column family name // Column family name
private byte [] name; private byte [] name;
// Number of versions to keep // Number of versions to keep
private int maxVersions = DEFAULT_MAX_VERSIONS; private int maxVersions = DEFAULT_VERSIONS;
// Compression setting if any // Compression setting if any
private CompressionType compressionType = DEFAULT_COMPRESSION; private CompressionType compressionType = DEFAULT_COMPRESSION;
// Serve reads from in-memory cache // Serve reads from in-memory cache
@ -119,7 +112,7 @@ public class HColumnDescriptor implements WritableComparable {
// Serve reads from in-memory block cache // Serve reads from in-memory block cache
private boolean blockCacheEnabled = DEFAULT_BLOCKCACHE; private boolean blockCacheEnabled = DEFAULT_BLOCKCACHE;
// Maximum value size // Maximum value size
private int maxValueLength = DEFAULT_MAX_LENGTH; private int maxValueLength = DEFAULT_LENGTH;
// Time to live of cell contents, in seconds from last timestamp // Time to live of cell contents, in seconds from last timestamp
private int timeToLive = DEFAULT_TTL; private int timeToLive = DEFAULT_TTL;
// True if bloom filter was specified // True if bloom filter was specified
@ -163,7 +156,7 @@ public class HColumnDescriptor implements WritableComparable {
public HColumnDescriptor(final byte [] columnName) { public HColumnDescriptor(final byte [] columnName) {
this (columnName == null || columnName.length <= 0? this (columnName == null || columnName.length <= 0?
HConstants.EMPTY_BYTE_ARRAY: columnName, HConstants.EMPTY_BYTE_ARRAY: columnName,
DEFAULT_MAX_VERSIONS, DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY, DEFAULT_VERSIONS, DEFAULT_COMPRESSION, DEFAULT_IN_MEMORY,
DEFAULT_BLOCKCACHE, DEFAULT_BLOCKCACHE,
Integer.MAX_VALUE, DEFAULT_TTL, Integer.MAX_VALUE, DEFAULT_TTL,
DEFAULT_BLOOMFILTER); DEFAULT_BLOOMFILTER);
@ -311,12 +304,12 @@ public class HColumnDescriptor implements WritableComparable {
/** {@inheritDoc} */ /** {@inheritDoc} */
@Override @Override
public String toString() { public String toString() {
return "{" + NAME + " => '" + Bytes.toString(name) + return "{" + HConstants.NAME + " => '" + Bytes.toString(name) +
"', " + MAX_VERSIONS + " => " + maxVersions + "', " + HConstants.VERSIONS + " => " + maxVersions +
", " + COMPRESSION + " => '" + this.compressionType + ", " + COMPRESSION + " => '" + this.compressionType +
"', " + IN_MEMORY + " => " + inMemory + "', " + IN_MEMORY + " => " + inMemory +
", " + BLOCKCACHE + " => " + blockCacheEnabled + ", " + BLOCKCACHE + " => " + blockCacheEnabled +
", " + MAX_LENGTH + " => " + maxValueLength + ", " + LENGTH + " => " + maxValueLength +
", " + TTL + " => " + ", " + TTL + " => " +
(timeToLive == HConstants.FOREVER ? "FOREVER" : (timeToLive == HConstants.FOREVER ? "FOREVER" :
Integer.toString(timeToLive)) + Integer.toString(timeToLive)) +

View File

@ -220,4 +220,6 @@ public interface HConstants {
"hbase.client.retries.number"; "hbase.client.retries.number";
public static final int DEFAULT_CLIENT_RETRIES = 5; public static final int DEFAULT_CLIENT_RETRIES = 5;
public static final String NAME = "NAME";
public static final String VERSIONS = "VERSIONS";
} }

View File

@ -339,7 +339,7 @@ public class HRegionInfo implements WritableComparable {
*/ */
@Override @Override
public String toString() { public String toString() {
return "REGION => {" + HColumnDescriptor.NAME + " => '" + return "REGION => {" + HConstants.NAME + " => '" +
this.regionNameStr + this.regionNameStr +
"', STARTKEY => '" + "', STARTKEY => '" +
Bytes.toString(this.startKey) + "', ENDKEY => '" + Bytes.toString(this.startKey) + "', ENDKEY => '" +

View File

@ -410,8 +410,7 @@ public class HConnectionManager implements HConstants {
metaLocation.getRegionInfo().getRegionName(), metaKey); metaLocation.getRegionInfo().getRegionName(), metaKey);
if (regionInfoRow == null) { if (regionInfoRow == null) {
throw new TableNotFoundException("Table '" + throw new TableNotFoundException(Bytes.toString(tableName));
Bytes.toString(tableName) + "' does not exist.");
} }
Cell value = regionInfoRow.get(COL_REGIONINFO); Cell value = regionInfoRow.get(COL_REGIONINFO);
@ -433,7 +432,7 @@ public class HConnectionManager implements HConstants {
if (regionInfo.isOffline()) { if (regionInfo.isOffline()) {
throw new RegionOfflineException("region offline: " + throw new RegionOfflineException("region offline: " +
regionInfo.getRegionName()); regionInfo.getRegionNameAsString());
} }
String serverAddress = String serverAddress =

View File

@ -1155,11 +1155,13 @@ public class HTable {
* *
* @param row The row to operate on * @param row The row to operate on
* @param family The column family to match * @param family The column family to match
* @param timestamp Timestamp to match
* @throws IOException * @throws IOException
*/ */
public void deleteFamily(final Text row, final Text family) throws IOException{ public void deleteFamily(final Text row, final Text family,
deleteFamily(row.getBytes(), family.getBytes(), final long timestamp)
HConstants.LATEST_TIMESTAMP); throws IOException{
deleteFamily(row.getBytes(), family.getBytes(), timestamp);
} }
/** /**
@ -1167,12 +1169,13 @@ public class HTable {
* *
* @param row The row to operate on * @param row The row to operate on
* @param family The column family to match * @param family The column family to match
* @param timestamp Timestamp to match
* @throws IOException * @throws IOException
*/ */
public void deleteFamily(final String row, final String family) public void deleteFamily(final String row, final String family,
final long timestamp)
throws IOException{ throws IOException{
deleteFamily(Bytes.toBytes(row), Bytes.toBytes(family), deleteFamily(Bytes.toBytes(row), Bytes.toBytes(family), timestamp);
HConstants.LATEST_TIMESTAMP);
} }
/** /**

View File

@ -45,7 +45,7 @@ public class BatchOperation implements Writable {
* Default constructor * Default constructor
*/ */
public BatchOperation() { public BatchOperation() {
this(null); this((byte [])null);
} }
/** /**
@ -56,6 +56,23 @@ public class BatchOperation implements Writable {
this(column, null); this(column, null);
} }
/**
* Creates a DELETE batch operation.
* @param column column name
*/
public BatchOperation(final String column) {
this(Bytes.toBytes(column), null);
}
/**
* Create a batch operation.
* @param column column name
* @param value column value. If non-null, this is a PUT operation.
*/
public BatchOperation(final String column, String value) {
this(Bytes.toBytes(column), Bytes.toBytes(value));
}
/** /**
* Create a batch operation. * Create a batch operation.
* @param column column name * @param column column name

View File

@ -42,6 +42,15 @@ public class Cell implements Writable {
timestamp = 0; timestamp = 0;
} }
/**
* Create a new Cell with a given value and timestamp. Used by HStore.
* @param value
* @param timestamp
*/
public Cell(String value, long timestamp) {
this(Bytes.toBytes(value), timestamp);
}
/** /**
* Create a new Cell with a given value and timestamp. Used by HStore. * Create a new Cell with a given value and timestamp. Used by HStore.
* @param value * @param value

View File

@ -124,7 +124,7 @@ abstract class TableOperation implements HConstants {
} }
if (!tableExists) { if (!tableExists) {
throw new TableNotFoundException(tableName + " does not exist"); throw new TableNotFoundException(Bytes.toString(tableName));
} }
postProcessMeta(m, server); postProcessMeta(m, server);

View File

@ -472,7 +472,7 @@ public class HStore implements HConstants {
Path p = datfiles[i].getPath(); Path p = datfiles[i].getPath();
// If does not have sympathetic info file, delete. // If does not have sympathetic info file, delete.
if (!mapfiles.contains(fs.makeQualified(p))) { if (!mapfiles.contains(fs.makeQualified(p))) {
fs.delete(p, false); fs.delete(p, true);
} }
} }
return results; return results;

View File

@ -19,12 +19,12 @@
*/ */
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import java.io.IOException; import org.apache.hadoop.hbase.DoNotRetryIOException;
/** /**
* Thrown if request for nonexistent column family. * Thrown if request for nonexistent column family.
*/ */
public class NoSuchColumnFamilyException extends IOException { public class NoSuchColumnFamilyException extends DoNotRetryIOException {
public NoSuchColumnFamilyException() { public NoSuchColumnFamilyException() {
super(); super();
} }

View File

@ -173,7 +173,7 @@ public class TestBloomFilters extends HBaseClusterTestCase {
HColumnDescriptor.CompressionType.NONE, // no compression HColumnDescriptor.CompressionType.NONE, // no compression
HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory
HColumnDescriptor.DEFAULT_BLOCKCACHE, HColumnDescriptor.DEFAULT_BLOCKCACHE,
HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH, HColumnDescriptor.DEFAULT_LENGTH,
HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_TTL,
bloomFilter bloomFilter
) )
@ -239,7 +239,7 @@ public class TestBloomFilters extends HBaseClusterTestCase {
HColumnDescriptor.CompressionType.NONE, // no compression HColumnDescriptor.CompressionType.NONE, // no compression
HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory HColumnDescriptor.DEFAULT_IN_MEMORY, // not in memory
HColumnDescriptor.DEFAULT_BLOCKCACHE, HColumnDescriptor.DEFAULT_BLOCKCACHE,
HColumnDescriptor.DEFAULT_MAX_VALUE_LENGTH, HColumnDescriptor.DEFAULT_LENGTH,
HColumnDescriptor.DEFAULT_TTL, HColumnDescriptor.DEFAULT_TTL,
bloomFilter bloomFilter
) )