HBASE-842 Remove methods that have Text as a parameter and were deprecated in 0.2.1 (Jean-Daniel Cryans via Jim Kellerman)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@689938 13f79535-47bb-0310-9956-ffa450edef68
Jim Kellerman 2008-08-28 19:27:44 +00:00
parent a1f8ee8ac1
commit 1491efc79b
17 changed files with 49 additions and 521 deletions

View File

@ -63,6 +63,8 @@ Release 0.18.0 - Unreleased
compilation problems that were introduced by patch.
HBASE-669 MultiRegion transactions with Optimistic Concurrency Control
(Clint Morgan via Stack)
HBASE-842 Remove methods that have Text as a parameter and were deprecated
in 0.2.1 (Jean-Daniel Cryans via Jim Kellerman)
OPTIMIZATIONS

View File

@ -137,16 +137,6 @@ public class HColumnDescriptor implements WritableComparable {
this(Bytes.toBytes(columnName));
}
/**
* Construct a column descriptor specifying only the family name
* The other attributes are defaulted.
*
* @param columnName - column family name
*/
public HColumnDescriptor(final Text columnName) {
this(columnName.getBytes());
}
/**
* Construct a column descriptor specifying only the family name
* The other attributes are defaulted.

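For code that was using the removed Text constructor, the remaining String and byte [] constructors are drop-in replacements. A minimal sketch, assuming the pre-0.20 "family:" naming convention; the class, table, and family names (ColumnDescriptorMigration, "mytable", "info:") are illustrative:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnDescriptorMigration {
  public static void main(String[] args) {
    // Before (removed): new HColumnDescriptor(new Text("info:"))
    // After: pass the family name as a String or as raw bytes.
    HColumnDescriptor byString = new HColumnDescriptor("info:");
    HColumnDescriptor byBytes = new HColumnDescriptor(Bytes.toBytes("info:"));

    HTableDescriptor table = new HTableDescriptor("mytable");
    table.addFamily(byString);
    System.out.println(table + " / " + byBytes);
  }
}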
View File

@ -24,7 +24,7 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
/**
@ -97,7 +97,7 @@ public class HMsg implements Writable {
private Type type = null;
private HRegionInfo info = null;
private Text message = null;
private byte[] message = null;
// Some useful statics. Use these rather than create a new HMsg each time.
public static final HMsg REPORT_EXITING = new HMsg(Type.MSG_REPORT_EXITING);
@ -141,7 +141,7 @@ public class HMsg implements Writable {
* null. If no info associated, used other Constructor.
* @param msg Optional message (Stringified exception, etc.)
*/
public HMsg(final HMsg.Type type, final HRegionInfo hri, final Text msg) {
public HMsg(final HMsg.Type type, final HRegionInfo hri, final byte[] msg) {
if (type == null) {
throw new NullPointerException("Message type cannot be null");
}
@ -172,7 +172,7 @@ public class HMsg implements Writable {
return this.type.equals(other);
}
public Text getMessage() {
public byte[] getMessage() {
return this.message;
}
@ -188,7 +188,7 @@ public class HMsg implements Writable {
sb.append(": ");
sb.append(this.info.getRegionNameAsString());
}
if (this.message != null && this.message.getLength() > 0) {
if (this.message != null && this.message.length > 0) {
sb.append(": " + this.message);
}
return sb.toString();
@ -221,11 +221,11 @@ public class HMsg implements Writable {
public void write(DataOutput out) throws IOException {
out.writeInt(this.type.ordinal());
this.info.write(out);
if (this.message == null || this.message.getLength() == 0) {
if (this.message == null || this.message.length == 0) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
this.message.write(out);
Bytes.writeByteArray(out, this.message);
}
}
@ -238,10 +238,7 @@ public class HMsg implements Writable {
this.info.readFields(in);
boolean hasMessage = in.readBoolean();
if (hasMessage) {
if (this.message == null) {
this.message = new Text();
}
this.message.readFields(in);
this.message = Bytes.readByteArray(in);
}
}
}
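With the message field now a byte [], senders pass raw bytes and receivers decode with Bytes.toString rather than Text. A hedged round-trip sketch; the region-info construction mirrors the TestSerialization change further down, and the message text and class name are illustrative:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

public class HMsgBytesExample {
  public static void main(String[] args) {
    HRegionInfo region = new HRegionInfo(new HTableDescriptor("mytable"),
        HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY);

    // The Text payload is gone; optional messages travel as raw bytes.
    HMsg msg = new HMsg(HMsg.Type.MSG_REGIONSERVER_QUIESCE, region,
        Bytes.toBytes("quiescing for shutdown"));

    // Receivers decode with the Bytes helper instead of Text.toString().
    System.out.println(Bytes.toString(msg.getMessage()));
  }
}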

View File

@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
/**
@ -81,16 +80,6 @@ public class HBaseAdmin {
return this.connection.isMasterRunning();
}
/**
* @param tableName Table to check.
* @return True if table exists already.
* @throws MasterNotRunningException
*/
public boolean tableExists(final Text tableName)
throws MasterNotRunningException {
return tableExists(tableName.getBytes());
}
/**
* @param tableName Table to check.
* @return True if table exists already.
@ -195,16 +184,6 @@ public class HBaseAdmin {
}
}
/**
* Deletes a table
*
* @param tableName name of table to delete
* @throws IOException
*/
public void deleteTable(final Text tableName) throws IOException {
deleteTable(tableName.getBytes());
}
/**
* Deletes a table
*
@ -292,16 +271,6 @@ public class HBaseAdmin {
LOG.info("Deleted " + Bytes.toString(tableName));
}
/**
* Brings a table on-line (enables it)
*
* @param tableName name of the table
* @throws IOException
*/
public void enableTable(final Text tableName) throws IOException {
enableTable(tableName.getBytes());
}
/**
* Brings a table on-line (enables it)
*
@ -353,17 +322,6 @@ public class HBaseAdmin {
LOG.info("Enabled table " + Bytes.toString(tableName));
}
/**
* Disables a table (takes it off-line) If it is being served, the master
* will tell the servers to stop serving it.
*
* @param tableName name of table
* @throws IOException
*/
public void disableTable(final Text tableName) throws IOException {
disableTable(tableName.getBytes());
}
/**
* Disables a table (takes it off-line) If it is being served, the master
* will tell the servers to stop serving it.
@ -416,14 +374,6 @@ public class HBaseAdmin {
LOG.info("Disabled " + Bytes.toString(tableName));
}
/**
* @param tableName name of table to check
* @return true if table is on-line
* @throws IOException
*/
public boolean isTableEnabled(Text tableName) throws IOException {
return isTableEnabled(tableName.getBytes());
}
/**
* @param tableName name of table to check
* @return true if table is on-line
@ -441,18 +391,6 @@ public class HBaseAdmin {
return connection.isTableEnabled(tableName);
}
/**
* Add a column to an existing table
*
* @param tableName name of the table to add column to
* @param column column descriptor of column to be added
* @throws IOException
*/
public void addColumn(final Text tableName, HColumnDescriptor column)
throws IOException {
addColumn(tableName.getBytes(), column);
}
/**
* Add a column to an existing table
*
@ -485,18 +423,6 @@ public class HBaseAdmin {
}
}
/**
* Delete a column from a table
*
* @param tableName name of table
* @param columnName name of column to be deleted
* @throws IOException
*/
public void deleteColumn(final Text tableName, final Text columnName)
throws IOException {
deleteColumn(tableName.getBytes(), columnName.getBytes());
}
/**
* Delete a column from a table
*
@ -529,20 +455,6 @@ public class HBaseAdmin {
}
}
/**
* Modify an existing column family on a table
*
* @param tableName name of table
* @param columnName name of column to be modified
* @param descriptor new column descriptor to use
* @throws IOException
*/
public void modifyColumn(final Text tableName, final Text columnName,
HColumnDescriptor descriptor)
throws IOException {
modifyColumn(tableName.getBytes(), columnName.getBytes(), descriptor);
}
/**
* Modify an existing column family on a table
*

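Callers of the removed Text overloads switch to the byte [] forms those methods delegated to (String overloads may exist for some of these as well, but the byte [] forms are the ones visible in this diff). A sketch assuming a reachable 0.18 master; the table and family names ("mytable", "info:", "extra:") and the class name are made up:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class AdminMigration {
  public static void main(String[] args) throws IOException {
    HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
    byte[] tableName = Bytes.toBytes("mytable");

    if (!admin.tableExists(tableName)) {
      HTableDescriptor desc = new HTableDescriptor("mytable");
      desc.addFamily(new HColumnDescriptor("info:"));
      admin.createTable(desc);
    }

    // The table has to be offline before altering its schema.
    admin.disableTable(tableName);
    admin.addColumn(tableName, new HColumnDescriptor("extra:"));
    admin.enableTable(tableName);
    System.out.println("enabled: " + admin.isTableEnabled(tableName));
  }
}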
View File

@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
/**
* Used to communicate with a single HBase table
@ -53,18 +52,6 @@ public class HTable {
private final byte [] tableName;
private HBaseConfiguration configuration;
/**
* Creates an object to access a HBase table
*
* @param tableName name of the table
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public HTable(final Text tableName)
throws IOException {
this(new HBaseConfiguration(), tableName.getBytes());
}
/**
* Creates an object to access a HBase table
*
@ -87,19 +74,6 @@ public class HTable {
this(new HBaseConfiguration(), tableName);
}
/**
* Creates an object to access a HBase table
*
* @param conf configuration object
* @param tableName name of the table
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public HTable(HBaseConfiguration conf, final Text tableName)
throws IOException {
this(conf, tableName.getBytes());
}
/**
* Creates an object to access a HBase table
*
@ -127,15 +101,6 @@ public class HTable {
this.connection.locateRegion(tableName, HConstants.EMPTY_START_ROW);
}
/**
* @param tableName name of table to check
* @return true if table is on-line
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public static boolean isTableEnabled(Text tableName) throws IOException {
return isTableEnabled(tableName.getBytes());
}
/**
* @param tableName name of table to check
* @return true if table is on-line
@ -153,18 +118,6 @@ public class HTable {
return isTableEnabled(new HBaseConfiguration(), tableName);
}
/**
* @param conf HBaseConfiguration object
* @param tableName name of table to check
* @return true if table is on-line
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public static boolean isTableEnabled(HBaseConfiguration conf, Text tableName)
throws IOException {
return isTableEnabled(conf, tableName.getBytes());
}
/**
* @param conf HBaseConfiguration object
* @param tableName name of table to check
@ -187,18 +140,6 @@ public class HTable {
return HConnectionManager.getConnection(conf).isTableEnabled(tableName);
}
/**
* Find region location hosting passed row using cached info
* @param row Row to find.
* @return Location of row.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public HRegionLocation getRegionLocation(final Text row)
throws IOException {
return connection.getRegionLocation(tableName, row.getBytes(), false);
}
/**
* Find region location hosting passed row using cached info
* @param row Row to find.
@ -235,15 +176,6 @@ public class HTable {
return this.connection;
}
/**
* @return table metadata
* @throws IOException
*/
@Deprecated
public HTableDescriptor getMetadata() throws IOException {
return getTableDescriptor();
}
/**
* @return table metadata
* @throws IOException
@ -317,35 +249,6 @@ public class HTable {
return regionMap;
}
/**
* Get a single value for the specified row and column
*
* @param row row key
* @param column column name
* @return value for specified row/column
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public Cell get(final Text row, final Text column)
throws IOException {
return get(row.getBytes(), column.getBytes());
}
/**
* Get a single value for the specified row and column
*
* @param row row key
* @param column column name
* @param numVersions - number of versions to retrieve
* @return value for specified row/column
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public Cell[] get(final Text row, final Text column, int numVersions)
throws IOException {
return get(row.getBytes(), column.getBytes(), numVersions);
}
/**
* Get a single value for the specified row and column
*
@ -415,24 +318,6 @@ public class HTable {
);
}
/**
* Get the specified number of versions of the specified row and column with
* the specified timestamp.
*
* @param row - row key
* @param column - column name
* @param timestamp - timestamp
* @param numVersions - number of versions to retrieve
* @return - array of values that match the above criteria
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public Cell[] get(final Text row, final Text column,
final long timestamp, final int numVersions)
throws IOException {
return get(row.getBytes(), column.getBytes(), timestamp, numVersions);
}
/**
* Get the specified number of versions of the specified row and column with
* the specified timestamp.
@ -484,18 +369,6 @@ public class HTable {
return null;
}
/**
* Get all the data for the specified row at the latest timestamp
*
* @param row row key
* @return RowResult is empty if row does not exist.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public RowResult getRow(final Text row) throws IOException {
return getRow(row.getBytes());
}
/**
* Get all the data for the specified row at the latest timestamp
*
@ -518,20 +391,6 @@ public class HTable {
return getRow(row, HConstants.LATEST_TIMESTAMP);
}
/**
* Get all the data for the specified row at a specified timestamp
*
* @param row row key
* @param ts timestamp
* @return RowResult is empty if row does not exist.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public RowResult getRow(final Text row, final long ts)
throws IOException {
return getRow(row.getBytes(), ts);
}
/**
* Get all the data for the specified row at a specified timestamp
*
@ -558,20 +417,6 @@ public class HTable {
return getRow(row,null,ts);
}
/**
* Get selected columns for the specified row at the latest timestamp
*
* @param row row key
* @param columns Array of column names and families you want to retrieve.
* @return RowResult is empty if row does not exist.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public RowResult getRow(final Text row, final Text[] columns)
throws IOException {
return getRow(row.getBytes(), Bytes.toByteArrays(columns));
}
/**
* Get selected columns for the specified row at the latest timestamp
*
@ -598,22 +443,6 @@ public class HTable {
return getRow(row, columns, HConstants.LATEST_TIMESTAMP);
}
/**
* Get selected columns for the specified row at a specified timestamp
*
* @param row row key
* @param columns Array of column names and families you want to retrieve.
* @param ts timestamp
* @return RowResult is empty if row does not exist.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public RowResult getRow(final Text row, final Text [] columns,
final long ts)
throws IOException {
return getRow(row.getBytes(), Bytes.toByteArrays(columns), ts);
}
/**
* Get selected columns for the specified row at a specified timestamp
*
@ -672,24 +501,6 @@ public class HTable {
);
}
/**
* Get a scanner on the current table starting at first row.
* Return the specified columns.
*
* @param columns columns to scan. If column name is a column family, all
* columns of the specified column family are returned. Its also possible
* to pass a regex in the column qualifier. A column qualifier is judged to
* be a regex if it contains at least one of the following characters:
* <code>\+|^&*$[]]}{)(</code>.
* @return scanner
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public Scanner getScanner(final Text [] columns)
throws IOException {
return getScanner(Bytes.toByteArrays(columns), HConstants.EMPTY_START_ROW);
}
/**
* Get a scanner on the current table starting at first row.
* Return the specified columns.
@ -707,25 +518,6 @@ public class HTable {
return getScanner(Bytes.toByteArrays(columns), HConstants.EMPTY_START_ROW);
}
/**
* Get a scanner on the current table starting at the specified row.
* Return the specified columns.
*
* @param columns columns to scan. If column name is a column family, all
* columns of the specified column family are returned. Its also possible
* to pass a regex in the column qualifier. A column qualifier is judged to
* be a regex if it contains at least one of the following characters:
* <code>\+|^&*$[]]}{)(</code>.
* @param startRow starting row in table to scan
* @return scanner
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public Scanner getScanner(final Text [] columns, final Text startRow)
throws IOException {
return getScanner(Bytes.toByteArrays(columns), startRow.getBytes());
}
/**
* Get a scanner on the current table starting at the specified row.
* Return the specified columns.
@ -844,32 +636,6 @@ public class HTable {
HConstants.LATEST_TIMESTAMP);
}
/**
* Get a scanner on the current table starting at the specified row and
* ending just before <code>stopRow<code>.
* Return the specified columns.
*
* @param columns columns to scan. If column name is a column family, all
* columns of the specified column family are returned. Its also possible
* to pass a regex in the column qualifier. A column qualifier is judged to
* be a regex if it contains at least one of the following characters:
* <code>\+|^&*$[]]}{)(</code>.
* @param startRow starting row in table to scan
* @param stopRow Row to stop scanning on. Once we hit this row we stop
* returning values; i.e. we return the row before this one but not the
* <code>stopRow</code> itself.
* @param timestamp only return results whose timestamp <= this value
* @return scanner
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public Scanner getScanner(final Text[] columns,
final Text startRow, final Text stopRow, final long timestamp)
throws IOException {
return getScanner(Bytes.toByteArrays(columns), startRow.getBytes(),
stopRow.getBytes(), timestamp);
}
/**
* Get a scanner on the current table starting at the specified row and
* ending just before <code>stopRow<code>.
@ -920,29 +686,6 @@ public class HTable {
new WhileMatchRowFilter(new StopRowFilter(stopRow)));
}
/**
* Get a scanner on the current table starting at the specified row.
* Return the specified columns.
*
* @param columns columns to scan. If column name is a column family, all
* columns of the specified column family are returned. Its also possible
* to pass a regex in the column qualifier. A column qualifier is judged to
* be a regex if it contains at least one of the following characters:
* <code>\+|^&*$[]]}{)(</code>.
* @param startRow starting row in table to scan
* @param timestamp only return results whose timestamp <= this value
* @param filter a row filter using row-key regexp and/or column data filter.
* @return scanner
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public Scanner getScanner(Text[] columns,
Text startRow, long timestamp, RowFilterInterface filter)
throws IOException {
return getScanner(Bytes.toByteArrays(columns), startRow.getBytes(),
timestamp, filter);
}
/**
* Get a scanner on the current table starting at the specified row.
* Return the specified columns.
@ -998,17 +741,6 @@ public class HTable {
deleteAll(row, null);
}
/**
* Completely delete the row's cells.
*
* @param row Key of the row you want to completely delete.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public void deleteAll(final Text row) throws IOException {
deleteAll(row, null);
}
/**
* Completely delete the row's cells.
*
@ -1055,30 +787,6 @@ public class HTable {
deleteAll(row, null, ts);
}
/**
* Completely delete the row's cells.
*
* @param row Key of the row you want to completely delete.
* @param ts Delete all cells of the same timestamp or older.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public void deleteAll(final Text row, final long ts)
throws IOException {
deleteAll(row, null, ts);
}
/**
* Delete all cells that match the passed row and column.
* @param row Row to update
* @param column name of column whose value is to be deleted
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public void deleteAll(final Text row, final Text column) throws IOException {
deleteAll(row, column, HConstants.LATEST_TIMESTAMP);
}
/**
* Delete all cells that match the passed row and column.
* @param row Row to update
@ -1090,20 +798,6 @@ public class HTable {
deleteAll(row, column, HConstants.LATEST_TIMESTAMP);
}
/**
* Delete all cells that match the passed row and column and whose
* timestamp is equal-to or older than the passed timestamp.
* @param row Row to update
* @param column name of column whose value is to be deleted
* @param ts Delete all cells of the same timestamp or older.
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public void deleteAll(final Text row, final Text column, final long ts)
throws IOException {
deleteAll(row.getBytes(), column.getBytes(), ts);
}
/**
* Delete all cells that match the passed row and column and whose
* timestamp is equal-to or older than the passed timestamp.
@ -1169,14 +863,11 @@ public class HTable {
*
* @param row The row to operate on
* @param family The column family to match
* @param timestamp Timestamp to match
* @throws IOException
* @deprecated Use String or byte [] overload instead
*/
public void deleteFamily(final Text row, final Text family,
final long timestamp)
public void deleteFamily(final String row, final String family)
throws IOException {
deleteFamily(row.getBytes(), family.getBytes(), timestamp);
deleteFamily(row, family, HConstants.LATEST_TIMESTAMP);
}
/**
@ -1184,6 +875,19 @@ public class HTable {
*
* @param row The row to operate on
* @param family The column family to match
* @throws IOException
*/
public void deleteFamily(final byte[] row, final byte[] family)
throws IOException {
deleteFamily(row, family, HConstants.LATEST_TIMESTAMP);
}
/**
* Delete all cells for a row with matching column family with timestamps
* less than or equal to <i>timestamp</i>.
*
* @param row The row to operate on
* @param family The column family to match
* @param timestamp Timestamp to match
* @throws IOException
*/
@ -1337,12 +1041,6 @@ public class HTable {
private ScannerCallable callable = null;
protected RowFilterInterface filter;
protected ClientScanner(final Text [] columns, final Text startRow,
long timestamp, RowFilterInterface filter) {
this(Bytes.toByteArrays(columns), startRow.getBytes(), timestamp,
filter);
}
protected ClientScanner(final byte[][] columns, final byte [] startRow,
final long timestamp, final RowFilterInterface filter) {
if (CLIENT_LOG.isDebugEnabled()) {

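On the read/write side the replacement pattern is the same: rows and columns go in as String or byte [], results come back as Cell and RowResult, and the new deleteFamily(String, String) overload added above rounds out the deletes. A hedged end-to-end sketch assuming a running cluster and an existing table; "mytable", "info:" and the row/column names are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Scanner;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableMigration {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), "mytable");

    // Write: BatchUpdate takes String/byte [] rows and columns now.
    BatchUpdate update = new BatchUpdate("row1");
    update.put("info:name", Bytes.toBytes("some value"));
    table.commit(update);

    // Point read: byte [] row and column instead of Text.
    Cell cell = table.get(Bytes.toBytes("row1"), Bytes.toBytes("info:name"));
    System.out.println(Bytes.toString(cell.getValue()));

    // Whole-row read.
    RowResult row = table.getRow(Bytes.toBytes("row1"));
    System.out.println("read row " + Bytes.toString(row.getRow()));

    // Scan: columns as byte [][], start row as byte [].
    Scanner scanner = table.getScanner(
        new byte[][] { Bytes.toBytes("info:") }, HConstants.EMPTY_START_ROW);
    try {
      for (RowResult result : scanner) {
        System.out.println("scanned " + Bytes.toString(result.getRow()));
      }
    } finally {
      scanner.close();
    }

    // Delete: the new String-based deleteFamily overload introduced above.
    table.deleteFamily("row1", "info:");
  }
}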
View File

@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.io.RowResult;
public interface Scanner extends Closeable, Iterable<RowResult> {
/**
* Grab the next row's worth of values. The scanner will return a RowResult
* that contains both the row's key and a map of Text column names to Cell
* that contains both the row's key and a map of byte[] column names to Cell
* value objects. The data returned will only contain the most recent data
* value for each row that is not newer than the target time passed when the
* scanner was created.

View File

@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.TransactionalRegionInterface;
import org.apache.hadoop.io.Text;
/**
* Table with transactional support.
@ -46,11 +45,6 @@ public class TransactionalTable extends HTable {
super(conf, tableName);
}
public TransactionalTable(final HBaseConfiguration conf, final Text tableName)
throws IOException {
super(conf, tableName);
}
public TransactionalTable(final HBaseConfiguration conf,
final byte[] tableName) throws IOException {
super(conf, tableName);

View File

@ -27,7 +27,6 @@ import java.util.Iterator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
/**
@ -55,16 +54,6 @@ public class BatchUpdate implements Writable, Iterable<BatchOperation> {
this ((byte [])null);
}
/**
* Initialize a BatchUpdate operation on a row. Timestamp is assumed to be
* now.
*
* @param row
*/
public BatchUpdate(final Text row) {
this(row, HConstants.LATEST_TIMESTAMP);
}
/**
* Initialize a BatchUpdate operation on a row. Timestamp is assumed to be
* now.
@ -94,15 +83,6 @@ public class BatchUpdate implements Writable, Iterable<BatchOperation> {
this(Bytes.toBytes(row), timestamp);
}
/**
* Initialize a BatchUpdate operation on a row with a specific timestamp.
*
* @param row
*/
public BatchUpdate(final Text row, long timestamp){
this(row.getBytes(), timestamp);
}
/**
* Initialize a BatchUpdate operation on a row with a specific timestamp.
*
@ -133,16 +113,6 @@ public class BatchUpdate implements Writable, Iterable<BatchOperation> {
this.timestamp = timestamp;
}
/**
* Change a value for the specified column
*
* @param column column whose value is being set
* @param val new value for column. Cannot be null (can be empty).
*/
public synchronized void put(final Text column, final byte val[]) {
put(column.getBytes(), val);
}
/**
* Change a value for the specified column
*
@ -167,16 +137,6 @@ public class BatchUpdate implements Writable, Iterable<BatchOperation> {
operations.add(new BatchOperation(column, val));
}
/**
* Delete the value for a column
* Deletes the cell whose row/column/commit-timestamp match those of the
* delete.
* @param column name of column whose value is to be deleted
*/
public void delete(final Text column) {
delete(column.getBytes());
}
/**
* Delete the value for a column
* Deletes the cell whose row/column/commit-timestamp match those of the

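For code that builds updates directly, the remaining constructors take a String or byte [] row, optionally with an explicit timestamp, and put/delete take String or byte [] columns. A short sketch; the row and column names are illustrative:

import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchUpdateMigration {
  public static void main(String[] args) {
    // Row as a String with an explicit timestamp (the Text constructors are gone).
    BatchUpdate update = new BatchUpdate("row1", System.currentTimeMillis());

    // Columns as String or byte []; values are always byte [].
    update.put("info:name", Bytes.toBytes("some value"));
    update.put(Bytes.toBytes("info:age"), Bytes.toBytes("42"));

    // Mark a cell for deletion in the same batch.
    update.delete(Bytes.toBytes("info:stale"));

    // The batch would normally be handed to HTable.commit(update).
    System.out.println("prepared update for row1");
  }
}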
View File

@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
/**
* Class to manage assigning regions to servers, state of root and meta, etc.
@ -74,7 +73,7 @@ class RegionManager implements HConstants {
Collections.synchronizedSortedMap(new TreeMap<byte [],
MetaRegion>(Bytes.BYTES_COMPARATOR));
private static final Text OVERLOADED = new Text("Overloaded");
private static final byte[] OVERLOADED = Bytes.toBytes("Overloaded");
/**
* The 'unassignedRegions' table maps from a HRegionInfo to a timestamp that

View File

@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.LeaseException;
import org.apache.hadoop.hbase.Leases;
import org.apache.hadoop.hbase.LeaseListener;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.io.Text;
/**
* The ServerManager class manages info about region servers - HServerInfo,
@ -437,7 +436,7 @@ class ServerManager implements HConstants {
// Otherwise the HMaster will think the Region was closed on purpose,
// and then try to reopen it elsewhere; that's not what we want.
returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_CLOSE_WITHOUT_REPORT,
region, new Text("Duplicate assignment")));
region, "Duplicate assignment".getBytes()));
} else {
// it was assigned, and it's not a duplicate assignment, so take it out
// of the unassigned list.

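One hedged aside on the replacement above: String.getBytes() with no argument uses the JVM's default charset, whereas the Bytes helper used elsewhere in this change encodes as UTF-8, so the two only coincide for ASCII messages like this one. A tiny illustration (the class name is made up):

import org.apache.hadoop.hbase.util.Bytes;

public class MessageEncoding {
  public static void main(String[] args) {
    byte[] platformDefault = "Duplicate assignment".getBytes(); // default charset
    byte[] utf8 = Bytes.toBytes("Duplicate assignment");        // UTF-8 via Bytes

    // Identical for plain ASCII; they can differ once non-ASCII characters appear.
    System.out.println(java.util.Arrays.equals(platformDefault, utf8));
  }
}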
View File

@ -86,7 +86,6 @@ import org.apache.hadoop.hbase.util.InfoServer;
import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.util.Progressable;
@ -742,11 +741,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}
/* Add to the outbound message buffer */
private void reportClose(final HRegionInfo region, final Text message) {
private void reportClose(final HRegionInfo region, final byte[] message) {
outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_CLOSE, region, message));
}
/**
* Add to the outbound message buffer
*
@ -761,9 +759,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
HRegionInfo newRegionB) {
outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_SPLIT, oldRegion,
new Text(oldRegion.getRegionNameAsString() + " split; daughters: " +
(oldRegion.getRegionNameAsString() + " split; daughters: " +
newRegionA.getRegionNameAsString() + ", " +
newRegionB.getRegionNameAsString())));
newRegionB.getRegionNameAsString()).getBytes()));
outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_OPEN, newRegionA));
outboundMsgs.add(new HMsg(HMsg.Type.MSG_REPORT_OPEN, newRegionB));
}
@ -884,7 +882,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
// TODO: add an extra field in HRegionInfo to indicate that there is
// an error. We can't do that now because that would be an incompatible
// change that would require a migration
reportClose(regionInfo, new Text(StringUtils.stringifyException(e)));
reportClose(regionInfo, StringUtils.stringifyException(e).getBytes());
return;
}
this.lock.writeLock().lock();

View File

@ -9,7 +9,6 @@ import java.util.Comparator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
@ -240,18 +239,6 @@ public class Bytes {
}
/**
* @param t
* @return Array of byte arrays made from passed array of Text
*/
public static byte [][] toByteArrays(final Text [] t) {
byte [][] result = new byte[t.length][];
for (int i = 0; i < t.length; i++) {
result[i] = t[i].getBytes();
}
return result;
}
/**
* @param t
* @return Array of byte arrays made from passed array of Text

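The Text[] variant is removed, but the String[] and byte [][] variants of toByteArrays remain; the updated tests further down use the String[] form. For example:

import org.apache.hadoop.hbase.util.Bytes;

public class ToByteArraysExample {
  public static void main(String[] args) {
    // Before (removed): Bytes.toByteArrays(new Text[] { new Text("a:"), new Text("b:") })
    byte[][] columns = Bytes.toByteArrays(new String[] { "a:", "b:" });
    for (byte[] column : columns) {
      System.out.println(Bytes.toString(column));
    }
  }
}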
View File

@ -35,13 +35,11 @@ import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Text;
/** test the scanner API at all levels */
public class TestScannerAPI extends HBaseClusterTestCase {
private final byte [][] columns = Bytes.toByteArrays(new Text[] {
new Text("a:"),
new Text("b:")
private final byte [][] columns = Bytes.toByteArrays(new String[] {
"a:", "b:"
});
private final byte [] startRow = Bytes.toBytes("0");

View File

@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.io.HbaseMapWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
/**
* Test HBase Writables serializations
@ -50,7 +49,7 @@ public class TestSerialization extends HBaseTestCase {
m = new HMsg(HMsg.Type.MSG_REGIONSERVER_QUIESCE,
new HRegionInfo(new HTableDescriptor(getName()),
HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY),
new Text("Some message"));
"Some message".getBytes());
mb = Writables.getBytes(m);
deserializedHMsg = (HMsg)Writables.getWritable(mb, new HMsg());
assertTrue(m.equals(deserializedHMsg));

View File

@ -26,7 +26,6 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.util.Bytes;
@ -112,8 +111,8 @@ public class TestGet extends HBaseTestCase {
batchUpdate.put(HConstants.COL_SERVER,
Bytes.toBytes(new HServerAddress(SERVER_ADDRESS).toString()));
batchUpdate.put(HConstants.COL_STARTCODE, Bytes.toBytes(12345));
batchUpdate.put(new Text(Bytes.toString(HConstants.COLUMN_FAMILY) +
"region"), Bytes.toBytes("region"));
batchUpdate.put(Bytes.toString(HConstants.COLUMN_FAMILY) +
"region", Bytes.toBytes("region"));
r.commit(batchUpdate);
// Verify that get works the same from memcache as when reading from disk
@ -134,14 +133,12 @@ public class TestGet extends HBaseTestCase {
// Update one family member and add a new one
batchUpdate = new BatchUpdate(ROW_KEY, System.currentTimeMillis());
batchUpdate.put(new Text(Bytes.toString(HConstants.COLUMN_FAMILY) +
"region"),
batchUpdate.put(Bytes.toString(HConstants.COLUMN_FAMILY) + "region",
"region2".getBytes(HConstants.UTF8_ENCODING));
String otherServerName = "bar.foo.com:4321";
batchUpdate.put(HConstants.COL_SERVER,
Bytes.toBytes(new HServerAddress(otherServerName).toString()));
batchUpdate.put(new Text(Bytes.toString(HConstants.COLUMN_FAMILY) +
"junk"),
batchUpdate.put(Bytes.toString(HConstants.COLUMN_FAMILY) + "junk",
"junk".getBytes(HConstants.UTF8_ENCODING));
r.commit(batchUpdate);

View File

@ -24,7 +24,6 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
@ -115,7 +114,7 @@ public class TestLogRolling extends HBaseClusterTestCase {
for (int i = 1; i <= 256; i++) { // 256 writes should cause 8 log rolls
BatchUpdate b =
new BatchUpdate(new Text("row" + String.format("%1$04d", i)));
new BatchUpdate("row" + String.format("%1$04d", i));
b.put(HConstants.COLUMN_FAMILY, value);
table.commit(b);

View File

@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.ToolRunner;
/** Test stand alone merge tool that can merge arbitrary regions */
@ -90,11 +89,11 @@ public class TestMergeTool extends HBaseTestCase {
* Now create some row keys
*/
this.rows = new byte [5][][];
this.rows[0] = Bytes.toByteArrays(new Text[] { new Text("row_0210"), new Text("row_0280") });
this.rows[1] = Bytes.toByteArrays(new Text[] { new Text("row_0260"), new Text("row_0350") });
this.rows[2] = Bytes.toByteArrays(new Text[] { new Text("row_0110"), new Text("row_0175") });
this.rows[3] = Bytes.toByteArrays(new Text[] { new Text("row_0525"), new Text("row_0560") });
this.rows[4] = Bytes.toByteArrays(new Text[] { new Text("row_0050"), new Text("row_1000") });
this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" });
this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350" });
this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175" });
this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560" });
this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000" });
// Start up dfs
this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);