HBASE-489 CellValue class for transporting cell timestamp with cell value simultaneously
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@635033 13f79535-47bb-0310-9956-ffa450edef68
parent da6dde97ce
commit 935d300957
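For illustration, here is a minimal sketch of what client code looks like against the changed API: HTable.get(row, column) now returns a Cell that carries the stored value together with its timestamp. This is a hypothetical usage example, not part of the commit; the configuration, table, row, and column names are assumptions:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.io.Text;

    public class CellReadExample {
      public static void main(String[] args) throws IOException {
        // Hypothetical table, row, and column names.
        HTable table = new HTable(new HBaseConfiguration(), new Text("mytable"));
        // get() now returns a Cell rather than a raw byte[], so the
        // timestamp the value was stored under travels with the value.
        Cell cell = table.get(new Text("myrow"), new Text("family:qualifier"));
        if (cell != null) {
          byte[] value = cell.getValue();  // the stored value
          long ts = cell.getTimestamp();   // when it was stored
        }
      }
    }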
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * A non-instantiable class that manages connections to multiple tables in
@@ -358,13 +359,12 @@ public class HConnectionManager implements HConstants {
    * Convenience method for turning a MapWritable into the underlying
    * SortedMap we all know and love.
    */
-  private SortedMap<Text, byte[]> sortedMapFromMapWritable(
+  private SortedMap<Text, Cell> sortedMapFromMapWritable(
     HbaseMapWritable writable) {
-    SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+    SortedMap<Text, Cell> results = new TreeMap<Text, Cell>();
     for (Map.Entry<Writable, Writable> e: writable.entrySet()) {
       HStoreKey key = (HStoreKey) e.getKey();
-      results.put(key.getColumn(),
-        ((ImmutableBytesWritable) e.getValue()).get());
+      results.put(key.getColumn(), (Cell)e.getValue());
     }
 
     return results;
@@ -423,19 +423,19 @@ public class HConnectionManager implements HConstants {
       }
 
       // convert the MapWritable into a Map we can use
-      SortedMap<Text, byte[]> results =
+      SortedMap<Text, Cell> results =
         sortedMapFromMapWritable(regionInfoRow);
 
-      byte[] bytes = results.get(COL_REGIONINFO);
+      Cell value = results.get(COL_REGIONINFO);
 
-      if (bytes == null || bytes.length == 0) {
+      if (value == null || value.getValue().length == 0) {
         throw new IOException("HRegionInfo was null or empty in " +
           parentTable);
       }
 
       // convert the row result into the HRegionLocation we need!
       HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
-        results.get(COL_REGIONINFO), new HRegionInfo());
+        value.getValue(), new HRegionInfo());
 
       // possible we got a region of a different table...
       if (!regionInfo.getTableDesc().getName().equals(tableName)) {
@@ -448,8 +448,10 @@ public class HConnectionManager implements HConstants {
           regionInfo.getRegionName());
       }
 
-      String serverAddress =
-        Writables.bytesToString(results.get(COL_SERVER));
+      Cell serverValue = results.get(COL_SERVER);
+
+      String serverAddress = Writables.bytesToString(
+        serverValue == null ? null : serverValue.getValue());
 
       if (serverAddress.equals("")) {
         throw new NoServerForRegionException(
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.filter.StopRowFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Writables;
@@ -257,14 +258,14 @@ public class HTable implements HConstants {
    * @return value for specified row/column
    * @throws IOException
    */
-  public byte[] get(Text row, final Text column) throws IOException {
-    return getRegionServerWithRetries(new ServerCallable<byte[]>(row){
-      public byte[] call() throws IOException {
+  public Cell get(final Text row, final Text column) throws IOException {
+    return getRegionServerWithRetries(new ServerCallable<Cell>(row){
+      public Cell call() throws IOException {
         return server.get(location.getRegionInfo().getRegionName(), row, column);
       }
     });
   }
 
   /**
    * Get the specified number of versions of the specified row and column
    *
@@ -274,27 +275,27 @@ public class HTable implements HConstants {
    * @return - array byte values
    * @throws IOException
    */
-  public byte[][] get(final Text row, final Text column, final int numVersions)
+  public Cell[] get(final Text row, final Text column, final int numVersions)
   throws IOException {
-    byte [][] values = null;
+    Cell[] values = null;
 
-    values = getRegionServerWithRetries(new ServerCallable<byte[][]>(row) {
-      public byte [][] call() throws IOException {
+    values = getRegionServerWithRetries(new ServerCallable<Cell[]>(row) {
+      public Cell[] call() throws IOException {
        return server.get(location.getRegionInfo().getRegionName(), row,
          column, numVersions);
       }
     });
 
    if (values != null) {
-     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
+     ArrayList<Cell> cellValues = new ArrayList<Cell>();
      for (int i = 0 ; i < values.length; i++) {
-       bytes.add(values[i]);
+       cellValues.add(values[i]);
      }
-     return bytes.toArray(new byte[values.length][]);
+     return cellValues.toArray(new Cell[values.length]);
    }
    return null;
   }
 
   /**
    * Get the specified number of versions of the specified row and column with
    * the specified timestamp.
@@ -306,28 +307,28 @@ public class HTable implements HConstants {
    * @return - array of values that match the above criteria
    * @throws IOException
    */
-  public byte[][] get(final Text row, final Text column, final long timestamp,
+  public Cell[] get(final Text row, final Text column, final long timestamp,
     final int numVersions)
   throws IOException {
-    byte [][] values = null;
+    Cell[] values = null;
 
-    values = getRegionServerWithRetries(new ServerCallable<byte[][]>(row) {
-      public byte [][] call() throws IOException {
+    values = getRegionServerWithRetries(new ServerCallable<Cell[]>(row) {
+      public Cell[] call() throws IOException {
        return server.get(location.getRegionInfo().getRegionName(), row,
          column, timestamp, numVersions);
       }
     });
 
    if (values != null) {
-     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
+     ArrayList<Cell> cellValues = new ArrayList<Cell>();
      for (int i = 0 ; i < values.length; i++) {
-       bytes.add(values[i]);
+       cellValues.add(values[i]);
      }
-     return bytes.toArray(new byte[values.length][]);
+     return cellValues.toArray(new Cell[values.length]);
    }
    return null;
   }
 
   /**
    * Get all the data for the specified row at the latest timestamp
    *
@@ -335,7 +336,7 @@ public class HTable implements HConstants {
    * @return Map of columns to values. Map is empty if row does not exist.
    * @throws IOException
    */
-  public SortedMap<Text, byte[]> getRow(Text row) throws IOException {
+  public SortedMap<Text, Cell> getRow(final Text row) throws IOException {
     return getRow(row, HConstants.LATEST_TIMESTAMP);
   }
 
@@ -347,7 +348,7 @@ public class HTable implements HConstants {
    * @return Map of columns to values. Map is empty if row does not exist.
    * @throws IOException
    */
-  public SortedMap<Text, byte[]> getRow(final Text row, final long ts)
+  public SortedMap<Text, Cell> getRow(final Text row, final long ts)
   throws IOException {
     HbaseMapWritable value = null;
 
@@ -357,18 +358,16 @@ public class HTable implements HConstants {
       }
     });
 
-    SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+    SortedMap<Text, Cell> results = new TreeMap<Text, Cell>();
     if (value != null && value.size() != 0) {
       for (Map.Entry<Writable, Writable> e: value.entrySet()) {
         HStoreKey key = (HStoreKey) e.getKey();
-        results.put(key.getColumn(),
-          ((ImmutableBytesWritable) e.getValue()).get());
+        results.put(key.getColumn(), (Cell)e.getValue());
       }
     }
     return results;
   }
 
-
   /**
    * Get a scanner on the current table starting at the specified row.
    * Return the specified columns.
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * Selects values from tables.
@@ -113,7 +114,7 @@ public class SelectCommand extends BasicCommand {
     try {
       if (version != 0) {
         // A number of versions has been specified.
-        byte[][] result = null;
+        Cell[] result = null;
         ParsedColumns parsedColumns = getColumns(admin, false);
         boolean multiple = parsedColumns.isMultiple() || version > 1;
         for (Text column : parsedColumns.getColumns()) {
@@ -128,15 +129,15 @@ public class SelectCommand extends BasicCommand {
           for (int ii = 0; result != null && ii < result.length; ii++) {
             if (multiple) {
               formatter.row(new String[] { column.toString(),
-                toString(column, result[ii]) });
+                toString(column, result[ii].getValue()) });
             } else {
-              formatter.row(new String[] { toString(column, result[ii]) });
+              formatter.row(new String[] { toString(column, result[ii].getValue()) });
             }
             count++;
           }
         }
       } else {
-        for (Map.Entry<Text, byte[]> e : table.getRow(rowKey).entrySet()) {
+        for (Map.Entry<Text, Cell> e : table.getRow(rowKey).entrySet()) {
           if (count == 0) {
             formatter.header(isMultiple() ? HEADER_COLUMN_CELL : null);
           }
@@ -145,7 +146,7 @@ public class SelectCommand extends BasicCommand {
           if (!columns.contains(ASTERISK) && !columns.contains(keyStr)) {
             continue;
           }
-          String cellData = toString(key, e.getValue());
+          String cellData = toString(key, e.getValue().getValue());
           if (isMultiple()) {
             formatter.row(new String[] { key.toString(), cellData });
           } else {
@@ -0,0 +1,86 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Cell - Used to transport a cell value (byte[]) and the timestamp it was
+ * stored with together as a result for get and getRow methods. This promotes
+ * the timestamp of a cell to a first-class value, making it easy to take
+ * note of temporal data. Cell is used all the way from HStore up to HTable.
+ */
+public class Cell implements Writable {
+  protected byte[] value;
+  protected long timestamp;
+
+  /** For Writable compatibility */
+  public Cell() {
+    value = null;
+    timestamp = 0;
+  }
+
+  /**
+   * Create a new Cell with a given value and timestamp. Used by HStore.
+   */
+  public Cell(byte[] value, long timestamp) {
+    this.value = value;
+    this.timestamp = timestamp;
+  }
+
+  /**
+   * Get the cell's value.
+   *
+   * @return cell's value
+   */
+  public byte[] getValue() {
+    return value;
+  }
+
+  /**
+   * Get the cell's timestamp
+   *
+   * @return cell's timestamp
+   */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  //
+  // Writable
+  //
+
+  public void readFields(final DataInput in) throws IOException {
+    timestamp = in.readLong();
+    int valueSize = in.readInt();
+    value = new byte[valueSize];
+    in.readFully(value, 0, valueSize);
+  }
+
+  public void write(final DataOutput out) throws IOException {
+    out.writeLong(timestamp);
+    out.writeInt(value.length);
+    out.write(value);
+  }
+}
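Because Cell implements Writable with a fixed field order (timestamp, then value length, then value bytes), it round-trips through any DataOutput and DataInput pair. A small self-contained sketch of that contract, not part of the commit:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.io.Cell;

    public class CellRoundTrip {
      public static void main(String[] args) throws IOException {
        Cell before = new Cell("some value".getBytes(), 1204000000000L);

        // Serialize: write() emits the timestamp, then the value length,
        // then the value bytes.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        before.write(new DataOutputStream(bytes));

        // Deserialize into an instance made via the no-arg constructor
        // that exists for Writable compatibility.
        Cell after = new Cell();
        after.readFields(new DataInputStream(
            new ByteArrayInputStream(bytes.toByteArray())));

        assert after.getTimestamp() == before.getTimestamp();
        assert new String(after.getValue()).equals("some value");
      }
    }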
@@ -35,6 +35,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * A Writable Map.
@@ -59,6 +60,7 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
     addToMap(HStoreKey.class, code++);
     addToMap(ImmutableBytesWritable.class, code++);
     addToMap(Text.class, code++);
+    addToMap(Cell.class, code++);
   }
 
   @SuppressWarnings("boxing")
@@ -124,6 +126,14 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
   /** {@inheritDoc} */
   @SuppressWarnings("unchecked")
   public Writable put(Writable key, Writable value) {
+    if (!CLASS_TO_CODE.containsKey(key.getClass())) {
+      throw new NullPointerException("Unsupported class " +
+        key.getClass() + " cannot be used as a key.");
+    }
+    if (!CLASS_TO_CODE.containsKey(value.getClass())) {
+      throw new NullPointerException("Unsupported class " +
+        value.getClass() + " cannot be used as a value.");
+    }
     return instance.put(key, value);
   }
 
@@ -204,4 +214,4 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
       instance.put(key, value);
     }
   }
 }
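With the new put() guard, an HbaseMapWritable now rejects any key or value whose class was never registered through addToMap, failing fast at insert time instead of later during serialization. A hypothetical illustration (the row and column names are assumptions):

    import org.apache.hadoop.hbase.HStoreKey;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.HbaseMapWritable;
    import org.apache.hadoop.io.Text;

    public class MapWritableGuardExample {
      public static void main(String[] args) {
        HbaseMapWritable map = new HbaseMapWritable();
        // Accepted: HStoreKey and Cell both have registered class codes.
        map.put(new HStoreKey(new Text("row"), new Text("family:qualifier")),
            new Cell("value".getBytes(), System.currentTimeMillis()));
        // A Writable whose class was never passed to addToMap would be
        // rejected here with NullPointerException("Unsupported class ...").
      }
    }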
@@ -43,6 +43,7 @@ import org.apache.hadoop.io.ObjectWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * This is a customized version of the polymorphic hadoop
@@ -108,12 +109,17 @@ public class HbaseObjectWritable implements Writable, Configurable {
     addToMap(HRegionInfo.class, code++);
     addToMap(BatchUpdate.class, code++);
     addToMap(HServerAddress.class, code++);
-    addToMap(HRegionInfo.class, code++);
     try {
       addToMap(Class.forName("[Lorg.apache.hadoop.hbase.HMsg;"), code++);
     } catch (ClassNotFoundException e) {
       e.printStackTrace();
     }
+    addToMap(Cell.class, code++);
+    try {
+      addToMap(Class.forName("[Lorg.apache.hadoop.hbase.io.Cell;"), code++);
+    } catch (ClassNotFoundException e) {
+      e.printStackTrace();
+    }
   }
 
   private Class<?> declaredClass;
@@ -344,4 +350,4 @@ public class HbaseObjectWritable implements Writable, Configurable {
     return this.conf;
   }
 
 }
@@ -23,6 +23,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.io.Text;
@@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
  */
 public interface HRegionInterface extends VersionedProtocol {
   /** initial version */
-  public static final long versionID = 1L;
+  public static final long versionID = 2L;
 
   /**
    * Get metainfo about an HRegion
@@ -57,7 +58,7 @@ public interface HRegionInterface extends VersionedProtocol {
    * @return value for that region/row/column
    * @throws IOException
    */
-  public byte [] get(final Text regionName, final Text row, final Text column)
+  public Cell get(final Text regionName, final Text row, final Text column)
   throws IOException;
 
   /**
@@ -70,7 +71,7 @@ public interface HRegionInterface extends VersionedProtocol {
    * @return array of values
    * @throws IOException
    */
-  public byte [][] get(final Text regionName, final Text row,
+  public Cell[] get(final Text regionName, final Text row,
     final Text column, final int numVersions)
   throws IOException;
 
@@ -86,8 +87,8 @@ public interface HRegionInterface extends VersionedProtocol {
    * @return array of values
    * @throws IOException
    */
-  public byte [][] get(final Text regionName, final Text row,
+  public Cell[] get(final Text regionName, final Text row,
     final Text column, final long timestamp, final int numVersions)
   throws IOException;
 
   /**
@@ -383,7 +383,7 @@ public class HLog implements HConstants {
         new HLogKey(regionName, tableName, key.getRow(), seqNum[counter++]);
       HLogEdit logEdit =
         new HLogEdit(key.getColumn(), es.getValue(), key.getTimestamp());
       this.writer.append(logKey, logEdit);
       this.numEntries++;
     }
   }
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchOperation;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
@@ -1011,8 +1012,8 @@ public class HRegion implements HConstants {
    * @return column value
    * @throws IOException
    */
-  public byte [] get(Text row, Text column) throws IOException {
-    byte [][] results = get(row, column, Long.MAX_VALUE, 1);
+  public Cell get(Text row, Text column) throws IOException {
+    Cell[] results = get(row, column, Long.MAX_VALUE, 1);
     return (results == null || results.length == 0)? null: results[0];
   }
 
@@ -1025,7 +1026,7 @@ public class HRegion implements HConstants {
    * @return array of values one element per version
    * @throws IOException
    */
-  public byte [][] get(Text row, Text column, int numVersions) throws IOException {
+  public Cell[] get(Text row, Text column, int numVersions) throws IOException {
     return get(row, column, Long.MAX_VALUE, numVersions);
   }
 
@@ -1039,7 +1040,7 @@ public class HRegion implements HConstants {
    * @return array of values one element per version that matches the timestamp
    * @throws IOException
    */
-  public byte [][] get(Text row, Text column, long timestamp, int numVersions)
+  public Cell[] get(Text row, Text column, long timestamp, int numVersions)
   throws IOException {
 
     if (this.closed.get()) {
@@ -1072,7 +1073,7 @@ public class HRegion implements HConstants {
    * @return Map<columnName, byte[]> values
    * @throws IOException
    */
-  public Map<Text, byte []> getFull(Text row) throws IOException {
+  public Map<Text, Cell> getFull(Text row) throws IOException {
     return getFull(row, HConstants.LATEST_TIMESTAMP);
   }
 
@@ -1091,11 +1092,11 @@ public class HRegion implements HConstants {
    * @return Map<columnName, byte[]> values
    * @throws IOException
    */
-  public Map<Text, byte []> getFull(Text row, long ts) throws IOException {
+  public Map<Text, Cell> getFull(Text row, long ts) throws IOException {
     HStoreKey key = new HStoreKey(row, ts);
     obtainRowLock(row);
     try {
-      TreeMap<Text, byte []> result = new TreeMap<Text, byte[]>();
+      TreeMap<Text, Cell> result = new TreeMap<Text, Cell>();
       for (Text colFamily: stores.keySet()) {
         HStore targetStore = stores.get(colFamily);
         targetStore.getFull(key, result);
@@ -1116,7 +1117,7 @@ public class HRegion implements HConstants {
    * @return map of values
    * @throws IOException
    */
-  public Map<Text, byte[]> getClosestRowBefore(final Text row, final long ts)
+  public Map<Text, Cell> getClosestRowBefore(final Text row, final long ts)
   throws IOException{
     // look across all the HStores for this region and determine what the
     // closest key is across all column families, since the data may be sparse
@@ -1150,7 +1151,7 @@ public class HRegion implements HConstants {
     }
 
     // now that we've found our key, get the values
-    TreeMap<Text, byte []> result = new TreeMap<Text, byte[]>();
+    TreeMap<Text, Cell> result = new TreeMap<Text, Cell>();
     for (Text colFamily: stores.keySet()) {
       HStore targetStore = stores.get(colFamily);
       targetStore.getFull(key, result);
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
@@ -905,14 +906,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   /** {@inheritDoc} */
-  public byte [] get(final Text regionName, final Text row,
-    final Text column) throws IOException {
-
+  public Cell get(final Text regionName, final Text row, final Text column)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
       return getRegion(regionName).get(row, column);
-
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -920,14 +919,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   /** {@inheritDoc} */
-  public byte [][] get(final Text regionName, final Text row,
-    final Text column, final int numVersions) throws IOException {
-
+  public Cell[] get(final Text regionName, final Text row,
+    final Text column, final int numVersions)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
       return getRegion(regionName).get(row, column, numVersions);
-
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -935,14 +933,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   /** {@inheritDoc} */
-  public byte [][] get(final Text regionName, final Text row, final Text column,
-    final long timestamp, final int numVersions) throws IOException {
-
+  public Cell[] get(final Text regionName, final Text row, final Text column,
+    final long timestamp, final int numVersions)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
       return getRegion(regionName).get(row, column, timestamp, numVersions);
-
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -951,26 +948,23 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
   /** {@inheritDoc} */
   public HbaseMapWritable getRow(final Text regionName, final Text row)
   throws IOException {
     return getRow(regionName, row, HConstants.LATEST_TIMESTAMP);
   }
 
   /** {@inheritDoc} */
   public HbaseMapWritable getRow(final Text regionName, final Text row, final long ts)
   throws IOException {
-
     checkOpen();
     requestCount.incrementAndGet();
     try {
       HRegion region = getRegion(regionName);
       HbaseMapWritable result = new HbaseMapWritable();
-      Map<Text, byte[]> map = region.getFull(row, ts);
-      for (Map.Entry<Text, byte []> es: map.entrySet()) {
-        result.put(new HStoreKey(row, es.getKey()),
-          new ImmutableBytesWritable(es.getValue()));
+      Map<Text, Cell> map = region.getFull(row, ts);
+      for (Map.Entry<Text, Cell> es: map.entrySet()) {
+        result.put(new HStoreKey(row, es.getKey()), es.getValue());
       }
       return result;
-
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -988,7 +982,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   public HbaseMapWritable getClosestRowBefore(final Text regionName,
     final Text row, final long ts)
   throws IOException {
-
     checkOpen();
     requestCount.incrementAndGet();
     try {
@@ -996,14 +989,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       HRegion region = getRegion(regionName);
       HbaseMapWritable result = new HbaseMapWritable();
       // ask the region for all the data
-      Map<Text, byte[]> map = region.getClosestRowBefore(row, ts);
+      Map<Text, Cell> map = region.getClosestRowBefore(row, ts);
       // convert to a MapWritable
       if (map == null) {
         return null;
       }
-      for (Map.Entry<Text, byte []> es: map.entrySet()) {
-        result.put(new HStoreKey(row, es.getKey()),
-          new ImmutableBytesWritable(es.getValue()));
+      for (Map.Entry<Text, Cell> es: map.entrySet()) {
+        result.put(new HStoreKey(row, es.getKey()), es.getValue());
       }
       return result;
 
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.io.Cell;
 
 
 /**
@@ -157,16 +158,16 @@ public class HStore implements HConstants {
    * @param numVersions
    * @return An array of byte arrays ordered by timestamp.
    */
-  List<byte[]> get(final HStoreKey key, final int numVersions) {
+  List<Cell> get(final HStoreKey key, final int numVersions) {
    this.lock.readLock().lock();
    try {
-     List<byte []> results;
+     List<Cell> results;
      synchronized (memcache) {
        results = internalGet(memcache, key, numVersions);
      }
      synchronized (snapshot) {
        results.addAll(results.size(),
          internalGet(snapshot, key, numVersions - results.size()));
      }
      return results;
 
@@ -183,7 +184,7 @@ public class HStore implements HConstants {
    * @param key
    * @param results
    */
-  void getFull(HStoreKey key, SortedMap<Text, byte[]> results) {
+  void getFull(HStoreKey key, SortedMap<Text, Cell> results) {
    this.lock.readLock().lock();
    try {
      synchronized (memcache) {
@@ -198,14 +199,14 @@ public class HStore implements HConstants {
     }
   }
 
-  private void internalGetFull(SortedMap<HStoreKey, byte []> map, HStoreKey key,
-    SortedMap<Text, byte []> results) {
+  private void internalGetFull(SortedMap<HStoreKey, byte[]> map, HStoreKey key,
+    SortedMap<Text, Cell> results) {
 
    if (map.isEmpty() || key == null) {
      return;
    }
 
-   SortedMap<HStoreKey, byte []> tailMap = map.tailMap(key);
+   SortedMap<HStoreKey, byte[]> tailMap = map.tailMap(key);
    for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
      HStoreKey itKey = es.getKey();
      Text itCol = itKey.getColumn();
@@ -213,7 +214,7 @@ public class HStore implements HConstants {
        byte [] val = tailMap.get(itKey);
 
        if (!HLogEdit.isDeleted(val)) {
-         results.put(itCol, val);
+         results.put(itCol, new Cell(val, itKey.getTimestamp()));
        }
 
      } else if (key.getRow().compareTo(itKey.getRow()) < 0) {
@@ -318,11 +319,11 @@ public class HStore implements HConstants {
    * @return Ordered list of items found in passed <code>map</code>. If no
    * matching values, returns an empty list (does not return null).
    */
-  private ArrayList<byte []> internalGet(
+  private ArrayList<Cell> internalGet(
    final SortedMap<HStoreKey, byte []> map, final HStoreKey key,
    final int numVersions) {
 
-   ArrayList<byte []> result = new ArrayList<byte []>();
+   ArrayList<Cell> result = new ArrayList<Cell>();
    // TODO: If get is of a particular version -- numVersions == 1 -- we
    // should be able to avoid all of the tailmap creations and iterations
    // below.
@@ -331,7 +332,7 @@ public class HStore implements HConstants {
      HStoreKey itKey = es.getKey();
      if (itKey.matchesRowCol(key)) {
        if (!HLogEdit.isDeleted(es.getValue())) {
-         result.add(tailMap.get(itKey));
+         result.add(new Cell(tailMap.get(itKey), itKey.getTimestamp()));
        }
      }
      if (numVersions > 0 && result.size() >= numVersions) {
@@ -1602,7 +1603,7 @@ public class HStore implements HConstants {
   *
   * The returned object should map column names to byte arrays (byte[]).
   */
-  void getFull(HStoreKey key, TreeMap<Text, byte []> results)
+  void getFull(HStoreKey key, TreeMap<Text, Cell> results)
  throws IOException {
    Map<Text, List<Long>> deletes = new HashMap<Text, List<Long>>();
 
@@ -1630,7 +1631,7 @@ public class HStore implements HConstants {
          if(isDeleted(readkey, readval.get(), true, deletes)) {
            break;
          }
-         results.put(new Text(readcol), readval.get());
+         results.put(new Text(readcol), new Cell(readval.get(), readkey.getTimestamp()));
          readval = new ImmutableBytesWritable();
        } else if(key.getRow().compareTo(readkey.getRow()) < 0) {
          break;
@@ -1660,7 +1661,7 @@ public class HStore implements HConstants {
   * @return values for the specified versions
   * @throws IOException
   */
-  byte [][] get(HStoreKey key, int numVersions) throws IOException {
+  Cell[] get(HStoreKey key, int numVersions) throws IOException {
    if (numVersions <= 0) {
      throw new IllegalArgumentException("Number of versions must be > 0");
    }
@@ -1668,10 +1669,10 @@ public class HStore implements HConstants {
    this.lock.readLock().lock();
    try {
      // Check the memcache
-     List<byte[]> results = this.memcache.get(key, numVersions);
+     List<Cell> results = this.memcache.get(key, numVersions);
      // If we got sufficient versions from memcache, return.
      if (results.size() == numVersions) {
-       return ImmutableBytesWritable.toArray(results);
+       return results.toArray(new Cell[results.size()]);
      }
 
      // Keep a list of deleted cell keys. We need this because as we go through
@@ -1702,7 +1703,7 @@ public class HStore implements HConstants {
          continue;
        }
        if (!isDeleted(readkey, readval.get(), true, deletes)) {
-         results.add(readval.get());
+         results.add(new Cell(readval.get(), readkey.getTimestamp()));
          // Perhaps only one version is wanted. I could let this
          // test happen later in the for loop test but it would cost
          // the allocation of an ImmutableBytesWritable.
@@ -1716,7 +1717,7 @@ public class HStore implements HConstants {
          !hasEnoughVersions(numVersions, results);
          readval = new ImmutableBytesWritable()) {
        if (!isDeleted(readkey, readval.get(), true, deletes)) {
-         results.add(readval.get());
+         results.add(new Cell(readval.get(), readkey.getTimestamp()));
        }
      }
    }
@@ -1725,14 +1726,14 @@ public class HStore implements HConstants {
      }
    }
    return results.size() == 0 ?
-     null : ImmutableBytesWritable.toArray(results);
+     null : results.toArray(new Cell[results.size()]);
    } finally {
      this.lock.readLock().unlock();
    }
  }
 
  private boolean hasEnoughVersions(final int numVersions,
-   final List<byte []> results) {
+   final List<Cell> results) {
    return numVersions > 0 && results.size() >= numVersions;
  }
 
@@ -38,6 +38,8 @@ import org.znerd.xmlenc.LineBreak;
 import org.znerd.xmlenc.XMLOutputter;
 
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.Cell;
+
 /**
  * GenericHandler contains some basic common stuff that all the individual
  * REST handler types take advantage of.
@@ -226,7 +228,7 @@ public abstract class GenericHandler {
   * @throws IOException
   */
  protected void outputColumnsXml(final XMLOutputter outputter,
    final Map<Text, byte[]> m)
  throws IllegalStateException, IllegalArgumentException, IOException {
    for (Map.Entry<Text, byte[]> e: m.entrySet()) {
      outputter.startTag(COLUMN);
@@ -239,18 +241,19 @@ public abstract class GenericHandler {
      outputter.endTag();
    }
  }
 
-  protected void outputColumnsMime(final MultiPartResponse mpr,
-    final Map<Text, byte[]> m)
-  throws IOException {
-    for (Map.Entry<Text, byte[]> e: m.entrySet()) {
-      mpr.startPart("application/octet-stream",
-        new String [] {"Content-Description: " + e.getKey().toString(),
-          "Content-Transfer-Encoding: binary",
-          "Content-Length: " + e.getValue().length});
-      mpr.getOut().write(e.getValue());
-    }
-  }
+  // Commented - multipart support is currently nonexistent.
+  // protected void outputColumnsMime(final MultiPartResponse mpr,
+  //   final Map<Text, Cell> m)
+  // throws IOException {
+  //   for (Map.Entry<Text, Cell> e: m.entrySet()) {
+  //     mpr.startPart("application/octet-stream",
+  //       new String [] {"Content-Description: " + e.getKey().toString(),
+  //         "Content-Transfer-Encoding: binary",
+  //         "Content-Length: " + e.getValue().getValue().length});
+  //     mpr.getOut().write(e.getValue().getValue());
+  //   }
+  // }
 
  /*
   * Get an HTable instance by its table name.
@@ -154,7 +154,8 @@ public class ScannerHandler extends GenericHandler {
       outputScannerEntryXML(response, sr);
       break;
     case MIME:
-      outputScannerEntryMime(response, sr);
+      /* outputScannerEntryMime(response, sr);*/
+      doNotAcceptable(response);
       break;
     default:
       doNotAcceptable(response);
@@ -199,48 +200,48 @@ public class ScannerHandler extends GenericHandler {
     outputter.getWriter().close();
   }
 
-  private void outputScannerEntryMime(final HttpServletResponse response,
-    final ScannerRecord sr)
-  throws IOException {
-    response.setStatus(200);
-    // This code ties me to the jetty server.
-    MultiPartResponse mpr = new MultiPartResponse(response);
-    // Content type should look like this for multipart:
-    // Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
-    String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
-      mpr.getBoundary() + "\"";
-    // Setting content type is broken. I'm unable to set parameters on the
-    // content-type; They get stripped. Can't set boundary, etc.
-    // response.addHeader("Content-Type", ct);
-    response.setContentType(ct);
-    // Write row, key-column and timestamp each in its own part.
-    mpr.startPart("application/octet-stream",
-      new String [] {"Content-Description: row",
-        "Content-Transfer-Encoding: binary",
-        "Content-Length: " + sr.getKey().getRow().getBytes().length});
-    mpr.getOut().write(sr.getKey().getRow().getBytes());
-
-    // Usually key-column is empty when scanning.
-    if (sr.getKey().getColumn() != null &&
-        sr.getKey().getColumn().getLength() > 0) {
-      mpr.startPart("application/octet-stream",
-        new String [] {"Content-Description: key-column",
-          "Content-Transfer-Encoding: binary",
-          "Content-Length: " + sr.getKey().getColumn().getBytes().length});
-    }
-    mpr.getOut().write(sr.getKey().getColumn().getBytes());
-    // TODO: Fix. Need to write out the timestamp in the ordained timestamp
-    // format.
-    byte [] timestampBytes = Long.toString(sr.getKey().getTimestamp()).getBytes();
-    mpr.startPart("application/octet-stream",
-      new String [] {"Content-Description: timestamp",
-        "Content-Transfer-Encoding: binary",
-        "Content-Length: " + timestampBytes.length});
-    mpr.getOut().write(timestampBytes);
-    // Write out columns
-    outputColumnsMime(mpr, sr.getValue());
-    mpr.close();
-  }
+  // private void outputScannerEntryMime(final HttpServletResponse response,
+  //   final ScannerRecord sr)
+  // throws IOException {
+  //   response.setStatus(200);
+  //   // This code ties me to the jetty server.
+  //   MultiPartResponse mpr = new MultiPartResponse(response);
+  //   // Content type should look like this for multipart:
+  //   // Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
+  //   String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
+  //     mpr.getBoundary() + "\"";
+  //   // Setting content type is broken. I'm unable to set parameters on the
+  //   // content-type; They get stripped. Can't set boundary, etc.
+  //   // response.addHeader("Content-Type", ct);
+  //   response.setContentType(ct);
+  //   // Write row, key-column and timestamp each in its own part.
+  //   mpr.startPart("application/octet-stream",
+  //     new String [] {"Content-Description: row",
+  //       "Content-Transfer-Encoding: binary",
+  //       "Content-Length: " + sr.getKey().getRow().getBytes().length});
+  //   mpr.getOut().write(sr.getKey().getRow().getBytes());
+  //
+  //   // Usually key-column is empty when scanning.
+  //   if (sr.getKey().getColumn() != null &&
+  //       sr.getKey().getColumn().getLength() > 0) {
+  //     mpr.startPart("application/octet-stream",
+  //       new String [] {"Content-Description: key-column",
+  //         "Content-Transfer-Encoding: binary",
+  //         "Content-Length: " + sr.getKey().getColumn().getBytes().length});
+  //   }
+  //   mpr.getOut().write(sr.getKey().getColumn().getBytes());
+  //   // TODO: Fix. Need to write out the timestamp in the ordained timestamp
+  //   // format.
+  //   byte [] timestampBytes = Long.toString(sr.getKey().getTimestamp()).getBytes();
+  //   mpr.startPart("application/octet-stream",
+  //     new String [] {"Content-Description: timestamp",
+  //       "Content-Transfer-Encoding: binary",
+  //       "Content-Length: " + timestampBytes.length});
+  //   mpr.getOut().write(timestampBytes);
+  //   // Write out columns
+  //   outputColumnsMime(mpr, sr.getValue());
+  //   mpr.close();
+  // }
 
  /*
   * Create scanner
@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
|
||||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||||
import org.apache.hadoop.io.Text;
|
import org.apache.hadoop.io.Text;
|
||||||
import org.apache.hadoop.hbase.client.HTable;
|
import org.apache.hadoop.hbase.client.HTable;
|
||||||
|
import org.apache.hadoop.hbase.io.Cell;
|
||||||
|
|
||||||
import org.mortbay.servlet.MultiPartResponse;
|
import org.mortbay.servlet.MultiPartResponse;
|
||||||
import org.w3c.dom.Document;
|
import org.w3c.dom.Document;
|
||||||
|
@ -133,7 +134,7 @@ public class TableHandler extends GenericHandler {
|
||||||
// They want full row returned.
|
// They want full row returned.
|
||||||
|
|
||||||
// Presumption is that this.table has already been focused on target table.
|
// Presumption is that this.table has already been focused on target table.
|
||||||
Map<Text, byte[]> result = timestampStr == null ?
|
Map<Text, Cell> result = timestampStr == null ?
|
||||||
table.getRow(new Text(row))
|
table.getRow(new Text(row))
|
||||||
: table.getRow(new Text(row), Long.parseLong(timestampStr));
|
: table.getRow(new Text(row), Long.parseLong(timestampStr));
|
||||||
|
|
||||||
|
@ -145,15 +146,13 @@ public class TableHandler extends GenericHandler {
|
||||||
outputRowXml(response, result);
|
outputRowXml(response, result);
|
||||||
break;
|
break;
|
||||||
case MIME:
|
case MIME:
|
||||||
outputRowMime(response, result);
|
|
||||||
break;
|
|
||||||
default:
|
default:
|
||||||
doNotAcceptable(response, "Unsupported Accept Header Content: " +
|
doNotAcceptable(response, "Unsupported Accept Header Content: " +
|
||||||
request.getHeader(CONTENT_TYPE));
|
request.getHeader(CONTENT_TYPE));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
Map<Text, byte[]> prefiltered_result = table.getRow(new Text(row));
|
Map<Text, Cell> prefiltered_result = table.getRow(new Text(row));
|
||||||
|
|
||||||
if (prefiltered_result == null || prefiltered_result.size() == 0) {
|
if (prefiltered_result == null || prefiltered_result.size() == 0) {
|
||||||
doNotFound(response, "Row not found!");
|
doNotFound(response, "Row not found!");
|
||||||
|
@@ -166,10 +165,12 @@ public class TableHandler extends GenericHandler {
 }

 // output map that will contain the filtered results
-Map<Text, byte[]> m = new HashMap<Text, byte[]>();
+Map<Text, Cell> m = new HashMap<Text, Cell>();

 // get an array of all the columns retrieved
-Object[] columns_retrieved = prefiltered_result.keySet().toArray();
+Text[] columns_retrieved =
+prefiltered_result.keySet().toArray(
+new Text[prefiltered_result.keySet().size()]);

 // copy over those cells with requested column names
 for(int i = 0; i < columns_retrieved.length; i++){
@@ -184,8 +185,6 @@ public class TableHandler extends GenericHandler {
 outputRowXml(response, m);
 break;
 case MIME:
-outputRowMime(response, m);
-break;
 default:
 doNotAcceptable(response, "Unsupported Accept Header Content: " +
 request.getHeader(CONTENT_TYPE));
@@ -201,13 +200,17 @@ public class TableHandler extends GenericHandler {
 * @throws IOException
 */
 private void outputRowXml(final HttpServletResponse response,
-final Map<Text, byte[]> result)
+final Map<Text, Cell> result)
 throws IOException {
 setResponseHeader(response, result.size() > 0? 200: 204,
 ContentType.XML.toString());
 XMLOutputter outputter = getXMLOutputter(response.getWriter());
 outputter.startTag(ROW);
-outputColumnsXml(outputter, result);
+HashMap<Text, byte[]> converted = new HashMap<Text, byte[]>();
+for (Map.Entry<Text, Cell> entry : result.entrySet()) {
+converted.put(entry.getKey(), entry.getValue().getValue());
+}
+outputColumnsXml(outputter, converted);
 outputter.endTag();
 outputter.endDocument();
 outputter.getWriter().close();
@@ -218,23 +221,23 @@ public class TableHandler extends GenericHandler {
 * @param result
 * Output the results contained in result as a multipart/related response.
 */
-private void outputRowMime(final HttpServletResponse response,
-final Map<Text, byte[]> result)
-throws IOException {
-response.setStatus(result.size() > 0? 200: 204);
-// This code ties me to the jetty server.
-MultiPartResponse mpr = new MultiPartResponse(response);
-// Content type should look like this for multipart:
-// Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
-String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
-mpr.getBoundary() + "\"";
-// Setting content type is broken. I'm unable to set parameters on the
-// content-type; They get stripped. Can't set boundary, etc.
-// response.addHeader("Content-Type", ct);
-response.setContentType(ct);
-outputColumnsMime(mpr, result);
-mpr.close();
-}
+// private void outputRowMime(final HttpServletResponse response,
+// final Map<Text, Cell> result)
+// throws IOException {
+// response.setStatus(result.size() > 0? 200: 204);
+// // This code ties me to the jetty server.
+// MultiPartResponse mpr = new MultiPartResponse(response);
+// // Content type should look like this for multipart:
+// // Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
+// String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
+// mpr.getBoundary() + "\"";
+// // Setting content type is broken. I'm unable to set parameters on the
+// // content-type; They get stripped. Can't set boundary, etc.
+// // response.addHeader("Content-Type", ct);
+// response.setContentType(ct);
+// outputColumnsMime(mpr, result);
+// mpr.close();
+// }

 /*
 * @param request
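The Map<Text, Cell> to Map<Text, byte[]> copy seen in outputRowXml above recurs throughout this patch wherever the new Cell-returning reads feed older byte[]-based writers. A minimal standalone sketch of the pattern (the class and method names here are illustrative only, not part of the patch; it assumes nothing beyond the Cell.getValue() accessor shown in these hunks):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.io.Text;

    public class CellMaps {
      /** Unwrap each Cell to its raw value bytes for legacy byte[]-based callers. */
      public static Map<Text, byte[]> toBytesMap(final Map<Text, Cell> row) {
        Map<Text, byte[]> converted = new HashMap<Text, byte[]>();
        for (Map.Entry<Text, Cell> entry : row.entrySet()) {
          converted.put(entry.getKey(), entry.getValue().getValue());
        }
        return converted;
      }
    }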
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.thrift.generated.RegionDescriptor;
 import org.apache.hadoop.hbase.thrift.generated.ScanEntry;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.Cell;

 import com.facebook.thrift.TException;
 import com.facebook.thrift.protocol.TBinaryProtocol;
@@ -84,7 +85,7 @@ public class ThriftServer {
 * name of table
 * @return HTable object
 * @throws IOException
-* @throws IOException
+* @throws IOError
 */
 protected HTable getTable(final byte[] tableName) throws IOError,
 IOException {
@@ -144,6 +145,7 @@ public class ThriftServer {
 * UTF-8 encoded bytes
 * @return Text object
 * @throws IllegalArgument
+* @throws IOError
 */
 Text getText(byte[] buf) throws IOError {
 try {
@@ -200,11 +202,11 @@ public class ThriftServer {
 }
 try {
 HTable table = getTable(tableName);
-byte[] value = table.get(getText(row), getText(column));
+Cell value = table.get(getText(row), getText(column));
 if (value == null) {
 throw new NotFound();
 }
-return value;
+return value.getValue();
 } catch (IOException e) {
 throw new IOError(e.getMessage());
 }
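On the caller side, the single-cell path above now receives a Cell and unwraps it explicitly. A hedged sketch of reading through the changed HTable API (row and column are placeholder Text values; the getTimestamp() accessor is inferred from the HBASE-489 summary and does not appear in these hunks — only getValue() does):

    Cell cell = table.get(row, column);
    if (cell != null) {
      byte [] value = cell.getValue();     // the stored bytes, as before
      // long ts = cell.getTimestamp();    // assumed accessor: the timestamp now travels with the value
    }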
@@ -219,11 +221,16 @@ public class ThriftServer {
 }
 try {
 HTable table = getTable(tableName);
-byte[][] values = table.get(getText(row), getText(column), numVersions);
+Cell[] values =
+table.get(getText(row), getText(column), numVersions);
 if (values == null) {
 throw new NotFound();
 }
-return new ArrayList<byte[]>(Arrays.asList(values));
+ArrayList<byte[]> list = new ArrayList<byte[]>();
+for (int i = 0; i < values.length; i++) {
+list.add(values[i].getValue());
+}
+return list;
 } catch (IOException e) {
 throw new IOError(e.getMessage());
 }
@@ -239,12 +246,16 @@ public class ThriftServer {
 }
 try {
 HTable table = getTable(tableName);
-byte[][] values = table.get(getText(row), getText(column), timestamp,
-numVersions);
+Cell[] values = table.get(getText(row),
+getText(column), timestamp, numVersions);
 if (values == null) {
 throw new NotFound();
 }
-return new ArrayList<byte[]>(Arrays.asList(values));
+ArrayList<byte[]> list = new ArrayList<byte[]>();
+for (int i = 0; i < values.length; i++) {
+list.add(values[i].getValue());
+}
+return list;
 } catch (IOException e) {
 throw new IOError(e.getMessage());
 }
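The two multi-version hunks above repeat the same Cell[]-to-List<byte[]> unwrapping loop. Were it factored out, a helper might look like this (hypothetical, not part of the patch; it assumes only Cell.getValue()):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.io.Cell;

    public class CellLists {
      /** Collect the raw value bytes of each Cell, preserving version order. */
      public static List<byte[]> toValueList(final Cell[] cells) {
        ArrayList<byte[]> list = new ArrayList<byte[]>(cells.length);
        for (int i = 0; i < cells.length; i++) {
          list.add(cells[i].getValue());
        }
        return list;
      }
    }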
@@ -263,11 +274,12 @@ public class ThriftServer {
 }
 try {
 HTable table = getTable(tableName);
-SortedMap<Text, byte[]> values = table.getRow(getText(row), timestamp);
+SortedMap<Text, Cell> values =
+table.getRow(getText(row), timestamp);
 // copy the map from type <Text, byte[]> to <byte[], byte[]>
 HashMap<byte[], byte[]> returnValues = new HashMap<byte[], byte[]>();
-for (Entry<Text, byte[]> e : values.entrySet()) {
-returnValues.put(e.getKey().getBytes(), e.getValue());
+for (Entry<Text, Cell> e : values.entrySet()) {
+returnValues.put(e.getKey().getBytes(), e.getValue().getValue());
 }
 return returnValues;
 } catch (IOException e) {
@@ -426,7 +426,8 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
 t.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(startCode));
 t.commit(lockid);
 // Assert added.
-byte [] bytes = t.get(region.getRegionName(), HConstants.COL_REGIONINFO);
+byte [] bytes =
+t.get(region.getRegionName(), HConstants.COL_REGIONINFO).getValue();
 HRegionInfo hri = Writables.getHRegionInfo(bytes);
 assertEquals(region.getRegionId(), hri.getRegionId());
 if (LOG.isDebugEnabled()) {
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.regionserver.HRegion;

 /**
@@ -340,7 +340,7 @@ public abstract class HBaseTestCase extends TestCase {
 * @return value for row/column pair
 * @throws IOException
 */
-public byte [] get(Text row, Text column) throws IOException;
+public Cell get(Text row, Text column) throws IOException;
 /**
 * @param row
 * @param column
@@ -348,7 +348,7 @@ public abstract class HBaseTestCase extends TestCase {
 * @return value for row/column pair for number of versions requested
 * @throws IOException
 */
-public byte [][] get(Text row, Text column, int versions) throws IOException;
+public Cell[] get(Text row, Text column, int versions) throws IOException;
 /**
 * @param row
 * @param column
@@ -357,7 +357,7 @@ public abstract class HBaseTestCase extends TestCase {
 * @return value for row/column/timestamp tuple for number of versions
 * @throws IOException
 */
-public byte [][] get(Text row, Text column, long ts, int versions)
+public Cell[] get(Text row, Text column, long ts, int versions)
 throws IOException;
 /**
 * @param lockid
@@ -478,16 +478,16 @@ public abstract class HBaseTestCase extends TestCase {
 return this.region.getScanner(columns, firstRow, ts, null);
 }
 /** {@inheritDoc} */
-public byte[] get(Text row, Text column) throws IOException {
+public Cell get(Text row, Text column) throws IOException {
 return this.region.get(row, column);
 }
 /** {@inheritDoc} */
-public byte[][] get(Text row, Text column, int versions) throws IOException {
+public Cell[] get(Text row, Text column, int versions) throws IOException {
 return this.region.get(row, column, versions);
 }
 /** {@inheritDoc} */
-public byte[][] get(Text row, Text column, long ts, int versions)
+public Cell[] get(Text row, Text column, long ts, int versions)
 throws IOException {
 return this.region.get(row, column, ts, versions);
 }
 /**
@@ -495,7 +495,7 @@ public abstract class HBaseTestCase extends TestCase {
 * @return values for each column in the specified row
 * @throws IOException
 */
-public Map<Text, byte []> getFull(Text row) throws IOException {
+public Map<Text, Cell> getFull(Text row) throws IOException {
 return region.getFull(row);
 }
 /** {@inheritDoc} */
@@ -550,17 +550,34 @@ public abstract class HBaseTestCase extends TestCase {
 return this.table.obtainScanner(columns, firstRow, ts, null);
 }
 /** {@inheritDoc} */
-public byte[] get(Text row, Text column) throws IOException {
+public Cell get(Text row, Text column) throws IOException {
 return this.table.get(row, column);
 }
 /** {@inheritDoc} */
-public byte[][] get(Text row, Text column, int versions) throws IOException {
+public Cell[] get(Text row, Text column, int versions) throws IOException {
 return this.table.get(row, column, versions);
 }
 /** {@inheritDoc} */
-public byte[][] get(Text row, Text column, long ts, int versions)
+public Cell[] get(Text row, Text column, long ts, int versions)
 throws IOException {
 return this.table.get(row, column, ts, versions);
 }
 }
+
+protected void assertCellEquals(final HRegion region, final Text row,
+final Text column, final long timestamp, final String value)
+throws IOException {
+Map<Text, Cell> result = region.getFull(row, timestamp);
+Cell cell_value = result.get(column);
+if(value == null){
+assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
+} else {
+if (cell_value == null) {
+fail(column.toString() + " at timestamp " + timestamp +
+"\" was expected to be \"" + value + " but was null");
+}
+assertEquals(column.toString() + " at timestamp "
++ timestamp, value, new String(cell_value.getValue()));
+}
+}
 }
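The new assertCellEquals helper hoists into HBaseTestCase the cell-comparison logic that TestDeleteAll, TestDeleteFamily, and TestGet2 each duplicated (see the removals later in this commit). Its call shape, mirroring those call sites (region, row, colA, t0/t1, flush, and cellData are test fixtures belonging to the callers):

    assertCellEquals(region, row, colA, t0, cellData(0, flush));  // value expected present at t0
    assertCellEquals(region, row, colA, t1, null);                // cell expected absent at t1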
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;

@@ -206,10 +207,10 @@ public class TestBloomFilters extends HBaseClusterTestCase {


 for(int i = 0; i < testKeys.length; i++) {
-byte[] value = table.get(testKeys[i], CONTENTS);
-if(value != null && value.length != 0) {
+Cell value = table.get(testKeys[i], CONTENTS);
+if(value != null && value.getValue().length != 0) {
 LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-new String(value, HConstants.UTF8_ENCODING));
+new String(value.getValue(), HConstants.UTF8_ENCODING));
 }
 }
 }
@@ -271,10 +272,10 @@ public class TestBloomFilters extends HBaseClusterTestCase {
 }

 for(int i = 0; i < testKeys.length; i++) {
-byte[] value = table.get(testKeys[i], CONTENTS);
-if(value != null && value.length != 0) {
+Cell value = table.get(testKeys[i], CONTENTS);
+if(value != null && value.getValue().length != 0) {
 LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-new String(value, HConstants.UTF8_ENCODING));
+new String(value.getValue(), HConstants.UTF8_ENCODING));
 }
 }
 }
@@ -115,7 +115,7 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
 for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
 Text rowlabel = new Text("row_" + k);

-byte bodydata[] = table.get(rowlabel, CONTENTS_BASIC);
+byte bodydata[] = table.get(rowlabel, CONTENTS_BASIC).getValue();
 assertNotNull("no data for row " + rowlabel + "/" + CONTENTS_BASIC,
 bodydata);
 String bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
@@ -125,7 +125,7 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
 bodystr + "'", teststr.compareTo(bodystr) == 0);

 collabel = new Text(ANCHORNUM + k);
-bodydata = table.get(rowlabel, collabel);
+bodydata = table.get(rowlabel, collabel).getValue();
 assertNotNull("no data for row " + rowlabel + "/" + collabel, bodydata);
 bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
 teststr = ANCHORSTR + k;
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
 * Test compactions
@@ -101,9 +102,10 @@ public class TestCompaction extends HBaseTestCase {
 // Assert > 3 and then after compaction, assert that only 3 versions
 // available.
 addContent(new HRegionIncommon(r), COLUMN_FAMILY);
-byte [][] bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
+Cell[] cellValues =
+r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
 // Assert that I can get > 5 versions (Should be at least 5 in there).
-assertTrue(bytes.length >= 5);
+assertTrue(cellValues.length >= 5);
 // Try to run compaction concurrent with a thread flush just to see that
 // we can.
 final HRegion region = this.r;
@@ -142,36 +144,36 @@ public class TestCompaction extends HBaseTestCase {
 // Increment the least significant character so we get to next row.
 secondRowBytes[START_KEY_BYTES.length - 1]++;
 Text secondRow = new Text(secondRowBytes);
-bytes = this.r.get(secondRow, COLUMN_FAMILY_TEXT, 100/*Too many*/);
-LOG.info("Count of " + secondRow + ": " + bytes.length);
+cellValues = r.get(secondRow, COLUMN_FAMILY_TEXT, 100/*Too many*/);
+LOG.info("Count of " + secondRow + ": " + cellValues.length);
 // Commented out because fails on an hp+ubuntu single-processor w/ 1G and
 // "Intel(R) Pentium(R) 4 CPU 3.20GHz" though passes on all local
 // machines and even on hudson. On said machine, its reporting in the
 // LOG line above that there are 3 items in row so it should pass the
 // below test.
-assertTrue(bytes.length == 3 || bytes.length == 4);
+assertTrue(cellValues.length == 3 || cellValues.length == 4);

 // Now add deletes to memcache and then flush it. That will put us over
 // the compaction threshold of 3 store files. Compacting these store files
 // should result in a compacted store file that has no references to the
 // deleted row.
-this.r.deleteAll(STARTROW, COLUMN_FAMILY_TEXT, System.currentTimeMillis());
+r.deleteAll(STARTROW, COLUMN_FAMILY_TEXT, System.currentTimeMillis());
 // Now, before compacting, remove all instances of the first row so can
 // verify that it is removed as we compact.
 // Assert all delted.
-assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
-this.r.flushcache();
-assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
+assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
+r.flushcache();
+assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
 // Add a bit of data and flush it so we for sure have the compaction limit
 // for store files. Usually by this time we will have but if compaction
 // included the flush that ran 'concurrently', there may be just the
 // compacted store and the flush above when we added deletes. Add more
 // content to be certain.
 createSmallerStoreFile(this.r);
-assertTrue(this.r.compactIfNeeded());
+assertTrue(r.compactIfNeeded());
 // Assert that the first row is still deleted.
-bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
-assertNull(bytes);
+cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
+assertNull(cellValues);
 // Assert the store files do not have the first record 'aaa' keys in them.
 for (MapFile.Reader reader:
 this.r.stores.get(COLUMN_FAMILY_TEXT_MINUS_COLON).getReaders()) {
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
 * Test the functionality of deleteAll.
@@ -120,42 +121,25 @@ public class TestDeleteAll extends HBaseTestCase {
 // call delete all at a timestamp, make sure only the most recent stuff is left behind
 region.deleteAll(row, t1);
 if (flush) {region_incommon.flushcache();}
-assertCellValueEquals(region, row, colA, t0, cellData(0, flush));
-assertCellValueEquals(region, row, colA, t1, null);
-assertCellValueEquals(region, row, colA, t2, null);
-assertCellValueEquals(region, row, colD, t0, cellData(0, flush));
-assertCellValueEquals(region, row, colD, t1, null);
-assertCellValueEquals(region, row, colD, t2, null);
+assertCellEquals(region, row, colA, t0, cellData(0, flush));
+assertCellEquals(region, row, colA, t1, null);
+assertCellEquals(region, row, colA, t2, null);
+assertCellEquals(region, row, colD, t0, cellData(0, flush));
+assertCellEquals(region, row, colD, t1, null);
+assertCellEquals(region, row, colD, t2, null);

 // call delete all w/o a timestamp, make sure nothing is left.
 region.deleteAll(row, HConstants.LATEST_TIMESTAMP);
 if (flush) {region_incommon.flushcache();}
-assertCellValueEquals(region, row, colA, t0, null);
-assertCellValueEquals(region, row, colA, t1, null);
-assertCellValueEquals(region, row, colA, t2, null);
-assertCellValueEquals(region, row, colD, t0, null);
-assertCellValueEquals(region, row, colD, t1, null);
-assertCellValueEquals(region, row, colD, t2, null);
+assertCellEquals(region, row, colA, t0, null);
+assertCellEquals(region, row, colA, t1, null);
+assertCellEquals(region, row, colA, t2, null);
+assertCellEquals(region, row, colD, t0, null);
+assertCellEquals(region, row, colD, t1, null);
+assertCellEquals(region, row, colD, t2, null);

 }

-private void assertCellValueEquals(final HRegion region, final Text row,
-final Text column, final long timestamp, final String value)
-throws IOException {
-Map<Text, byte[]> result = region.getFull(row, timestamp);
-byte[] cell_value = result.get(column);
-if(value == null){
-assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
-} else {
-if (cell_value == null) {
-fail(column.toString() + " at timestamp " + timestamp +
-"\" was expected to be \"" + value + " but was null");
-}
-assertEquals(column.toString() + " at timestamp "
-+ timestamp, value, new String(cell_value));
-}
-}
-
 private String cellData(int tsNum, boolean flush){
 return "t" + tsNum + " data" + (flush ? " - with flush" : "");
 }
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
 * Test the functionality of deleteFamily.
@@ -117,15 +118,15 @@ public class TestDeleteFamily extends HBaseTestCase {
 // most recent for A,B,C should be fine
 // A,B at older timestamps should be gone
 // C should be fine for older timestamps
-assertCellValueEquals(region, row, colA, t0, cellData(0, flush));
-assertCellValueEquals(region, row, colA, t1, null);
-assertCellValueEquals(region, row, colA, t2, null);
-assertCellValueEquals(region, row, colB, t0, cellData(0, flush));
-assertCellValueEquals(region, row, colB, t1, null);
-assertCellValueEquals(region, row, colB, t2, null);
-assertCellValueEquals(region, row, colC, t0, cellData(0, flush));
-assertCellValueEquals(region, row, colC, t1, cellData(1, flush));
-assertCellValueEquals(region, row, colC, t2, cellData(2, flush));
+assertCellEquals(region, row, colA, t0, cellData(0, flush));
+assertCellEquals(region, row, colA, t1, null);
+assertCellEquals(region, row, colA, t2, null);
+assertCellEquals(region, row, colB, t0, cellData(0, flush));
+assertCellEquals(region, row, colB, t1, null);
+assertCellEquals(region, row, colB, t2, null);
+assertCellEquals(region, row, colC, t0, cellData(0, flush));
+assertCellEquals(region, row, colC, t1, cellData(1, flush));
+assertCellEquals(region, row, colC, t2, cellData(2, flush));

 // call delete family w/o a timestamp, make sure nothing is left except for
 // column C.
@@ -133,31 +134,14 @@ public class TestDeleteFamily extends HBaseTestCase {
 if (flush) {region_incommon.flushcache();}
 // A,B for latest timestamp should be gone
 // C should still be fine
-assertCellValueEquals(region, row, colA, t0, null);
-assertCellValueEquals(region, row, colB, t0, null);
-assertCellValueEquals(region, row, colC, t0, cellData(0, flush));
-assertCellValueEquals(region, row, colC, t1, cellData(1, flush));
-assertCellValueEquals(region, row, colC, t2, cellData(2, flush));
+assertCellEquals(region, row, colA, t0, null);
+assertCellEquals(region, row, colB, t0, null);
+assertCellEquals(region, row, colC, t0, cellData(0, flush));
+assertCellEquals(region, row, colC, t1, cellData(1, flush));
+assertCellEquals(region, row, colC, t2, cellData(2, flush));

 }

-private void assertCellValueEquals(final HRegion region, final Text row,
-final Text column, final long timestamp, final String value)
-throws IOException {
-Map<Text, byte[]> result = region.getFull(row, timestamp);
-byte[] cell_value = result.get(column);
-if(value == null){
-assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
-} else {
-if (cell_value == null) {
-fail(column.toString() + " at timestamp " + timestamp +
-"\" was expected to be \"" + value + " but was null");
-}
-assertEquals(column.toString() + " at timestamp "
-+ timestamp, value, new String(cell_value));
-}
-}
-
 private String cellData(int tsNum, boolean flush){
 return "t" + tsNum + " data" + (flush ? " - with flush" : "");
 }
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /** Test case for get */
 public class TestGet extends HBaseTestCase {
@@ -53,7 +53,7 @@ public class TestGet extends HBaseTestCase {
 private void verifyGet(final HRegionIncommon r, final String expectedServer)
 throws IOException {
 // This should return a value because there is only one family member
-byte [] value = r.get(ROW_KEY, CONTENTS);
+Cell value = r.get(ROW_KEY, CONTENTS);
 assertNotNull(value);

 // This should not return a value because there are multiple family members
@@ -61,13 +61,13 @@ public class TestGet extends HBaseTestCase {
 assertNull(value);

 // Find out what getFull returns
-Map<Text, byte []> values = r.getFull(ROW_KEY);
+Map<Text, Cell> values = r.getFull(ROW_KEY);

 // assertEquals(4, values.keySet().size());
 for(Iterator<Text> i = values.keySet().iterator(); i.hasNext(); ) {
 Text column = i.next();
 if (column.equals(HConstants.COL_SERVER)) {
-String server = Writables.bytesToString(values.get(column));
+String server = Writables.bytesToString(values.get(column).getValue());
 assertEquals(expectedServer, server);
 LOG.info(server);
 }
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.io.Cell;

 /**
 * {@link TestGet} is a medley of tests of get all done up as a single test.
@@ -127,14 +127,14 @@ public class TestGet2 extends HBaseTestCase {
 region_incommon.put(lockid, COLUMNS[0], "new text".getBytes());
 region_incommon.commit(lockid, right_now);

-assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
-assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+assertCellEquals(region, t, COLUMNS[0], right_now, "new text");
+assertCellEquals(region, t, COLUMNS[0], one_second_ago, "old text");

 // Force a flush so store files come into play.
 region_incommon.flushcache();

-assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
-assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+assertCellEquals(region, t, COLUMNS[0], right_now, "new text");
+assertCellEquals(region, t, COLUMNS[0], one_second_ago, "old text");

 } finally {
 if (region != null) {
@@ -186,33 +186,33 @@ public class TestGet2 extends HBaseTestCase {

 // try finding "015"
 Text t15 = new Text("015");
-Map<Text, byte[]> results =
+Map<Text, Cell> results =
 region.getClosestRowBefore(t15, HConstants.LATEST_TIMESTAMP);
-assertEquals(new String(results.get(COLUMNS[0])), "t10 bytes");
+assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t10 bytes");

 // try "020", we should get that row exactly
 results = region.getClosestRowBefore(t20, HConstants.LATEST_TIMESTAMP);
-assertEquals(new String(results.get(COLUMNS[0])), "t20 bytes");
+assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t20 bytes");

 // try "050", should get stuff from "040"
 Text t50 = new Text("050");
 results = region.getClosestRowBefore(t50, HConstants.LATEST_TIMESTAMP);
-assertEquals(new String(results.get(COLUMNS[0])), "t40 bytes");
+assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t40 bytes");

 // force a flush
 region.flushcache();

 // try finding "015"
 results = region.getClosestRowBefore(t15, HConstants.LATEST_TIMESTAMP);
-assertEquals(new String(results.get(COLUMNS[0])), "t10 bytes");
+assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t10 bytes");

 // try "020", we should get that row exactly
 results = region.getClosestRowBefore(t20, HConstants.LATEST_TIMESTAMP);
-assertEquals(new String(results.get(COLUMNS[0])), "t20 bytes");
+assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t20 bytes");

 // try "050", should get stuff from "040"
 results = region.getClosestRowBefore(t50, HConstants.LATEST_TIMESTAMP);
-assertEquals(new String(results.get(COLUMNS[0])), "t40 bytes");
+assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t40 bytes");
 } finally {
 if (region != null) {
 try {
@@ -224,26 +224,18 @@ public class TestGet2 extends HBaseTestCase {
 }
 }
 }

-
-private void assertCellValueEquals(final HRegion region, final Text row,
-final Text column, final long timestamp, final String value)
-throws IOException {
-Map<Text, byte[]> result = region.getFull(row, timestamp);
-assertEquals("cell value at a given timestamp", new String(result.get(column)), value);
-}
-
 private void assertColumnsPresent(final HRegion r, final Text row)
 throws IOException {
-Map<Text, byte[]> result = r.getFull(row);
+Map<Text, Cell> result = r.getFull(row);
 int columnCount = 0;
-for (Map.Entry<Text, byte[]> e: result.entrySet()) {
+for (Map.Entry<Text, Cell> e: result.entrySet()) {
 columnCount++;
 String column = e.getKey().toString();
 boolean legitColumn = false;
 for (int i = 0; i < COLUMNS.length; i++) {
 // Assert value is same as row. This is 'nature' of the data added.
-assertTrue(row.equals(new Text(e.getValue())));
+assertTrue(row.equals(new Text(e.getValue().getValue())));
 if (COLUMNS[i].equals(new Text(column))) {
 legitColumn = true;
 break;
@@ -30,6 +30,7 @@ import org.apache.hadoop.io.Text;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.Cell;

 /** memcache test case */
 public class TestHMemcache extends TestCase {
@@ -100,9 +101,8 @@ public class TestHMemcache extends TestCase {
 }
 }

-private void isExpectedRow(final int rowIndex, TreeMap<Text, byte []> row)
+private void isExpectedRowWithoutTimestamps(final int rowIndex, TreeMap<Text, byte[]> row)
 throws UnsupportedEncodingException {

 int i = 0;
 for (Text colname: row.keySet()) {
 String expectedColname = getColumnName(rowIndex, i++).toString();
@@ -118,6 +118,16 @@ public class TestHMemcache extends TestCase {
 }
 }

+private void isExpectedRow(final int rowIndex, TreeMap<Text, Cell> row)
+throws UnsupportedEncodingException {
+TreeMap<Text, byte[]> converted = new TreeMap<Text, byte[]>();
+for (Map.Entry<Text, Cell> entry : row.entrySet()) {
+converted.put(entry.getKey(),
+entry.getValue() == null ? null : entry.getValue().getValue());
+}
+isExpectedRowWithoutTimestamps(rowIndex, converted);
+}
+
 /** Test getFull from memcache
 * @throws UnsupportedEncodingException
 */
@@ -125,7 +135,7 @@ public class TestHMemcache extends TestCase {
 addRows(this.hmemcache);
 for (int i = 0; i < ROW_COUNT; i++) {
 HStoreKey hsk = new HStoreKey(getRowName(i));
-TreeMap<Text, byte []> all = new TreeMap<Text, byte[]>();
+TreeMap<Text, Cell> all = new TreeMap<Text, Cell>();
 this.hmemcache.getFull(hsk, all);
 isExpectedRow(i, all);
 }
@@ -157,7 +167,7 @@ public class TestHMemcache extends TestCase {
 for(Map.Entry<Text, byte []> e: results.entrySet() ) {
 row.put(e.getKey(), e.getValue());
 }
-isExpectedRow(i, row);
+isExpectedRowWithoutTimestamps(i, row);
 // Clear out set. Otherwise row results accumulate.
 results.clear();
 }
@@ -149,7 +149,7 @@ implements RegionUnavailableListener {
 for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
 Text rowlabel = new Text("row_" + k);

-byte [] bodydata = region.get(rowlabel, CONTENTS_BASIC);
+byte [] bodydata = region.get(rowlabel, CONTENTS_BASIC).getValue();
 assertNotNull(bodydata);
 String bodystr = new String(bodydata, HConstants.UTF8_ENCODING).trim();
 String teststr = CONTENTSTR + k;
@@ -157,7 +157,7 @@ implements RegionUnavailableListener {
 + "), expected: '" + teststr + "' got: '" + bodystr + "'",
 bodystr, teststr);
 collabel = new Text(ANCHORNUM + k);
-bodydata = region.get(rowlabel, collabel);
+bodydata = region.get(rowlabel, collabel).getValue();
 bodystr = new String(bodydata, HConstants.UTF8_ENCODING).trim();
 teststr = ANCHORSTR + k;
 assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
@@ -137,7 +137,7 @@ public class TestScanner extends HBaseTestCase {

 /** Use get to retrieve the HRegionInfo and validate it */
 private void getRegionInfo() throws IOException {
-byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO);
+byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO).getValue();
 validateRegionInfo(bytes);
 }

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
 * {@Link TestHRegion} does a split but this TestCase adds testing of fast
@@ -182,10 +183,10 @@ public class TestSplit extends MultiRegionTable {
 private void assertGet(final HRegion r, final String family, final Text k)
 throws IOException {
 // Now I have k, get values out and assert they are as expected.
-byte [][] results = r.get(k, new Text(family),
+Cell[] results = r.get(k, new Text(family),
 Integer.MAX_VALUE);
 for (int j = 0; j < results.length; j++) {
-Text tmp = new Text(results[j]);
+Text tmp = new Text(results[j].getValue());
 // Row should be equal to value every time.
 assertEquals(k.toString(), tmp.toString());
 }
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.io.Cell;

 /**
 * Tests user specifiable time stamps putting, getting and scanning. Also
@@ -195,9 +196,9 @@ public class TestTimestamp extends HBaseTestCase {
 private void assertOnlyLatest(final Incommon incommon,
 final long currentTime)
 throws IOException {
-byte [][] bytesBytes = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
-assertEquals(1, bytesBytes.length);
-long time = Writables.bytesToLong(bytesBytes[0]);
+Cell[] cellValues = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
+assertEquals(1, cellValues.length);
+long time = Writables.bytesToLong(cellValues[0].getValue());
 assertEquals(time, currentTime);
 assertNull(incommon.get(ROW, COLUMN, T1, 3 /*Too many*/));
 assertTrue(assertScanContentTimestamp(incommon, T1) == 0);
@@ -214,20 +215,20 @@ public class TestTimestamp extends HBaseTestCase {
 private void assertVersions(final Incommon incommon, final long [] tss)
 throws IOException {
 // Assert that 'latest' is what we expect.
-byte [] bytes = incommon.get(ROW, COLUMN);
+byte [] bytes = incommon.get(ROW, COLUMN).getValue();
 assertEquals(Writables.bytesToLong(bytes), tss[0]);
 // Now assert that if we ask for multiple versions, that they come out in
 // order.
-byte [][] bytesBytes = incommon.get(ROW, COLUMN, tss.length);
-assertEquals(tss.length, bytesBytes.length);
-for (int i = 0; i < bytesBytes.length; i++) {
-long ts = Writables.bytesToLong(bytesBytes[i]);
+Cell[] cellValues = incommon.get(ROW, COLUMN, tss.length);
+assertEquals(tss.length, cellValues.length);
+for (int i = 0; i < cellValues.length; i++) {
+long ts = Writables.bytesToLong(cellValues[i].getValue());
 assertEquals(ts, tss[i]);
 }
 // Specify a timestamp get multiple versions.
-bytesBytes = incommon.get(ROW, COLUMN, tss[0], bytesBytes.length - 1);
-for (int i = 1; i < bytesBytes.length; i++) {
-long ts = Writables.bytesToLong(bytesBytes[i]);
+cellValues = incommon.get(ROW, COLUMN, tss[0], cellValues.length - 1);
+for (int i = 1; i < cellValues.length; i++) {
+long ts = Writables.bytesToLong(cellValues[i].getValue());
 assertEquals(ts, tss[i]);
 }
 // Test scanner returns expected version
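These timestamp tests still recover the write time by decoding the stored payload (Writables.bytesToLong(cellValues[i].getValue())) because the test deliberately writes the timestamp as the cell's value. Since HBASE-489's stated purpose is to transport the cell timestamp together with the cell value, later callers could presumably read it from the Cell directly instead of encoding it into the payload; the accessor below is an assumption, not shown anywhere in this diff:

    long ts = cellValues[i].getTimestamp();  // assumed Cell accessor per the commit summary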