HBASE-489 CellValue class for transporting cell timestamp with cell value simultaneously
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@635033 13f79535-47bb-0310-9956-ffa450edef68
parent da6dde97ce · commit 935d300957
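At a glance: reads that previously returned bare value bytes (byte[] or byte[][]) now return Cell or Cell[], pairing each value with the timestamp it was stored under. A minimal before/after sketch of the client-facing change (the row and column names are hypothetical, invented for illustration):

    // before this commit: value only, timestamp lost
    byte[] oldValue = table.get(new Text("row1"), new Text("info:data"));

    // after this commit: value plus its timestamp travel together
    Cell cell = table.get(new Text("row1"), new Text("info:data"));
    byte[] value = cell.getValue();
    long when = cell.getTimestamp();

The per-file hunks follow.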
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * A non-instantiable class that manages connections to multiple tables in
@@ -358,13 +359,12 @@ public class HConnectionManager implements HConstants {
    * Convenience method for turning a MapWritable into the underlying
    * SortedMap we all know and love.
    */
-  private SortedMap<Text, byte[]> sortedMapFromMapWritable(
+  private SortedMap<Text, Cell> sortedMapFromMapWritable(
       HbaseMapWritable writable) {
-    SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+    SortedMap<Text, Cell> results = new TreeMap<Text, Cell>();
     for (Map.Entry<Writable, Writable> e: writable.entrySet()) {
       HStoreKey key = (HStoreKey) e.getKey();
-      results.put(key.getColumn(),
-          ((ImmutableBytesWritable) e.getValue()).get());
+      results.put(key.getColumn(), (Cell)e.getValue());
     }
 
     return results;
@@ -423,19 +423,19 @@ public class HConnectionManager implements HConstants {
       }
 
       // convert the MapWritable into a Map we can use
-      SortedMap<Text, byte[]> results =
+      SortedMap<Text, Cell> results =
         sortedMapFromMapWritable(regionInfoRow);
 
-      byte[] bytes = results.get(COL_REGIONINFO);
+      Cell value = results.get(COL_REGIONINFO);
 
-      if (bytes == null || bytes.length == 0) {
+      if (value == null || value.getValue().length == 0) {
        throw new IOException("HRegionInfo was null or empty in " +
          parentTable);
      }
 
      // convert the row result into the HRegionLocation we need!
      HRegionInfo regionInfo = (HRegionInfo) Writables.getWritable(
-       results.get(COL_REGIONINFO), new HRegionInfo());
+       value.getValue(), new HRegionInfo());
 
      // possible we got a region of a different table...
      if (!regionInfo.getTableDesc().getName().equals(tableName)) {
@@ -448,8 +448,10 @@ public class HConnectionManager implements HConstants {
          regionInfo.getRegionName());
      }
 
-     String serverAddress =
-       Writables.bytesToString(results.get(COL_SERVER));
+     Cell serverValue = results.get(COL_SERVER);
+
+     String serverAddress = Writables.bytesToString(
+       serverValue == null ? null : serverValue.getValue());
 
      if (serverAddress.equals("")) {
        throw new NoServerForRegionException(
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.filter.StopRowFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Writables;
@@ -257,14 +258,14 @@ public class HTable implements HConstants {
    * @return value for specified row/column
    * @throws IOException
    */
-  public byte[] get(Text row, final Text column) throws IOException {
-    return getRegionServerWithRetries(new ServerCallable<byte[]>(row){
-      public byte[] call() throws IOException {
-        return server.get(location.getRegionInfo().getRegionName(), row, column);
-      }
-    });
-  }
+  public Cell get(final Text row, final Text column) throws IOException {
+    return getRegionServerWithRetries(new ServerCallable<Cell>(row){
+      public Cell call() throws IOException {
+        return server.get(location.getRegionInfo().getRegionName(), row, column);
+      }
+    });
+  }
 
   /**
    * Get the specified number of versions of the specified row and column
   *
@@ -274,27 +275,27 @@ public class HTable implements HConstants {
    * @return - array byte values
    * @throws IOException
    */
-  public byte[][] get(final Text row, final Text column, final int numVersions)
+  public Cell[] get(final Text row, final Text column, final int numVersions)
   throws IOException {
-    byte [][] values = null;
+    Cell[] values = null;
 
-    values = getRegionServerWithRetries(new ServerCallable<byte[][]>(row) {
-      public byte [][] call() throws IOException {
+    values = getRegionServerWithRetries(new ServerCallable<Cell[]>(row) {
+      public Cell[] call() throws IOException {
        return server.get(location.getRegionInfo().getRegionName(), row,
          column, numVersions);
      }
    });
 
    if (values != null) {
-     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
+     ArrayList<Cell> cellValues = new ArrayList<Cell>();
      for (int i = 0 ; i < values.length; i++) {
-       bytes.add(values[i]);
+       cellValues.add(values[i]);
      }
-     return bytes.toArray(new byte[values.length][]);
+     return cellValues.toArray(new Cell[values.length]);
    }
    return null;
  }
 
 
  /**
   * Get the specified number of versions of the specified row and column with
   * the specified timestamp.
@@ -306,28 +307,28 @@ public class HTable implements HConstants {
    * @return - array of values that match the above criteria
    * @throws IOException
    */
-  public byte[][] get(final Text row, final Text column, final long timestamp,
+  public Cell[] get(final Text row, final Text column, final long timestamp,
    final int numVersions)
   throws IOException {
-    byte [][] values = null;
+    Cell[] values = null;
 
-    values = getRegionServerWithRetries(new ServerCallable<byte[][]>(row) {
-      public byte [][] call() throws IOException {
+    values = getRegionServerWithRetries(new ServerCallable<Cell[]>(row) {
+      public Cell[] call() throws IOException {
        return server.get(location.getRegionInfo().getRegionName(), row,
          column, timestamp, numVersions);
      }
    });
 
    if (values != null) {
-     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
+     ArrayList<Cell> cellValues = new ArrayList<Cell>();
      for (int i = 0 ; i < values.length; i++) {
-       bytes.add(values[i]);
+       cellValues.add(values[i]);
      }
-     return bytes.toArray(new byte[values.length][]);
+     return cellValues.toArray(new Cell[values.length]);
    }
    return null;
  }
 
 
  /**
   * Get all the data for the specified row at the latest timestamp
   *
@@ -335,7 +336,7 @@ public class HTable implements HConstants {
    * @return Map of columns to values. Map is empty if row does not exist.
    * @throws IOException
    */
-  public SortedMap<Text, byte[]> getRow(Text row) throws IOException {
+  public SortedMap<Text, Cell> getRow(final Text row) throws IOException {
     return getRow(row, HConstants.LATEST_TIMESTAMP);
   }
 
@@ -347,7 +348,7 @@ public class HTable implements HConstants {
    * @return Map of columns to values. Map is empty if row does not exist.
    * @throws IOException
    */
-  public SortedMap<Text, byte[]> getRow(final Text row, final long ts)
+  public SortedMap<Text, Cell> getRow(final Text row, final long ts)
   throws IOException {
     HbaseMapWritable value = null;
 
@@ -357,18 +358,16 @@ public class HTable implements HConstants {
      }
    });
 
-   SortedMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
+   SortedMap<Text, Cell> results = new TreeMap<Text, Cell>();
    if (value != null && value.size() != 0) {
      for (Map.Entry<Writable, Writable> e: value.entrySet()) {
        HStoreKey key = (HStoreKey) e.getKey();
-       results.put(key.getColumn(),
-           ((ImmutableBytesWritable) e.getValue()).get());
+       results.put(key.getColumn(), (Cell)e.getValue());
      }
    }
    return results;
  }
 
 
  /**
   * Get a scanner on the current table starting at the specified row.
   * Return the specified columns.
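A hedged usage sketch of the reworked HTable row API, which now yields SortedMap<Text, Cell> rather than SortedMap<Text, byte[]> (the table instance, row name, and default-charset String conversion are assumptions for illustration):

    SortedMap<Text, Cell> row = table.getRow(new Text("row1"));
    for (Map.Entry<Text, Cell> e : row.entrySet()) {
      Cell c = e.getValue();
      System.out.println(e.getKey() + " = " + new String(c.getValue()) +
          " @ " + c.getTimestamp());  // the timestamp now comes along for free
    }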
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * Selects values from tables.
@@ -113,7 +114,7 @@ public class SelectCommand extends BasicCommand {
     try {
       if (version != 0) {
         // A number of versions has been specified.
-        byte[][] result = null;
+        Cell[] result = null;
         ParsedColumns parsedColumns = getColumns(admin, false);
         boolean multiple = parsedColumns.isMultiple() || version > 1;
         for (Text column : parsedColumns.getColumns()) {
@@ -128,15 +129,15 @@ public class SelectCommand extends BasicCommand {
          for (int ii = 0; result != null && ii < result.length; ii++) {
            if (multiple) {
              formatter.row(new String[] { column.toString(),
-               toString(column, result[ii]) });
+               toString(column, result[ii].getValue()) });
            } else {
-             formatter.row(new String[] { toString(column, result[ii]) });
+             formatter.row(new String[] { toString(column, result[ii].getValue()) });
            }
            count++;
          }
        }
      } else {
-       for (Map.Entry<Text, byte[]> e : table.getRow(rowKey).entrySet()) {
+       for (Map.Entry<Text, Cell> e : table.getRow(rowKey).entrySet()) {
          if (count == 0) {
            formatter.header(isMultiple() ? HEADER_COLUMN_CELL : null);
          }
@@ -145,7 +146,7 @@ public class SelectCommand extends BasicCommand {
          if (!columns.contains(ASTERISK) && !columns.contains(keyStr)) {
            continue;
          }
-         String cellData = toString(key, e.getValue());
+         String cellData = toString(key, e.getValue().getValue());
          if (isMultiple()) {
            formatter.row(new String[] { key.toString(), cellData });
          } else {
@@ -0,0 +1,86 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Cell - Used to transport a cell value (byte[]) and the timestamp it was
+ * stored with together as a result for get and getRow methods. This promotes
+ * the timestamp of a cell to a first-class value, making it easy to take
+ * note of temporal data. Cell is used all the way from HStore up to HTable.
+ */
+public class Cell implements Writable {
+  protected byte[] value;
+  protected long timestamp;
+
+  /** For Writable compatibility */
+  public Cell() {
+    value = null;
+    timestamp = 0;
+  }
+
+  /**
+   * Create a new Cell with a given value and timestamp. Used by HStore.
+   */
+  public Cell(byte[] value, long timestamp) {
+    this.value = value;
+    this.timestamp = timestamp;
+  }
+
+  /**
+   * Get the cell's value.
+   *
+   * @return cell's value
+   */
+  public byte[] getValue() {
+    return value;
+  }
+
+  /**
+   * Get the cell's timestamp
+   *
+   * @return cell's timestamp
+   */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  //
+  // Writable
+  //
+
+  public void readFields(final DataInput in) throws IOException {
+    timestamp = in.readLong();
+    int valueSize = in.readInt();
+    value = new byte[valueSize];
+    in.readFully(value, 0, valueSize);
+  }
+
+  public void write(final DataOutput out) throws IOException {
+    out.writeLong(timestamp);
+    out.writeInt(value.length);
+    out.write(value);
+  }
+}
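Because Cell writes a fixed layout (timestamp long, value-length int, value bytes), it round-trips through ordinary java.io data streams. A small self-contained sketch using only the class shown above (the literal value and timestamp are invented):

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    Cell written = new Cell("hello".getBytes(), 1203000000000L);
    written.write(new DataOutputStream(bos));        // long, int, then bytes

    Cell read = new Cell();                          // no-arg ctor is for Writable
    read.readFields(new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray())));
    assert read.getTimestamp() == written.getTimestamp();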
@@ -35,6 +35,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * A Writable Map.
@@ -59,6 +60,7 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
     addToMap(HStoreKey.class, code++);
     addToMap(ImmutableBytesWritable.class, code++);
     addToMap(Text.class, code++);
+    addToMap(Cell.class, code++);
   }
 
   @SuppressWarnings("boxing")
@@ -124,6 +126,14 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
   /** {@inheritDoc} */
   @SuppressWarnings("unchecked")
   public Writable put(Writable key, Writable value) {
+    if (!CLASS_TO_CODE.containsKey(key.getClass())) {
+      throw new NullPointerException("Unsupported class " +
+        key.getClass() + " cannot be used as a key.");
+    }
+    if (!CLASS_TO_CODE.containsKey(value.getClass())) {
+      throw new NullPointerException("Unsupported class " +
+        value.getClass() + " cannot be used as a value.");
+    }
     return instance.put(key, value);
   }
 
@@ -204,4 +214,4 @@ public class HbaseMapWritable implements Map<Writable, Writable>, Writable,
       instance.put(key, value);
     }
   }
-}
+}
@@ -43,6 +43,7 @@ import org.apache.hadoop.io.ObjectWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * This is a customized version of the polymorphic hadoop
@@ -108,12 +109,17 @@ public class HbaseObjectWritable implements Writable, Configurable {
     addToMap(HRegionInfo.class, code++);
     addToMap(BatchUpdate.class, code++);
     addToMap(HServerAddress.class, code++);
-    addToMap(HRegionInfo.class, code++);
     try {
       addToMap(Class.forName("[Lorg.apache.hadoop.hbase.HMsg;"), code++);
     } catch (ClassNotFoundException e) {
       e.printStackTrace();
     }
+    addToMap(Cell.class, code++);
+    try {
+      addToMap(Class.forName("[Lorg.apache.hadoop.hbase.io.Cell;"), code++);
+    } catch (ClassNotFoundException e) {
+      e.printStackTrace();
+    }
   }
 
   private Class<?> declaredClass;
@@ -344,4 +350,4 @@ public class HbaseObjectWritable implements Writable, Configurable {
     return this.conf;
   }
 
-}
+}
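Both writable registries above map each supported class to a sequential byte code written on the wire, so the order of addToMap calls is part of the serialization format; that is presumably why registering Cell (and the Cell[] array class) here is paired with the RPC versionID bump in HRegionInterface below. The pattern, reduced to a sketch with names abbreviated from the diff:

    private static final Map<Class<?>, Byte> CLASS_TO_CODE =
        new HashMap<Class<?>, Byte>();
    private static void addToMap(Class<?> clazz, byte code) {
      // registration order assigns the on-wire byte code
      CLASS_TO_CODE.put(clazz, code);
    }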
@@ -23,6 +23,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.io.Text;
@@ -35,7 +36,7 @@ import org.apache.hadoop.hbase.NotServingRegionException;
  */
 public interface HRegionInterface extends VersionedProtocol {
   /** initial version */
-  public static final long versionID = 1L;
+  public static final long versionID = 2L;
 
   /**
    * Get metainfo about an HRegion
@@ -57,7 +58,7 @@ public interface HRegionInterface extends VersionedProtocol {
   * @return value for that region/row/column
   * @throws IOException
   */
-  public byte [] get(final Text regionName, final Text row, final Text column)
+  public Cell get(final Text regionName, final Text row, final Text column)
   throws IOException;
 
   /**
@@ -70,7 +71,7 @@ public interface HRegionInterface extends VersionedProtocol {
   * @return array of values
   * @throws IOException
   */
-  public byte [][] get(final Text regionName, final Text row,
+  public Cell[] get(final Text regionName, final Text row,
    final Text column, final int numVersions)
   throws IOException;
 
@@ -86,8 +87,8 @@ public interface HRegionInterface extends VersionedProtocol {
   * @return array of values
   * @throws IOException
   */
-  public byte [][] get(final Text regionName, final Text row,
-   final Text column, final long timestamp, final int numVersions)
+  public Cell[] get(final Text regionName, final Text row,
+   final Text column, final long timestamp, final int numVersions)
   throws IOException;
 
   /**
@@ -383,7 +383,7 @@ public class HLog implements HConstants {
        new HLogKey(regionName, tableName, key.getRow(), seqNum[counter++]);
      HLogEdit logEdit =
        new HLogEdit(key.getColumn(), es.getValue(), key.getTimestamp());
-     this.writer.append(logKey, logEdit);
+     this.writer.append(logKey, logEdit);
      this.numEntries++;
    }
  }
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchOperation;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
@@ -1011,8 +1012,8 @@ public class HRegion implements HConstants {
    * @return column value
    * @throws IOException
    */
-  public byte [] get(Text row, Text column) throws IOException {
-    byte [][] results = get(row, column, Long.MAX_VALUE, 1);
+  public Cell get(Text row, Text column) throws IOException {
+    Cell[] results = get(row, column, Long.MAX_VALUE, 1);
     return (results == null || results.length == 0)? null: results[0];
   }
 
@@ -1025,7 +1026,7 @@ public class HRegion implements HConstants {
    * @return array of values one element per version
    * @throws IOException
    */
-  public byte [][] get(Text row, Text column, int numVersions) throws IOException {
+  public Cell[] get(Text row, Text column, int numVersions) throws IOException {
     return get(row, column, Long.MAX_VALUE, numVersions);
   }
 
@@ -1039,7 +1040,7 @@ public class HRegion implements HConstants {
    * @return array of values one element per version that matches the timestamp
    * @throws IOException
   */
-  public byte [][] get(Text row, Text column, long timestamp, int numVersions)
+  public Cell[] get(Text row, Text column, long timestamp, int numVersions)
  throws IOException {
 
    if (this.closed.get()) {
@@ -1072,7 +1073,7 @@ public class HRegion implements HConstants {
    * @return Map<columnName, byte[]> values
    * @throws IOException
    */
-  public Map<Text, byte []> getFull(Text row) throws IOException {
+  public Map<Text, Cell> getFull(Text row) throws IOException {
     return getFull(row, HConstants.LATEST_TIMESTAMP);
   }
 
@@ -1091,11 +1092,11 @@ public class HRegion implements HConstants {
    * @return Map<columnName, byte[]> values
    * @throws IOException
    */
-  public Map<Text, byte []> getFull(Text row, long ts) throws IOException {
+  public Map<Text, Cell> getFull(Text row, long ts) throws IOException {
     HStoreKey key = new HStoreKey(row, ts);
     obtainRowLock(row);
     try {
-      TreeMap<Text, byte []> result = new TreeMap<Text, byte[]>();
+      TreeMap<Text, Cell> result = new TreeMap<Text, Cell>();
       for (Text colFamily: stores.keySet()) {
         HStore targetStore = stores.get(colFamily);
         targetStore.getFull(key, result);
@@ -1116,7 +1117,7 @@ public class HRegion implements HConstants {
    * @return map of values
    * @throws IOException
    */
-  public Map<Text, byte[]> getClosestRowBefore(final Text row, final long ts)
+  public Map<Text, Cell> getClosestRowBefore(final Text row, final long ts)
   throws IOException{
     // look across all the HStores for this region and determine what the
     // closest key is across all column families, since the data may be sparse
@@ -1150,7 +1151,7 @@ public class HRegion implements HConstants {
     }
 
     // now that we've found our key, get the values
-    TreeMap<Text, byte []> result = new TreeMap<Text, byte[]>();
+    TreeMap<Text, Cell> result = new TreeMap<Text, Cell>();
     for (Text colFamily: stores.keySet()) {
       HStore targetStore = stores.get(colFamily);
       targetStore.getFull(key, result);
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
@@ -905,14 +906,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   /** {@inheritDoc} */
-  public byte [] get(final Text regionName, final Text row,
-    final Text column) throws IOException {
-
+  public Cell get(final Text regionName, final Text row, final Text column)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
-      return getRegion(regionName).get(row, column);
-
+      return getRegion(regionName).get(row, column);
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -920,14 +919,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   /** {@inheritDoc} */
-  public byte [][] get(final Text regionName, final Text row,
-    final Text column, final int numVersions) throws IOException {
-
+  public Cell[] get(final Text regionName, final Text row,
+    final Text column, final int numVersions)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
-      return getRegion(regionName).get(row, column, numVersions);
-
+      return getRegion(regionName).get(row, column, numVersions);
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -935,14 +933,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   /** {@inheritDoc} */
-  public byte [][] get(final Text regionName, final Text row, final Text column,
-    final long timestamp, final int numVersions) throws IOException {
-
+  public Cell[] get(final Text regionName, final Text row, final Text column,
+    final long timestamp, final int numVersions)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
-      return getRegion(regionName).get(row, column, timestamp, numVersions);
-
+      return getRegion(regionName).get(row, column, timestamp, numVersions);
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -951,26 +948,23 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
   /** {@inheritDoc} */
   public HbaseMapWritable getRow(final Text regionName, final Text row)
-    throws IOException {
+  throws IOException {
     return getRow(regionName, row, HConstants.LATEST_TIMESTAMP);
   }
 
   /** {@inheritDoc} */
   public HbaseMapWritable getRow(final Text regionName, final Text row, final long ts)
-    throws IOException {
-
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
       HRegion region = getRegion(regionName);
       HbaseMapWritable result = new HbaseMapWritable();
-      Map<Text, byte[]> map = region.getFull(row, ts);
-      for (Map.Entry<Text, byte []> es: map.entrySet()) {
-        result.put(new HStoreKey(row, es.getKey()),
-          new ImmutableBytesWritable(es.getValue()));
+      Map<Text, Cell> map = region.getFull(row, ts);
+      for (Map.Entry<Text, Cell> es: map.entrySet()) {
+        result.put(new HStoreKey(row, es.getKey()), es.getValue());
       }
       return result;
-
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -988,7 +982,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   public HbaseMapWritable getClosestRowBefore(final Text regionName,
     final Text row, final long ts)
   throws IOException {
-
     checkOpen();
     requestCount.incrementAndGet();
     try {
@@ -996,14 +989,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       HRegion region = getRegion(regionName);
       HbaseMapWritable result = new HbaseMapWritable();
       // ask the region for all the data
-      Map<Text, byte[]> map = region.getClosestRowBefore(row, ts);
+      Map<Text, Cell> map = region.getClosestRowBefore(row, ts);
       // convert to a MapWritable
       if (map == null) {
         return null;
       }
-      for (Map.Entry<Text, byte []> es: map.entrySet()) {
-        result.put(new HStoreKey(row, es.getKey()),
-          new ImmutableBytesWritable(es.getValue()));
+      for (Map.Entry<Text, Cell> es: map.entrySet()) {
+        result.put(new HStoreKey(row, es.getKey()), es.getValue());
       }
       return result;
@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.io.Cell;
 
 
 /**
@@ -157,16 +158,16 @@ public class HStore implements HConstants {
    * @param numVersions
    * @return An array of byte arrays ordered by timestamp.
    */
-  List<byte[]> get(final HStoreKey key, final int numVersions) {
+  List<Cell> get(final HStoreKey key, final int numVersions) {
     this.lock.readLock().lock();
     try {
-      List<byte []> results;
+      List<Cell> results;
       synchronized (memcache) {
         results = internalGet(memcache, key, numVersions);
       }
       synchronized (snapshot) {
         results.addAll(results.size(),
-          internalGet(snapshot, key, numVersions - results.size()));
+            internalGet(snapshot, key, numVersions - results.size()));
       }
       return results;
 
@@ -183,7 +184,7 @@ public class HStore implements HConstants {
    * @param key
    * @param results
    */
-  void getFull(HStoreKey key, SortedMap<Text, byte[]> results) {
+  void getFull(HStoreKey key, SortedMap<Text, Cell> results) {
     this.lock.readLock().lock();
     try {
       synchronized (memcache) {
@@ -198,14 +199,14 @@ public class HStore implements HConstants {
      }
    }
 
-   private void internalGetFull(SortedMap<HStoreKey, byte []> map, HStoreKey key,
-       SortedMap<Text, byte []> results) {
+   private void internalGetFull(SortedMap<HStoreKey, byte[]> map, HStoreKey key,
+       SortedMap<Text, Cell> results) {
 
     if (map.isEmpty() || key == null) {
       return;
     }
 
-    SortedMap<HStoreKey, byte []> tailMap = map.tailMap(key);
+    SortedMap<HStoreKey, byte[]> tailMap = map.tailMap(key);
     for (Map.Entry<HStoreKey, byte []> es: tailMap.entrySet()) {
       HStoreKey itKey = es.getKey();
       Text itCol = itKey.getColumn();
@@ -213,7 +214,7 @@ public class HStore implements HConstants {
        byte [] val = tailMap.get(itKey);
 
        if (!HLogEdit.isDeleted(val)) {
-         results.put(itCol, val);
+         results.put(itCol, new Cell(val, itKey.getTimestamp()));
        }
 
      } else if (key.getRow().compareTo(itKey.getRow()) < 0) {
@@ -318,11 +319,11 @@ public class HStore implements HConstants {
    * @return Ordered list of items found in passed <code>map</code>.  If no
    * matching values, returns an empty list (does not return null).
    */
-  private ArrayList<byte []> internalGet(
+  private ArrayList<Cell> internalGet(
      final SortedMap<HStoreKey, byte []> map, final HStoreKey key,
      final int numVersions) {
 
-    ArrayList<byte []> result = new ArrayList<byte []>();
+    ArrayList<Cell> result = new ArrayList<Cell>();
    // TODO: If get is of a particular version -- numVersions == 1 -- we
    // should be able to avoid all of the tailmap creations and iterations
    // below.
@@ -331,7 +332,7 @@ public class HStore implements HConstants {
      HStoreKey itKey = es.getKey();
      if (itKey.matchesRowCol(key)) {
        if (!HLogEdit.isDeleted(es.getValue())) {
-         result.add(tailMap.get(itKey));
+         result.add(new Cell(tailMap.get(itKey), itKey.getTimestamp()));
        }
      }
      if (numVersions > 0 && result.size() >= numVersions) {
@@ -1602,7 +1603,7 @@ public class HStore implements HConstants {
    *
    * The returned object should map column names to byte arrays (byte[]).
    */
-  void getFull(HStoreKey key, TreeMap<Text, byte []> results)
+  void getFull(HStoreKey key, TreeMap<Text, Cell> results)
   throws IOException {
     Map<Text, List<Long>> deletes = new HashMap<Text, List<Long>>();
 
@@ -1630,7 +1631,7 @@ public class HStore implements HConstants {
            if(isDeleted(readkey, readval.get(), true, deletes)) {
              break;
            }
-           results.put(new Text(readcol), readval.get());
+           results.put(new Text(readcol), new Cell(readval.get(), readkey.getTimestamp()));
            readval = new ImmutableBytesWritable();
          } else if(key.getRow().compareTo(readkey.getRow()) < 0) {
            break;
@@ -1660,7 +1661,7 @@ public class HStore implements HConstants {
    * @return values for the specified versions
    * @throws IOException
    */
-  byte [][] get(HStoreKey key, int numVersions) throws IOException {
+  Cell[] get(HStoreKey key, int numVersions) throws IOException {
     if (numVersions <= 0) {
       throw new IllegalArgumentException("Number of versions must be > 0");
     }
@@ -1668,10 +1669,10 @@ public class HStore implements HConstants {
     this.lock.readLock().lock();
     try {
       // Check the memcache
-      List<byte[]> results = this.memcache.get(key, numVersions);
+      List<Cell> results = this.memcache.get(key, numVersions);
       // If we got sufficient versions from memcache, return.
       if (results.size() == numVersions) {
-        return ImmutableBytesWritable.toArray(results);
+        return results.toArray(new Cell[results.size()]);
       }
 
       // Keep a list of deleted cell keys.  We need this because as we go through
@@ -1702,7 +1703,7 @@ public class HStore implements HConstants {
            continue;
          }
          if (!isDeleted(readkey, readval.get(), true, deletes)) {
-           results.add(readval.get());
+           results.add(new Cell(readval.get(), readkey.getTimestamp()));
           // Perhaps only one version is wanted.  I could let this
           // test happen later in the for loop test but it would cost
           // the allocation of an ImmutableBytesWritable.
@@ -1716,7 +1717,7 @@ public class HStore implements HConstants {
             !hasEnoughVersions(numVersions, results);
             readval = new ImmutableBytesWritable()) {
          if (!isDeleted(readkey, readval.get(), true, deletes)) {
-           results.add(readval.get());
+           results.add(new Cell(readval.get(), readkey.getTimestamp()));
          }
        }
      }
@@ -1725,14 +1726,14 @@ public class HStore implements HConstants {
      }
    }
    return results.size() == 0 ?
-     null : ImmutableBytesWritable.toArray(results);
+     null : results.toArray(new Cell[results.size()]);
    } finally {
      this.lock.readLock().unlock();
    }
  }
 
  private boolean hasEnoughVersions(final int numVersions,
-     final List<byte []> results) {
+     final List<Cell> results) {
    return numVersions > 0 && results.size() >= numVersions;
  }
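The essential HStore change, distilled: wherever a raw stored value used to be handed back, it is now paired with the timestamp carried in its HStoreKey, so the version information survives the trip out of the store. Schematically (a restatement of the hunks above, not extra code from the commit):

    byte[] val = tailMap.get(itKey);        // raw stored bytes
    long ts = itKey.getTimestamp();         // the version lives in the key
    results.put(itCol, new Cell(val, ts));  // value and timestamp travel together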
@@ -38,6 +38,8 @@ import org.znerd.xmlenc.LineBreak;
 import org.znerd.xmlenc.XMLOutputter;
 
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.Cell;
+
 /**
  * GenericHandler contains some basic common stuff that all the individual
  * REST handler types take advantage of.
@@ -226,7 +228,7 @@ public abstract class GenericHandler {
    * @throws IOException
    */
   protected void outputColumnsXml(final XMLOutputter outputter,
-    final Map<Text, byte[]> m)
+      final Map<Text, byte[]> m)
   throws IllegalStateException, IllegalArgumentException, IOException {
     for (Map.Entry<Text, byte[]> e: m.entrySet()) {
       outputter.startTag(COLUMN);
@@ -239,18 +241,19 @@ public abstract class GenericHandler {
      outputter.endTag();
    }
  }
 
-  protected void outputColumnsMime(final MultiPartResponse mpr,
-    final Map<Text, byte[]> m)
-  throws IOException {
-    for (Map.Entry<Text, byte[]> e: m.entrySet()) {
-      mpr.startPart("application/octet-stream",
-        new String [] {"Content-Description: " + e.getKey().toString(),
-          "Content-Transfer-Encoding: binary",
-          "Content-Length: " + e.getValue().length});
-      mpr.getOut().write(e.getValue());
-    }
-  }
+  // Commented - multipart support is currently nonexistant.
+  // protected void outputColumnsMime(final MultiPartResponse mpr,
+  //   final Map<Text, Cell> m)
+  // throws IOException {
+  //   for (Map.Entry<Text, Cell> e: m.entrySet()) {
+  //     mpr.startPart("application/octet-stream",
+  //       new String [] {"Content-Description: " + e.getKey().toString(),
+  //         "Content-Transfer-Encoding: binary",
+  //         "Content-Length: " + e.getValue().getValue().length});
+  //     mpr.getOut().write(e.getValue().getValue());
+  //   }
+  // }
 
  /*
   * Get an HTable instance by it's table name.
@@ -154,7 +154,8 @@ public class ScannerHandler extends GenericHandler {
        outputScannerEntryXML(response, sr);
        break;
      case MIME:
-       outputScannerEntryMime(response, sr);
+       /* outputScannerEntryMime(response, sr);*/
+       doNotAcceptable(response);
        break;
      default:
        doNotAcceptable(response);
@@ -199,48 +200,48 @@ public class ScannerHandler extends GenericHandler {
     outputter.getWriter().close();
   }
 
-  private void outputScannerEntryMime(final HttpServletResponse response,
-    final ScannerRecord sr)
-  throws IOException {
-    response.setStatus(200);
-    // This code ties me to the jetty server.
-    MultiPartResponse mpr = new MultiPartResponse(response);
-    // Content type should look like this for multipart:
-    // Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
-    String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
-      mpr.getBoundary() + "\"";
-    // Setting content type is broken.  I'm unable to set parameters on the
-    // content-type; They get stripped.  Can't set boundary, etc.
-    // response.addHeader("Content-Type", ct);
-    response.setContentType(ct);
-    // Write row, key-column and timestamp each in its own part.
-    mpr.startPart("application/octet-stream",
-      new String [] {"Content-Description: row",
-        "Content-Transfer-Encoding: binary",
-        "Content-Length: " + sr.getKey().getRow().getBytes().length});
-    mpr.getOut().write(sr.getKey().getRow().getBytes());
-
-    // Usually key-column is empty when scanning.
-    if (sr.getKey().getColumn() != null &&
-        sr.getKey().getColumn().getLength() > 0) {
-      mpr.startPart("application/octet-stream",
-        new String [] {"Content-Description: key-column",
-          "Content-Transfer-Encoding: binary",
-          "Content-Length: " + sr.getKey().getColumn().getBytes().length});
-    }
-    mpr.getOut().write(sr.getKey().getColumn().getBytes());
-    // TODO: Fix.  Need to write out the timestamp in the ordained timestamp
-    // format.
-    byte [] timestampBytes = Long.toString(sr.getKey().getTimestamp()).getBytes();
-    mpr.startPart("application/octet-stream",
-      new String [] {"Content-Description: timestamp",
-        "Content-Transfer-Encoding: binary",
-        "Content-Length: " + timestampBytes.length});
-    mpr.getOut().write(timestampBytes);
-    // Write out columns
-    outputColumnsMime(mpr, sr.getValue());
-    mpr.close();
-  }
+  // private void outputScannerEntryMime(final HttpServletResponse response,
+  //   final ScannerRecord sr)
+  // throws IOException {
+  //   response.setStatus(200);
+  //   // This code ties me to the jetty server.
+  //   MultiPartResponse mpr = new MultiPartResponse(response);
+  //   // Content type should look like this for multipart:
+  //   // Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
+  //   String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
+  //     mpr.getBoundary() + "\"";
+  //   // Setting content type is broken.  I'm unable to set parameters on the
+  //   // content-type; They get stripped.  Can't set boundary, etc.
+  //   // response.addHeader("Content-Type", ct);
+  //   response.setContentType(ct);
+  //   // Write row, key-column and timestamp each in its own part.
+  //   mpr.startPart("application/octet-stream",
+  //     new String [] {"Content-Description: row",
+  //       "Content-Transfer-Encoding: binary",
+  //       "Content-Length: " + sr.getKey().getRow().getBytes().length});
+  //   mpr.getOut().write(sr.getKey().getRow().getBytes());
+  //
+  //   // Usually key-column is empty when scanning.
+  //   if (sr.getKey().getColumn() != null &&
+  //       sr.getKey().getColumn().getLength() > 0) {
+  //     mpr.startPart("application/octet-stream",
+  //       new String [] {"Content-Description: key-column",
+  //         "Content-Transfer-Encoding: binary",
+  //         "Content-Length: " + sr.getKey().getColumn().getBytes().length});
+  //   }
+  //   mpr.getOut().write(sr.getKey().getColumn().getBytes());
+  //   // TODO: Fix.  Need to write out the timestamp in the ordained timestamp
+  //   // format.
+  //   byte [] timestampBytes = Long.toString(sr.getKey().getTimestamp()).getBytes();
+  //   mpr.startPart("application/octet-stream",
+  //     new String [] {"Content-Description: timestamp",
+  //       "Content-Transfer-Encoding: binary",
+  //       "Content-Length: " + timestampBytes.length});
+  //   mpr.getOut().write(timestampBytes);
+  //   // Write out columns
+  //   outputColumnsMime(mpr, sr.getValue());
+  //   mpr.close();
+  // }
 
   /*
    * Create scanner
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.Cell;
 
 import org.mortbay.servlet.MultiPartResponse;
 import org.w3c.dom.Document;
@@ -133,7 +134,7 @@ public class TableHandler extends GenericHandler {
       // They want full row returned.
 
       // Presumption is that this.table has already been focused on target table.
-      Map<Text, byte[]> result = timestampStr == null ?
+      Map<Text, Cell> result = timestampStr == null ?
         table.getRow(new Text(row))
         : table.getRow(new Text(row), Long.parseLong(timestampStr));
 
@@ -145,15 +146,13 @@ public class TableHandler extends GenericHandler {
          outputRowXml(response, result);
          break;
        case MIME:
-         outputRowMime(response, result);
-         break;
        default:
          doNotAcceptable(response, "Unsupported Accept Header Content: " +
            request.getHeader(CONTENT_TYPE));
      }
    }
  } else {
-   Map<Text, byte[]> prefiltered_result = table.getRow(new Text(row));
+   Map<Text, Cell> prefiltered_result = table.getRow(new Text(row));
 
    if (prefiltered_result == null || prefiltered_result.size() == 0) {
      doNotFound(response, "Row not found!");
@@ -166,10 +165,12 @@ public class TableHandler extends GenericHandler {
    }
 
    // output map that will contain the filtered results
-   Map<Text, byte[]> m = new HashMap<Text, byte[]>();
+   Map<Text, Cell> m = new HashMap<Text, Cell>();
 
    // get an array of all the columns retrieved
-   Object[] columns_retrieved = prefiltered_result.keySet().toArray();
+   Text[] columns_retrieved =
+     prefiltered_result.keySet().toArray(
+       new Text[prefiltered_result.keySet().size()]);
 
    // copy over those cells with requested column names
    for(int i = 0; i < columns_retrieved.length; i++){
@@ -184,8 +185,6 @@ public class TableHandler extends GenericHandler {
        outputRowXml(response, m);
        break;
      case MIME:
-       outputRowMime(response, m);
-       break;
      default:
        doNotAcceptable(response, "Unsupported Accept Header Content: " +
          request.getHeader(CONTENT_TYPE));
@@ -201,13 +200,17 @@ public class TableHandler extends GenericHandler {
    * @throws IOException
    */
   private void outputRowXml(final HttpServletResponse response,
-      final Map<Text, byte[]> result)
+      final Map<Text, Cell> result)
   throws IOException {
     setResponseHeader(response, result.size() > 0? 200: 204,
       ContentType.XML.toString());
     XMLOutputter outputter = getXMLOutputter(response.getWriter());
     outputter.startTag(ROW);
-    outputColumnsXml(outputter, result);
+    HashMap<Text, byte[]> converted = new HashMap<Text, byte[]>();
+    for (Map.Entry<Text, Cell> entry : result.entrySet()) {
+      converted.put(entry.getKey(), entry.getValue().getValue());
+    }
+    outputColumnsXml(outputter, converted);
     outputter.endTag();
     outputter.endDocument();
     outputter.getWriter().close();
@@ -218,23 +221,23 @@ public class TableHandler extends GenericHandler {
    * @param result
    * Output the results contained in result as a multipart/related response.
    */
-  private void outputRowMime(final HttpServletResponse response,
-    final Map<Text, byte[]> result)
-  throws IOException {
-    response.setStatus(result.size() > 0? 200: 204);
-    // This code ties me to the jetty server.
-    MultiPartResponse mpr = new MultiPartResponse(response);
-    // Content type should look like this for multipart:
-    // Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
-    String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
-      mpr.getBoundary() + "\"";
-    // Setting content type is broken.  I'm unable to set parameters on the
-    // content-type; They get stripped.  Can't set boundary, etc.
-    // response.addHeader("Content-Type", ct);
-    response.setContentType(ct);
-    outputColumnsMime(mpr, result);
-    mpr.close();
-  }
+  // private void outputRowMime(final HttpServletResponse response,
+  //   final Map<Text, Cell> result)
+  // throws IOException {
+  //   response.setStatus(result.size() > 0? 200: 204);
+  //   // This code ties me to the jetty server.
+  //   MultiPartResponse mpr = new MultiPartResponse(response);
+  //   // Content type should look like this for multipart:
+  //   // Content-type: multipart/related;start="<rootpart*94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6@example.jaxws.sun.com>";type="application/xop+xml";boundary="uuid:94ebf1e6-7eb5-43f1-85f4-2615fc40c5d6";start-info="text/xml"
+  //   String ct = ContentType.MIME.toString() + ";charset=\"UTF-8\";boundary=\"" +
+  //     mpr.getBoundary() + "\"";
+  //   // Setting content type is broken.  I'm unable to set parameters on the
+  //   // content-type; They get stripped.  Can't set boundary, etc.
+  //   // response.addHeader("Content-Type", ct);
+  //   response.setContentType(ct);
+  //   outputColumnsMime(mpr, result);
+  //   mpr.close();
+  // }
 
   /*
    * @param request
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.thrift.generated.RegionDescriptor;
 import org.apache.hadoop.hbase.thrift.generated.ScanEntry;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.io.Cell;
 
 import com.facebook.thrift.TException;
 import com.facebook.thrift.protocol.TBinaryProtocol;
@@ -84,7 +85,7 @@ public class ThriftServer {
    * name of table
    * @return HTable object
    * @throws IOException
-   * @throws IOException
+   * @throws IOError
    */
   protected HTable getTable(final byte[] tableName) throws IOError,
       IOException {
@@ -144,6 +145,7 @@ public class ThriftServer {
    * UTF-8 encoded bytes
    * @return Text object
    * @throws IllegalArgument
+   * @throws IOError
    */
   Text getText(byte[] buf) throws IOError {
     try {
@@ -200,11 +202,11 @@ public class ThriftServer {
     }
     try {
       HTable table = getTable(tableName);
-      byte[] value = table.get(getText(row), getText(column));
+      Cell value = table.get(getText(row), getText(column));
       if (value == null) {
         throw new NotFound();
       }
-      return value;
+      return value.getValue();
     } catch (IOException e) {
       throw new IOError(e.getMessage());
     }
@@ -219,11 +221,16 @@ public class ThriftServer {
     }
     try {
       HTable table = getTable(tableName);
-      byte[][] values = table.get(getText(row), getText(column), numVersions);
+      Cell[] values =
+        table.get(getText(row), getText(column), numVersions);
      if (values == null) {
        throw new NotFound();
      }
-     return new ArrayList<byte[]>(Arrays.asList(values));
+     ArrayList<byte[]> list = new ArrayList<byte[]>();
+     for (int i = 0; i < values.length; i++) {
+       list.add(values[i].getValue());
+     }
+     return list;
    } catch (IOException e) {
      throw new IOError(e.getMessage());
    }
@@ -239,12 +246,16 @@ public class ThriftServer {
    }
    try {
      HTable table = getTable(tableName);
-     byte[][] values = table.get(getText(row), getText(column), timestamp,
-       numVersions);
+     Cell[] values = table.get(getText(row),
+       getText(column), timestamp, numVersions);
      if (values == null) {
        throw new NotFound();
      }
-     return new ArrayList<byte[]>(Arrays.asList(values));
+     ArrayList<byte[]> list = new ArrayList<byte[]>();
+     for (int i = 0; i < values.length; i++) {
+       list.add(values[i].getValue());
+     }
+     return list;
    } catch (IOException e) {
      throw new IOError(e.getMessage());
    }
@@ -263,11 +274,12 @@ public class ThriftServer {
    }
    try {
      HTable table = getTable(tableName);
-     SortedMap<Text, byte[]> values = table.getRow(getText(row), timestamp);
+     SortedMap<Text, Cell> values =
+       table.getRow(getText(row), timestamp);
      // copy the map from type <Text, byte[]> to <byte[], byte[]>
      HashMap<byte[], byte[]> returnValues = new HashMap<byte[], byte[]>();
-     for (Entry<Text, byte[]> e : values.entrySet()) {
-       returnValues.put(e.getKey().getBytes(), e.getValue());
+     for (Entry<Text, Cell> e : values.entrySet()) {
+       returnValues.put(e.getKey().getBytes(), e.getValue().getValue());
      }
      return returnValues;
    } catch (IOException e) {
@@ -426,7 +426,8 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
     t.put(lockid, HConstants.COL_STARTCODE, Writables.longToBytes(startCode));
     t.commit(lockid);
     // Assert added.
-    byte [] bytes = t.get(region.getRegionName(), HConstants.COL_REGIONINFO);
+    byte [] bytes =
+      t.get(region.getRegionName(), HConstants.COL_REGIONINFO).getValue();
     HRegionInfo hri = Writables.getHRegionInfo(bytes);
     assertEquals(region.getRegionId(), hri.getRegionId());
     if (LOG.isDebugEnabled()) {
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hbase.client.HTable;
-
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 
 /**
@@ -340,7 +340,7 @@ public abstract class HBaseTestCase extends TestCase {
    * @return value for row/column pair
    * @throws IOException
    */
-  public byte [] get(Text row, Text column) throws IOException;
+  public Cell get(Text row, Text column) throws IOException;
   /**
    * @param row
    * @param column
@@ -348,7 +348,7 @@ public abstract class HBaseTestCase extends TestCase {
    * @return value for row/column pair for number of versions requested
    * @throws IOException
    */
-  public byte [][] get(Text row, Text column, int versions) throws IOException;
+  public Cell[] get(Text row, Text column, int versions) throws IOException;
   /**
    * @param row
    * @param column
@@ -357,7 +357,7 @@ public abstract class HBaseTestCase extends TestCase {
    * @return value for row/column/timestamp tuple for number of versions
    * @throws IOException
    */
-  public byte [][] get(Text row, Text column, long ts, int versions)
+  public Cell[] get(Text row, Text column, long ts, int versions)
   throws IOException;
   /**
    * @param lockid
@@ -478,16 +478,16 @@ public abstract class HBaseTestCase extends TestCase {
      return this.region.getScanner(columns, firstRow, ts, null);
    }
    /** {@inheritDoc} */
-   public byte[] get(Text row, Text column) throws IOException {
+   public Cell get(Text row, Text column) throws IOException {
      return this.region.get(row, column);
    }
    /** {@inheritDoc} */
-   public byte[][] get(Text row, Text column, int versions) throws IOException {
+   public Cell[] get(Text row, Text column, int versions) throws IOException {
      return this.region.get(row, column, versions);
    }
    /** {@inheritDoc} */
-   public byte[][] get(Text row, Text column, long ts, int versions)
-   throws IOException {
+   public Cell[] get(Text row, Text column, long ts, int versions)
+   throws IOException {
      return this.region.get(row, column, ts, versions);
    }
    /**
@@ -495,7 +495,7 @@ public abstract class HBaseTestCase extends TestCase {
    * @return values for each column in the specified row
    * @throws IOException
    */
-  public Map<Text, byte []> getFull(Text row) throws IOException {
+  public Map<Text, Cell> getFull(Text row) throws IOException {
    return region.getFull(row);
  }
  /** {@inheritDoc} */
@@ -550,17 +550,34 @@ public abstract class HBaseTestCase extends TestCase {
      return this.table.obtainScanner(columns, firstRow, ts, null);
    }
    /** {@inheritDoc} */
-   public byte[] get(Text row, Text column) throws IOException {
+   public Cell get(Text row, Text column) throws IOException {
      return this.table.get(row, column);
    }
    /** {@inheritDoc} */
-   public byte[][] get(Text row, Text column, int versions) throws IOException {
+   public Cell[] get(Text row, Text column, int versions) throws IOException {
      return this.table.get(row, column, versions);
    }
    /** {@inheritDoc} */
-   public byte[][] get(Text row, Text column, long ts, int versions)
+   public Cell[] get(Text row, Text column, long ts, int versions)
    throws IOException {
      return this.table.get(row, column, ts, versions);
    }
  }
 
+  protected void assertCellEquals(final HRegion region, final Text row,
+    final Text column, final long timestamp, final String value)
+  throws IOException {
+    Map<Text, Cell> result = region.getFull(row, timestamp);
+    Cell cell_value = result.get(column);
+    if(value == null){
+      assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
+    } else {
+      if (cell_value == null) {
+        fail(column.toString() + " at timestamp " + timestamp +
+          "\" was expected to be \"" + value + " but was null");
+      }
+      assertEquals(column.toString() + " at timestamp "
+        + timestamp, value, new String(cell_value.getValue()));
+    }
+  }
 }
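The new assertCellEquals helper lets tests assert on a cell's value at a specific timestamp. A hypothetical call from a subclass test (the row, column, and timestamp values are invented for illustration):

    assertCellEquals(region, new Text("row1"), new Text("contents:basic"),
        1203000000000L, "expected value");
    // passing null as the expected value asserts the cell is absent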
@@ -23,6 +23,7 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.io.Cell;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 
@@ -206,10 +207,10 @@ public class TestBloomFilters extends HBaseClusterTestCase {
 
 
     for(int i = 0; i < testKeys.length; i++) {
-      byte[] value = table.get(testKeys[i], CONTENTS);
-      if(value != null && value.length != 0) {
+      Cell value = table.get(testKeys[i], CONTENTS);
+      if(value != null && value.getValue().length != 0) {
         LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-          new String(value, HConstants.UTF8_ENCODING));
+          new String(value.getValue(), HConstants.UTF8_ENCODING));
       }
     }
   }
@@ -271,10 +272,10 @@ public class TestBloomFilters extends HBaseClusterTestCase {
    }
 
    for(int i = 0; i < testKeys.length; i++) {
-     byte[] value = table.get(testKeys[i], CONTENTS);
-     if(value != null && value.length != 0) {
+     Cell value = table.get(testKeys[i], CONTENTS);
+     if(value != null && value.getValue().length != 0) {
       LOG.info("non existant key: " + testKeys[i] + " returned value: " +
-        new String(value, HConstants.UTF8_ENCODING));
+        new String(value.getValue(), HConstants.UTF8_ENCODING));
     }
   }
 }
@@ -115,7 +115,7 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
       Text rowlabel = new Text("row_" + k);
 
-      byte bodydata[] = table.get(rowlabel, CONTENTS_BASIC);
+      byte bodydata[] = table.get(rowlabel, CONTENTS_BASIC).getValue();
       assertNotNull("no data for row " + rowlabel + "/" + CONTENTS_BASIC,
         bodydata);
       String bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
@@ -125,7 +125,7 @@ public class TestHBaseCluster extends HBaseClusterTestCase {
        bodystr + "'", teststr.compareTo(bodystr) == 0);
 
      collabel = new Text(ANCHORNUM + k);
-     bodydata = table.get(rowlabel, collabel);
+     bodydata = table.get(rowlabel, collabel).getValue();
      assertNotNull("no data for row " + rowlabel + "/" + collabel, bodydata);
      bodystr = new String(bodydata, HConstants.UTF8_ENCODING);
      teststr = ANCHORSTR + k;
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
  * Test compactions

@@ -101,9 +102,10 @@ public class TestCompaction extends HBaseTestCase {
     // Assert > 3 and then after compaction, assert that only 3 versions
     // available.
     addContent(new HRegionIncommon(r), COLUMN_FAMILY);
-    byte [][] bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
+    Cell[] cellValues =
+      r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
     // Assert that I can get > 5 versions (should be at least 5 in there).
-    assertTrue(bytes.length >= 5);
+    assertTrue(cellValues.length >= 5);
     // Try to run compaction concurrent with a thread flush just to see that
     // we can.
     final HRegion region = this.r;

@@ -142,36 +144,36 @@ public class TestCompaction extends HBaseTestCase {
     // Increment the least significant character so we get to next row.
     secondRowBytes[START_KEY_BYTES.length - 1]++;
     Text secondRow = new Text(secondRowBytes);
-    bytes = this.r.get(secondRow, COLUMN_FAMILY_TEXT, 100/*Too many*/);
-    LOG.info("Count of " + secondRow + ": " + bytes.length);
+    cellValues = r.get(secondRow, COLUMN_FAMILY_TEXT, 100/*Too many*/);
+    LOG.info("Count of " + secondRow + ": " + cellValues.length);
     // Commented out because fails on an hp+ubuntu single-processor w/ 1G and
     // "Intel(R) Pentium(R) 4 CPU 3.20GHz" though passes on all local
     // machines and even on hudson. On said machine, it's reporting in the
     // LOG line above that there are 3 items in row so it should pass the
     // below test.
-    assertTrue(bytes.length == 3 || bytes.length == 4);
+    assertTrue(cellValues.length == 3 || cellValues.length == 4);

     // Now add deletes to memcache and then flush it. That will put us over
     // the compaction threshold of 3 store files. Compacting these store files
     // should result in a compacted store file that has no references to the
     // deleted row.
-    this.r.deleteAll(STARTROW, COLUMN_FAMILY_TEXT, System.currentTimeMillis());
+    r.deleteAll(STARTROW, COLUMN_FAMILY_TEXT, System.currentTimeMillis());
     // Now, before compacting, remove all instances of the first row so we can
     // verify that it is removed as we compact.
     // Assert all deleted.
-    assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
-    this.r.flushcache();
-    assertNull(this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
+    assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
+    r.flushcache();
+    assertNull(r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/));
     // Add a bit of data and flush it so we for sure have the compaction limit
     // for store files. Usually by this time we will have but if compaction
     // included the flush that ran 'concurrently', there may be just the
     // compacted store and the flush above when we added deletes. Add more
     // content to be certain.
     createSmallerStoreFile(this.r);
-    assertTrue(this.r.compactIfNeeded());
+    assertTrue(r.compactIfNeeded());
     // Assert that the first row is still deleted.
-    bytes = this.r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
-    assertNull(bytes);
+    cellValues = r.get(STARTROW, COLUMN_FAMILY_TEXT, 100 /*Too many*/);
+    assertNull(cellValues);
     // Assert the store files do not have the first record 'aaa' keys in them.
     for (MapFile.Reader reader:
       this.r.stores.get(COLUMN_FAMILY_TEXT_MINUS_COLON).getReaders()) {
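The byte[][] to Cell[] switch runs through all of the multi-version asserts above, including the null result once a deleted row has been compacted away. A counting helper in the same spirit, offered as a sketch only (versionCount is a hypothetical name; the HRegion.get(Text, Text, int) signature returning Cell[] is taken from this diff):

    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.HRegion;
    import org.apache.hadoop.hbase.io.Cell;

    final class VersionCounts {
      /** Count the versions currently visible for a row/column; get() returns
       *  null once the row has been deleted and compacted away, which counts
       *  here as zero. */
      static int versionCount(final HRegion region, final Text row,
          final Text column) throws IOException {
        Cell[] cells = region.get(row, column, Integer.MAX_VALUE);
        return cells == null ? 0 : cells.length;
      }
    }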
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
  * Test the functionality of deleteAll.

@@ -120,42 +121,25 @@ public class TestDeleteAll extends HBaseTestCase {
     // call delete all at a timestamp, make sure only the most recent stuff is left behind
     region.deleteAll(row, t1);
     if (flush) {region_incommon.flushcache();}
-    assertCellValueEquals(region, row, colA, t0, cellData(0, flush));
-    assertCellValueEquals(region, row, colA, t1, null);
-    assertCellValueEquals(region, row, colA, t2, null);
-    assertCellValueEquals(region, row, colD, t0, cellData(0, flush));
-    assertCellValueEquals(region, row, colD, t1, null);
-    assertCellValueEquals(region, row, colD, t2, null);
+    assertCellEquals(region, row, colA, t0, cellData(0, flush));
+    assertCellEquals(region, row, colA, t1, null);
+    assertCellEquals(region, row, colA, t2, null);
+    assertCellEquals(region, row, colD, t0, cellData(0, flush));
+    assertCellEquals(region, row, colD, t1, null);
+    assertCellEquals(region, row, colD, t2, null);

     // call delete all w/o a timestamp, make sure nothing is left.
     region.deleteAll(row, HConstants.LATEST_TIMESTAMP);
     if (flush) {region_incommon.flushcache();}
-    assertCellValueEquals(region, row, colA, t0, null);
-    assertCellValueEquals(region, row, colA, t1, null);
-    assertCellValueEquals(region, row, colA, t2, null);
-    assertCellValueEquals(region, row, colD, t0, null);
-    assertCellValueEquals(region, row, colD, t1, null);
-    assertCellValueEquals(region, row, colD, t2, null);
+    assertCellEquals(region, row, colA, t0, null);
+    assertCellEquals(region, row, colA, t1, null);
+    assertCellEquals(region, row, colA, t2, null);
+    assertCellEquals(region, row, colD, t0, null);
+    assertCellEquals(region, row, colD, t1, null);
+    assertCellEquals(region, row, colD, t2, null);

   }

-  private void assertCellValueEquals(final HRegion region, final Text row,
-    final Text column, final long timestamp, final String value)
-  throws IOException {
-    Map<Text, byte[]> result = region.getFull(row, timestamp);
-    byte[] cell_value = result.get(column);
-    if(value == null){
-      assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
-    } else {
-      if (cell_value == null) {
-        fail(column.toString() + " at timestamp " + timestamp +
-          "\" was expected to be \"" + value + " but was null");
-      }
-      assertEquals(column.toString() + " at timestamp "
-        + timestamp, value, new String(cell_value));
-    }
-  }

   private String cellData(int tsNum, boolean flush){
     return "t" + tsNum + " data" + (flush ? " - with flush" : "");
   }
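This hunk also drops the test's private assertCellValueEquals in favor of the assertCellEquals added to HBaseTestCase above, so the null-means-deleted convention now lives in one place instead of being copied into each test. Its contract, restated as a standalone sketch (checkCell is a hypothetical name):

    import java.util.Map;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.io.Cell;

    final class CellAsserts {
      /** expected == null asserts the cell is gone; otherwise the cell must
       *  exist and its value bytes must decode to the expected string. */
      static void checkCell(final Map<Text, Cell> row, final Text column,
          final String expected) {
        Cell cell = row.get(column);
        if (expected == null) {
          if (cell != null) {
            throw new AssertionError(column + " should have been deleted");
          }
        } else if (cell == null) {
          throw new AssertionError(column + " was expected to be \"" +
            expected + "\" but was null");
        } else if (!expected.equals(new String(cell.getValue()))) {
          throw new AssertionError(column + " held the wrong value");
        }
      }
    }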
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
  * Test the functionality of deleteFamily.

@@ -117,15 +118,15 @@ public class TestDeleteFamily extends HBaseTestCase {
     // most recent for A,B,C should be fine
     // A,B at older timestamps should be gone
     // C should be fine for older timestamps
-    assertCellValueEquals(region, row, colA, t0, cellData(0, flush));
-    assertCellValueEquals(region, row, colA, t1, null);
-    assertCellValueEquals(region, row, colA, t2, null);
-    assertCellValueEquals(region, row, colB, t0, cellData(0, flush));
-    assertCellValueEquals(region, row, colB, t1, null);
-    assertCellValueEquals(region, row, colB, t2, null);
-    assertCellValueEquals(region, row, colC, t0, cellData(0, flush));
-    assertCellValueEquals(region, row, colC, t1, cellData(1, flush));
-    assertCellValueEquals(region, row, colC, t2, cellData(2, flush));
+    assertCellEquals(region, row, colA, t0, cellData(0, flush));
+    assertCellEquals(region, row, colA, t1, null);
+    assertCellEquals(region, row, colA, t2, null);
+    assertCellEquals(region, row, colB, t0, cellData(0, flush));
+    assertCellEquals(region, row, colB, t1, null);
+    assertCellEquals(region, row, colB, t2, null);
+    assertCellEquals(region, row, colC, t0, cellData(0, flush));
+    assertCellEquals(region, row, colC, t1, cellData(1, flush));
+    assertCellEquals(region, row, colC, t2, cellData(2, flush));

     // call delete family w/o a timestamp, make sure nothing is left except for
     // column C.

@@ -133,31 +134,14 @@ public class TestDeleteFamily extends HBaseTestCase {
     if (flush) {region_incommon.flushcache();}
     // A,B for latest timestamp should be gone
     // C should still be fine
-    assertCellValueEquals(region, row, colA, t0, null);
-    assertCellValueEquals(region, row, colB, t0, null);
-    assertCellValueEquals(region, row, colC, t0, cellData(0, flush));
-    assertCellValueEquals(region, row, colC, t1, cellData(1, flush));
-    assertCellValueEquals(region, row, colC, t2, cellData(2, flush));
+    assertCellEquals(region, row, colA, t0, null);
+    assertCellEquals(region, row, colB, t0, null);
+    assertCellEquals(region, row, colC, t0, cellData(0, flush));
+    assertCellEquals(region, row, colC, t1, cellData(1, flush));
+    assertCellEquals(region, row, colC, t2, cellData(2, flush));

   }

-  private void assertCellValueEquals(final HRegion region, final Text row,
-    final Text column, final long timestamp, final String value)
-  throws IOException {
-    Map<Text, byte[]> result = region.getFull(row, timestamp);
-    byte[] cell_value = result.get(column);
-    if(value == null){
-      assertEquals(column.toString() + " at timestamp " + timestamp, null, cell_value);
-    } else {
-      if (cell_value == null) {
-        fail(column.toString() + " at timestamp " + timestamp +
-          "\" was expected to be \"" + value + " but was null");
-      }
-      assertEquals(column.toString() + " at timestamp "
-        + timestamp, value, new String(cell_value));
-    }
-  }

   private String cellData(int tsNum, boolean flush){
     return "t" + tsNum + " data" + (flush ? " - with flush" : "");
   }
@@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.StaticTestEnvironment;

+import org.apache.hadoop.hbase.io.Cell;

 /** Test case for get */
 public class TestGet extends HBaseTestCase {

@@ -53,7 +53,7 @@ public class TestGet extends HBaseTestCase {
   private void verifyGet(final HRegionIncommon r, final String expectedServer)
   throws IOException {
     // This should return a value because there is only one family member
-    byte [] value = r.get(ROW_KEY, CONTENTS);
+    Cell value = r.get(ROW_KEY, CONTENTS);
     assertNotNull(value);

     // This should not return a value because there are multiple family members

@@ -61,13 +61,13 @@ public class TestGet extends HBaseTestCase {
     assertNull(value);

     // Find out what getFull returns
-    Map<Text, byte []> values = r.getFull(ROW_KEY);
+    Map<Text, Cell> values = r.getFull(ROW_KEY);

     // assertEquals(4, values.keySet().size());
     for (Iterator<Text> i = values.keySet().iterator(); i.hasNext(); ) {
       Text column = i.next();
       if (column.equals(HConstants.COL_SERVER)) {
-        String server = Writables.bytesToString(values.get(column));
+        String server = Writables.bytesToString(values.get(column).getValue());
         assertEquals(expectedServer, server);
         LOG.info(server);
       }
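The verifyGet changes show the decode pattern that recurs after this commit: unwrap the Cell first, then interpret the bytes. A null-tolerant variant of that unwrap, again only a sketch (cellToString is a hypothetical helper, not part of the commit):

    import org.apache.hadoop.hbase.io.Cell;

    final class CellStrings {
      /** Decode a Cell's value as a UTF-8 String, mapping a missing cell
       *  to the empty string rather than a NullPointerException. */
      static String cellToString(final Cell cell)
          throws java.io.UnsupportedEncodingException {
        return cell == null ? "" : new String(cell.getValue(), "UTF-8");
      }
    }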
@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;

+import org.apache.hadoop.hbase.io.Cell;

 /**
  * {@link TestGet} is a medley of tests of get all done up as a single test.

@@ -127,14 +127,14 @@ public class TestGet2 extends HBaseTestCase {
       region_incommon.put(lockid, COLUMNS[0], "new text".getBytes());
       region_incommon.commit(lockid, right_now);

-      assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
-      assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+      assertCellEquals(region, t, COLUMNS[0], right_now, "new text");
+      assertCellEquals(region, t, COLUMNS[0], one_second_ago, "old text");

       // Force a flush so store files come into play.
       region_incommon.flushcache();

-      assertCellValueEquals(region, t, COLUMNS[0], right_now, "new text");
-      assertCellValueEquals(region, t, COLUMNS[0], one_second_ago, "old text");
+      assertCellEquals(region, t, COLUMNS[0], right_now, "new text");
+      assertCellEquals(region, t, COLUMNS[0], one_second_ago, "old text");

     } finally {
       if (region != null) {

@@ -186,33 +186,33 @@ public class TestGet2 extends HBaseTestCase {

       // try finding "015"
       Text t15 = new Text("015");
-      Map<Text, byte[]> results =
+      Map<Text, Cell> results =
         region.getClosestRowBefore(t15, HConstants.LATEST_TIMESTAMP);
-      assertEquals(new String(results.get(COLUMNS[0])), "t10 bytes");
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t10 bytes");

       // try "020", we should get that row exactly
       results = region.getClosestRowBefore(t20, HConstants.LATEST_TIMESTAMP);
-      assertEquals(new String(results.get(COLUMNS[0])), "t20 bytes");
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t20 bytes");

       // try "050", should get stuff from "040"
       Text t50 = new Text("050");
       results = region.getClosestRowBefore(t50, HConstants.LATEST_TIMESTAMP);
-      assertEquals(new String(results.get(COLUMNS[0])), "t40 bytes");
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t40 bytes");

       // force a flush
       region.flushcache();

       // try finding "015"
       results = region.getClosestRowBefore(t15, HConstants.LATEST_TIMESTAMP);
-      assertEquals(new String(results.get(COLUMNS[0])), "t10 bytes");
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t10 bytes");

       // try "020", we should get that row exactly
       results = region.getClosestRowBefore(t20, HConstants.LATEST_TIMESTAMP);
-      assertEquals(new String(results.get(COLUMNS[0])), "t20 bytes");
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t20 bytes");

       // try "050", should get stuff from "040"
       results = region.getClosestRowBefore(t50, HConstants.LATEST_TIMESTAMP);
-      assertEquals(new String(results.get(COLUMNS[0])), "t40 bytes");
+      assertEquals(new String(results.get(COLUMNS[0]).getValue()), "t40 bytes");
     } finally {
       if (region != null) {
         try {

@@ -224,26 +224,18 @@ public class TestGet2 extends HBaseTestCase {
         }
       }
     }

-  private void assertCellValueEquals(final HRegion region, final Text row,
-    final Text column, final long timestamp, final String value)
-  throws IOException {
-    Map<Text, byte[]> result = region.getFull(row, timestamp);
-    assertEquals("cell value at a given timestamp", new String(result.get(column)), value);
-  }

   private void assertColumnsPresent(final HRegion r, final Text row)
   throws IOException {
-    Map<Text, byte[]> result = r.getFull(row);
+    Map<Text, Cell> result = r.getFull(row);
     int columnCount = 0;
-    for (Map.Entry<Text, byte[]> e: result.entrySet()) {
+    for (Map.Entry<Text, Cell> e: result.entrySet()) {
       columnCount++;
       String column = e.getKey().toString();
       boolean legitColumn = false;
       for (int i = 0; i < COLUMNS.length; i++) {
         // Assert value is same as row. This is 'nature' of the data added.
-        assertTrue(row.equals(new Text(e.getValue())));
+        assertTrue(row.equals(new Text(e.getValue().getValue())));
         if (COLUMNS[i].equals(new Text(column))) {
           legitColumn = true;
           break;
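getClosestRowBefore now hands back Map<Text, Cell>, so every consumer unwraps before comparing. A sketch of that lookup-and-unwrap step (closestValue is a hypothetical name; the getClosestRowBefore signature is as used above):

    import java.io.IOException;
    import java.util.Map;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegion;
    import org.apache.hadoop.hbase.io.Cell;

    final class ClosestRow {
      /** Value bytes stored under 'column' in the closest row at or before
       *  'row', or null when neither the row nor the column exists. */
      static byte[] closestValue(final HRegion region, final Text row,
          final Text column) throws IOException {
        Map<Text, Cell> results =
          region.getClosestRowBefore(row, HConstants.LATEST_TIMESTAMP);
        Cell cell = results == null ? null : results.get(column);
        return cell == null ? null : cell.getValue();
      }
    }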
@@ -30,6 +30,7 @@ import org.apache.hadoop.io.Text;

 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
+import org.apache.hadoop.hbase.io.Cell;

 /** memcache test case */
 public class TestHMemcache extends TestCase {

@@ -100,9 +101,8 @@ public class TestHMemcache extends TestCase {
     }
   }

-  private void isExpectedRow(final int rowIndex, TreeMap<Text, byte []> row)
+  private void isExpectedRowWithoutTimestamps(final int rowIndex, TreeMap<Text, byte[]> row)
   throws UnsupportedEncodingException {
-
     int i = 0;
     for (Text colname: row.keySet()) {
       String expectedColname = getColumnName(rowIndex, i++).toString();

@@ -118,6 +118,16 @@ public class TestHMemcache extends TestCase {
     }
   }

+  private void isExpectedRow(final int rowIndex, TreeMap<Text, Cell> row)
+  throws UnsupportedEncodingException {
+    TreeMap<Text, byte[]> converted = new TreeMap<Text, byte[]>();
+    for (Map.Entry<Text, Cell> entry : row.entrySet()) {
+      converted.put(entry.getKey(),
+        entry.getValue() == null ? null : entry.getValue().getValue());
+    }
+    isExpectedRowWithoutTimestamps(rowIndex, converted);
+  }
+
   /** Test getFull from memcache
    * @throws UnsupportedEncodingException
    */

@@ -125,7 +135,7 @@ public class TestHMemcache extends TestCase {
     addRows(this.hmemcache);
     for (int i = 0; i < ROW_COUNT; i++) {
       HStoreKey hsk = new HStoreKey(getRowName(i));
-      TreeMap<Text, byte []> all = new TreeMap<Text, byte[]>();
+      TreeMap<Text, Cell> all = new TreeMap<Text, Cell>();
       this.hmemcache.getFull(hsk, all);
       isExpectedRow(i, all);
     }

@@ -157,7 +167,7 @@ public class TestHMemcache extends TestCase {
     for (Map.Entry<Text, byte []> e: results.entrySet() ) {
       row.put(e.getKey(), e.getValue());
     }
-    isExpectedRow(i, row);
+    isExpectedRowWithoutTimestamps(i, row);
     // Clear out set. Otherwise row results accumulate.
     results.clear();
   }

@@ -149,7 +149,7 @@ implements RegionUnavailableListener {
     for (int k = FIRST_ROW; k <= NUM_VALS; k++) {
       Text rowlabel = new Text("row_" + k);

-      byte [] bodydata = region.get(rowlabel, CONTENTS_BASIC);
+      byte [] bodydata = region.get(rowlabel, CONTENTS_BASIC).getValue();
       assertNotNull(bodydata);
       String bodystr = new String(bodydata, HConstants.UTF8_ENCODING).trim();
       String teststr = CONTENTSTR + k;

@@ -157,7 +157,7 @@ implements RegionUnavailableListener {
       + "), expected: '" + teststr + "' got: '" + bodystr + "'",
       bodystr, teststr);
       collabel = new Text(ANCHORNUM + k);
-      bodydata = region.get(rowlabel, collabel);
+      bodydata = region.get(rowlabel, collabel).getValue();
       bodystr = new String(bodydata, HConstants.UTF8_ENCODING).trim();
       teststr = ANCHORSTR + k;
       assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel

@@ -137,7 +137,7 @@ public class TestScanner extends HBaseTestCase {

   /** Use get to retrieve the HRegionInfo and validate it */
   private void getRegionInfo() throws IOException {
-    byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO);
+    byte [] bytes = region.get(ROW_KEY, HConstants.COL_REGIONINFO).getValue();
     validateRegionInfo(bytes);
   }

@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.StaticTestEnvironment;
+import org.apache.hadoop.hbase.io.Cell;

 /**
  * {@link TestHRegion} does a split but this TestCase adds testing of fast

@@ -182,10 +183,10 @@ public class TestSplit extends MultiRegionTable {
   private void assertGet(final HRegion r, final String family, final Text k)
   throws IOException {
     // Now I have k, get values out and assert they are as expected.
-    byte [][] results = r.get(k, new Text(family),
+    Cell[] results = r.get(k, new Text(family),
       Integer.MAX_VALUE);
     for (int j = 0; j < results.length; j++) {
-      Text tmp = new Text(results[j]);
+      Text tmp = new Text(results[j].getValue());
       // Row should be equal to value every time.
       assertEquals(k.toString(), tmp.toString());
     }

@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.HScannerInterface;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.io.Cell;

 /**
  * Tests user specifiable time stamps putting, getting and scanning. Also

@@ -195,9 +196,9 @@ public class TestTimestamp extends HBaseTestCase {
   private void assertOnlyLatest(final Incommon incommon,
     final long currentTime)
   throws IOException {
-    byte [][] bytesBytes = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
-    assertEquals(1, bytesBytes.length);
-    long time = Writables.bytesToLong(bytesBytes[0]);
+    Cell[] cellValues = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
+    assertEquals(1, cellValues.length);
+    long time = Writables.bytesToLong(cellValues[0].getValue());
     assertEquals(time, currentTime);
     assertNull(incommon.get(ROW, COLUMN, T1, 3 /*Too many*/));
     assertTrue(assertScanContentTimestamp(incommon, T1) == 0);

@@ -214,20 +215,20 @@ public class TestTimestamp extends HBaseTestCase {
   private void assertVersions(final Incommon incommon, final long [] tss)
   throws IOException {
     // Assert that 'latest' is what we expect.
-    byte [] bytes = incommon.get(ROW, COLUMN);
+    byte [] bytes = incommon.get(ROW, COLUMN).getValue();
     assertEquals(Writables.bytesToLong(bytes), tss[0]);
     // Now assert that if we ask for multiple versions, that they come out in
     // order.
-    byte [][] bytesBytes = incommon.get(ROW, COLUMN, tss.length);
-    assertEquals(tss.length, bytesBytes.length);
-    for (int i = 0; i < bytesBytes.length; i++) {
-      long ts = Writables.bytesToLong(bytesBytes[i]);
+    Cell[] cellValues = incommon.get(ROW, COLUMN, tss.length);
+    assertEquals(tss.length, cellValues.length);
+    for (int i = 0; i < cellValues.length; i++) {
+      long ts = Writables.bytesToLong(cellValues[i].getValue());
       assertEquals(ts, tss[i]);
     }
     // Specify a timestamp get multiple versions.
-    bytesBytes = incommon.get(ROW, COLUMN, tss[0], bytesBytes.length - 1);
-    for (int i = 1; i < bytesBytes.length; i++) {
-      long ts = Writables.bytesToLong(bytesBytes[i]);
+    cellValues = incommon.get(ROW, COLUMN, tss[0], cellValues.length - 1);
+    for (int i = 1; i < cellValues.length; i++) {
+      long ts = Writables.bytesToLong(cellValues[i].getValue());
       assertEquals(ts, tss[i]);
     }
     // Test scanner returns expected version
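These timestamp tests still decode the write time from the value bytes they stored. Since a Cell is meant to carry the timestamp alongside the value, a later cleanup could assert version ordering from the cells themselves; a sketch under that assumption (getTimestamp() does not appear anywhere in this diff):

    import org.apache.hadoop.hbase.io.Cell;

    final class VersionOrder {
      /** Multi-version gets return newest-first; verify the cells' own
       *  timestamps respect that order. */
      static void assertNewestFirst(final Cell[] cells) {
        for (int i = 1; i < cells.length; i++) {
          if (cells[i - 1].getTimestamp() < cells[i].getTimestamp()) {
            throw new AssertionError("versions are not newest-first at " + i);
          }
        }
      }
    }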