HBASE-9549 KeyValue#parseColumn empty qualifier

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1524236 13f79535-47bb-0310-9956-ffa450edef68
ndimiduk 2013-09-17 22:00:44 +00:00
parent 10aba8f306
commit eceab57bed
14 changed files with 206 additions and 150 deletions

View File

@@ -61,7 +61,8 @@ import com.google.common.primitives.Longs;
  * KeyValue wraps a byte array and takes offsets and lengths into passed array at where to start
  * interpreting the content as KeyValue. The KeyValue format inside a byte array is:
  * <code>&lt;keylength> &lt;valuelength> &lt;key> &lt;value></code> Key is further decomposed as:
- * <code>&lt;rowlength> &lt;row> &lt;columnfamilylength> &lt;columnfamily> &lt;columnqualifier> &lt;timestamp> &lt;keytype></code>
+ * <code>&lt;rowlength> &lt;row> &lt;columnfamilylength> &lt;columnfamily> &lt;columnqualifier>
+ * &lt;timestamp> &lt;keytype></code>
  * The <code>rowlength</code> maximum is <code>Short.MAX_SIZE</code>, column family length maximum
  * is <code>Byte.MAX_SIZE</code>, and column qualifier + key length must be <
  * <code>Integer.MAX_SIZE</code>. The column does not contain the family/qualifier delimiter,
@@ -1294,9 +1295,15 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
   }
   /**
-   * Splits a column in family:qualifier form into separate byte arrays.
+   * Splits a column in {@code family:qualifier} form into separate byte arrays. An empty qualifier
+   * (ie, {@code fam:}) is parsed as <code>{ fam, EMPTY_BYTE_ARRAY }</code> while no delimiter (ie,
+   * {@code fam}) is parsed as an array of one element, <code>{ fam }</code>.
+   * <p>
+   * Don't forget, HBase DOES support empty qualifiers. (see HBASE-9549)
+   * </p>
    * <p>
    * Not recommend to be used as this is old-style API.
+   * </p>
    * @param c The column.
    * @return The parsed column.
    */
@@ -1306,10 +1313,10 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
       // If no delimiter, return array of size 1
       return new byte [][] { c };
     } else if(index == c.length - 1) {
-      // Only a family, return array size 1
+      // family with empty qualifier, return array size 2
      byte [] family = new byte[c.length-1];
      System.arraycopy(c, 0, family, 0, family.length);
-      return new byte [][] { family };
+      return new byte [][] { family, HConstants.EMPTY_BYTE_ARRAY};
     }
     // Family and column, return array size 2
     final byte [][] result = new byte [2][];
@@ -1317,8 +1324,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
     System.arraycopy(c, 0, result[0], 0, index);
     final int len = c.length - (index + 1);
     result[1] = new byte[len];
-    System.arraycopy(c, index + 1 /*Skip delimiter*/, result[1], 0,
-      len);
+    System.arraycopy(c, index + 1 /* Skip delimiter */, result[1], 0, len);
     return result;
   }
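
For reference, the parseColumn contract after this change can be sketched as follows. This is an illustrative snippet, not part of the commit; it assumes the patched KeyValue is on the classpath and assertions are enabled (java -ea):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class ParseColumnDemo {
  public static void main(String[] args) {
    // "fam" -- no delimiter: a one-element array holding only the family.
    byte[][] familyOnly = KeyValue.parseColumn(Bytes.toBytes("fam"));
    assert familyOnly.length == 1;

    // "fam:" -- trailing delimiter: two elements, the qualifier empty but not null.
    byte[][] emptyQual = KeyValue.parseColumn(Bytes.toBytes("fam:"));
    assert emptyQual.length == 2 && emptyQual[1].length == 0;

    // "fam:qual" -- both parts populated.
    byte[][] both = KeyValue.parseColumn(Bytes.toBytes("fam:qual"));
    assert both.length == 2 && "qual".equals(Bytes.toString(both[1]));
  }
}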

View File

@@ -77,6 +77,11 @@ public class TestKeyValue extends TestCase {
     // Test empty value and empty column -- both should work. (not empty fam)
     check(Bytes.toBytes(getName()), Bytes.toBytes(getName()), null, 1, null);
     check(HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes(getName()), null, 1, null);
+    // empty qual is equivalent to null qual
+    assertEquals(
+        new KeyValue(Bytes.toBytes("rk"), Bytes.toBytes("fam"), null, 1, (byte[]) null),
+        new KeyValue(Bytes.toBytes("rk"), Bytes.toBytes("fam"),
+            HConstants.EMPTY_BYTE_ARRAY, 1, (byte[]) null));
   }
   private void check(final byte [] row, final byte [] family, byte [] qualifier,
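
The new assertEquals holds because KeyValue serializes the qualifier into the key bytes, where null and a zero-length array both contribute nothing. A standalone sketch (class name is illustrative):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class NullVsEmptyKeyValueDemo {
  public static void main(String[] args) {
    KeyValue a = new KeyValue(Bytes.toBytes("rk"), Bytes.toBytes("fam"), null, 1L, (byte[]) null);
    KeyValue b = new KeyValue(Bytes.toBytes("rk"), Bytes.toBytes("fam"), new byte[0], 1L,
        (byte[]) null);
    // Both qualifiers add zero bytes to the serialized key, so the cells compare equal.
    System.out.println(Bytes.equals(a.getKey(), b.getKey())); // true
  }
}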

View File

@@ -54,7 +54,7 @@ implements Configurable {
   public static final String SCAN_ROW_STOP = "hbase.mapreduce.scan.row.stop";
   /** Column Family to Scan */
   public static final String SCAN_COLUMN_FAMILY = "hbase.mapreduce.scan.column.family";
-  /** Space delimited list of columns to scan. */
+  /** Space delimited list of columns and column families to scan. */
   public static final String SCAN_COLUMNS = "hbase.mapreduce.scan.columns";
   /** The timestamp used to filter columns with a specific timestamp. */
   public static final String SCAN_TIMESTAMP = "hbase.mapreduce.scan.timestamp";
@@ -159,30 +159,34 @@ implements Configurable {
   /**
    * Parses a combined family and qualifier and adds either both or just the
-   * family in case there is not qualifier. This assumes the older colon
-   * divided notation, e.g. "data:contents" or "meta:".
+   * family in case there is no qualifier. This assumes the older colon
+   * divided notation, e.g. "family:qualifier".
+   * <p>
+   * Note: It will throw an error when the colon is missing.
    *
+   * @param scan The Scan to update.
    * @param familyAndQualifier family and qualifier
    * @return A reference to this instance.
-   * @throws IllegalArgumentException When the colon is missing.
+   * @throws IllegalArgumentException When familyAndQualifier is invalid.
    */
   private static void addColumn(Scan scan, byte[] familyAndQualifier) {
     byte [][] fq = KeyValue.parseColumn(familyAndQualifier);
-    if (fq.length > 1 && fq[1] != null && fq[1].length > 0) {
+    if (fq.length == 1) {
+      scan.addFamily(fq[0]);
+    } else if (fq.length == 2) {
       scan.addColumn(fq[0], fq[1]);
     } else {
-      scan.addFamily(fq[0]);
+      throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
     }
   }
   /**
    * Adds an array of columns specified using old format, family:qualifier.
    * <p>
-   * Overrides previous calls to addFamily for any families in the input.
+   * Overrides previous calls to {@link Scan#addColumn(byte[], byte[])} for any families in the
+   * input.
    *
-   * @param columns array of columns, formatted as <pre>family:qualifier</pre>
+   * @param scan The Scan to update.
+   * @param columns array of columns, formatted as <code>family:qualifier</code>
+   * @see Scan#addColumn(byte[], byte[])
    */
   public static void addColumns(Scan scan, byte [][] columns) {
     for (byte[] column : columns) {
@@ -191,13 +195,10 @@ implements Configurable {
   }
   /**
-   * Convenience method to help parse old style (or rather user entry on the
-   * command line) column definitions, e.g. "data:contents mime:". The columns
-   * must be space delimited and always have a colon (":") to denote family
-   * and qualifier.
+   * Convenience method to parse a string representation of an array of column specifiers.
    *
+   * @param scan The Scan to update.
    * @param columns The columns to parse.
-   * @return A reference to this instance.
    */
   private static void addColumns(Scan scan, String columns) {
     String[] cols = columns.split(" ");
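
With the relaxed parsing, a MapReduce job can mix fully-qualified columns and bare families in one SCAN_COLUMNS value. A hypothetical configuration sketch (INPUT_TABLE is TableInputFormat's existing table key; the table and column names are made up):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

public class ScanColumnsConfigDemo {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set(TableInputFormat.INPUT_TABLE, "exampleTable");
    // Space-delimited: "cf1:columnA" selects one column, "cf2" (no colon) the
    // whole family, and "cf3:" would select the column with the empty qualifier.
    conf.set(TableInputFormat.SCAN_COLUMNS, "cf1:columnA cf2");
  }
}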

View File

@@ -63,8 +63,8 @@ import org.apache.hadoop.net.DNS;
  *     Bytes.toBytes("exampleTable"));
  *   // mandatory
  *   setHTable(exampleTable);
- *   Text[] inputColumns = new byte [][] { Bytes.toBytes("columnA"),
- *     Bytes.toBytes("columnB") };
+ *   Text[] inputColumns = new byte [][] { Bytes.toBytes("cf1:columnA"),
+ *     Bytes.toBytes("cf2") };
  *   // mandatory
  *   setInputColumns(inputColumns);
  *   RowFilterInterface exampleFilter = new RegExpRowFilter("keyPrefix.*");

View File

@@ -217,11 +217,12 @@ public class RowResource extends ResourceBase {
           .build();
       }
       byte [][] parts = KeyValue.parseColumn(col);
-      if (parts.length == 2 && parts[1].length > 0) {
-        put.add(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
-      } else {
-        put.add(parts[0], null, cell.getTimestamp(), cell.getValue());
+      if (parts.length != 2) {
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+          .build();
       }
+      put.add(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
     }
     puts.add(put);
     if (LOG.isDebugEnabled()) {
@@ -285,11 +286,12 @@
     }
     Put put = new Put(row);
     byte parts[][] = KeyValue.parseColumn(column);
-    if (parts.length == 2 && parts[1].length > 0) {
-      put.add(parts[0], parts[1], timestamp, message);
-    } else {
-      put.add(parts[0], null, timestamp, message);
+    if (parts.length != 2) {
+      return Response.status(Response.Status.BAD_REQUEST)
+        .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+        .build();
     }
+    put.add(parts[0], parts[1], timestamp, message);
     table = servlet.getTable(tableResource.getName());
     table.put(put);
     if (LOG.isDebugEnabled()) {
@@ -373,16 +375,24 @@
       for (byte[] column: rowspec.getColumns()) {
         byte[][] split = KeyValue.parseColumn(column);
         if (rowspec.hasTimestamp()) {
-          if (split.length == 2 && split[1].length != 0) {
+          if (split.length == 1) {
+            delete.deleteFamily(split[0], rowspec.getTimestamp());
+          } else if (split.length == 2) {
             delete.deleteColumns(split[0], split[1], rowspec.getTimestamp());
           } else {
-            delete.deleteFamily(split[0], rowspec.getTimestamp());
+            return Response.status(Response.Status.BAD_REQUEST)
+              .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+              .build();
           }
         } else {
-          if (split.length == 2 && split[1].length != 0) {
+          if (split.length == 1) {
+            delete.deleteFamily(split[0]);
+          } else if (split.length == 2) {
             delete.deleteColumns(split[0], split[1]);
           } else {
-            delete.deleteFamily(split[0]);
+            return Response.status(Response.Status.BAD_REQUEST)
+              .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+              .build();
          }
        }
      }
@@ -441,7 +451,11 @@
       CellModel valueToCheckCell = cellModels.get(cellModelCount - 1);
       byte[] valueToCheckColumn = valueToCheckCell.getColumn();
       byte[][] valueToPutParts = KeyValue.parseColumn(valueToCheckColumn);
-      if (valueToPutParts.length == 2 && valueToPutParts[1].length > 0) {
+      if (valueToPutParts.length != 2) {
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+          .build();
+      }
       CellModel valueToPutCell = null;
       for (int i = 0, n = cellModelCount - 1; i < n ; i++) {
         if(Bytes.equals(cellModels.get(i).getColumn(),
@@ -450,19 +464,13 @@
           break;
         }
       }
-      if (valueToPutCell != null) {
+      if (null == valueToPutCell) {
+        return Response.status(Response.Status.BAD_REQUEST)
+          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
+          .build();
+      }
       put.add(valueToPutParts[0], valueToPutParts[1], valueToPutCell
         .getTimestamp(), valueToPutCell.getValue());
-      } else {
-        return Response.status(Response.Status.BAD_REQUEST)
-          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
-          .build();
-      }
-      } else {
-        return Response.status(Response.Status.BAD_REQUEST)
-          .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
-          .build();
-      }
       table = servlet.getTable(this.tableResource.getName());
       boolean retValue = table.checkAndPut(key, valueToPutParts[0],
@@ -527,13 +535,12 @@
         }
       }
       byte[][] parts = KeyValue.parseColumn(valueToDeleteColumn);
-      if (parts.length == 2 && parts[1].length > 0) {
-        delete.deleteColumns(parts[0], parts[1]);
-      } else {
+      if (parts.length != 2) {
         return Response.status(Response.Status.BAD_REQUEST)
           .type(MIMETYPE_TEXT).entity("Bad request" + CRLF)
           .build();
       }
+      delete.deleteColumns(parts[0], parts[1]);
      table = servlet.getTable(tableResource.getName());
      boolean retValue = table.checkAndDelete(key, parts[0], parts[1],
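
Each REST write path above now follows the same shape: parse the specifier, then require exactly two parts, since "fam:" parses to a two-element result and only a bare "fam" is rejected. A condensed sketch of the guard without the HTTP plumbing (method and class names are illustrative):

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class RestColumnCheckDemo {
  // Mirrors the validation in the resource classes, which answer 400 Bad Request.
  static byte[][] requireFamilyAndQualifier(byte[] column) {
    byte[][] parts = KeyValue.parseColumn(column);
    if (parts.length != 2) {
      throw new IllegalArgumentException("column must be family:qualifier");
    }
    return parts;
  }

  public static void main(String[] args) {
    requireFamilyAndQualifier(Bytes.toBytes("cf:q")); // accepted
    requireFamilyAndQualifier(Bytes.toBytes("cf:"));  // accepted: empty qualifier
    requireFamilyAndQualifier(Bytes.toBytes("cf"));   // rejected: family only
  }
}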

View File

@@ -50,10 +50,12 @@ public class RowResultGenerator extends ResultGenerator {
     if (rowspec.hasColumns()) {
       for (byte[] col: rowspec.getColumns()) {
         byte[][] split = KeyValue.parseColumn(col);
-        if (split.length == 2 && split[1].length != 0) {
+        if (split.length == 1) {
+          get.addFamily(split[0]);
+        } else if (split.length == 2) {
           get.addColumn(split[0], split[1]);
         } else {
-          get.addFamily(split[0]);
+          throw new IllegalArgumentException("Invalid column specifier.");
         }
       }
     }

View File

@@ -25,7 +25,6 @@ import java.util.Collection;
 import java.util.TreeSet;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -122,11 +121,7 @@ public class RowSpec {
         }
         String s = URLDecoder.decode(column.toString(),
           HConstants.UTF8_ENCODING);
-        if (!s.contains(":")) {
-          this.columns.add(Bytes.toBytes(s + ":"));
-        } else {
-          this.columns.add(Bytes.toBytes(s));
-        }
+        this.columns.add(Bytes.toBytes(s));
         column.setLength(0);
         i++;
         continue;
@@ -136,15 +131,11 @@ public class RowSpec {
       }
       i++;
       // trailing list entry
-      if (column.length() > 1) {
+      if (column.length() > 0) {
         String s = URLDecoder.decode(column.toString(),
           HConstants.UTF8_ENCODING);
-        if (!s.contains(":")) {
-          this.columns.add(Bytes.toBytes(s + ":"));
-        } else {
-          this.columns.add(Bytes.toBytes(s));
-        }
+        this.columns.add(Bytes.toBytes(s));
       }
     } catch (IndexOutOfBoundsException e) {
       throw new IllegalArgumentException(e);
     } catch (UnsupportedEncodingException e) {

View File

@@ -76,10 +76,12 @@ public class ScannerResultGenerator extends ResultGenerator {
       byte[][] columns = rowspec.getColumns();
       for (byte[] column: columns) {
         byte[][] split = KeyValue.parseColumn(column);
-        if (split.length > 1 && (split[1] != null && split[1].length != 0)) {
+        if (split.length == 1) {
+          scan.addFamily(split[0]);
+        } else if (split.length == 2) {
           scan.addColumn(split[0], split[1]);
         } else {
-          scan.addFamily(split[0]);
+          throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
         }
       }
     }

View File

@@ -28,17 +28,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-import com.google.protobuf.Service;
-import com.google.protobuf.ServiceException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -48,17 +39,22 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.rest.Constants;
 import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
@@ -66,6 +62,10 @@ import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.rest.model.ScannerModel;
 import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+import com.google.protobuf.Service;
+import com.google.protobuf.ServiceException;

 /**
  * HTable interface to remote tables accessed via REST gateway
@@ -116,8 +116,8 @@ public class RemoteHTable implements HTableInterface {
         }
       }
     } else {
+      // this is an unqualified family. append the family name and NO ':'
       sb.append(Bytes.toStringBinary((byte[])e.getKey()));
-      sb.append(':');
     }
     if (i.hasNext()) {
       sb.append(',');
@@ -172,7 +172,14 @@ public class RemoteHTable implements HTableInterface {
     for (CellModel cell: row.getCells()) {
       byte[][] split = KeyValue.parseColumn(cell.getColumn());
       byte[] column = split[0];
-      byte[] qualifier = split.length > 1 ? split[1] : null;
+      byte[] qualifier = null;
+      if (split.length == 1) {
+        qualifier = HConstants.EMPTY_BYTE_ARRAY;
+      } else if (split.length == 2) {
+        qualifier = split[1];
+      } else {
+        throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
+      }
       kvs.add(new KeyValue(row.getKey(), column, qualifier,
         cell.getTimestamp(), cell.getValue()));
     }
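
The qualifier selection in the cell-rebuilding loop reduces to a small mapping over the split length; isolated here for clarity (class and method names are illustrative):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class RemoteColumnDemo {
  static byte[] qualifierFor(byte[] columnSpec) {
    byte[][] split = KeyValue.parseColumn(columnSpec);
    switch (split.length) {
      case 1: return HConstants.EMPTY_BYTE_ARRAY; // bare family off the wire
      case 2: return split[1];                    // family:qualifier (may be empty)
      default: throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
    }
  }

  public static void main(String[] args) {
    assert qualifierFor(Bytes.toBytes("cf")).length == 0;
    assert Bytes.toString(qualifierFor(Bytes.toBytes("cf:q"))).equals("q");
  }
}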

View File

@@ -197,12 +197,10 @@ public class IncrementCoalescer implements IncrementCoalescerMBean {
   private boolean internalQueueTincrement(TIncrement inc) throws TException {
     byte[][] famAndQf = KeyValue.parseColumn(inc.getColumn());
-    if (famAndQf.length < 1) return false;
-    byte[] qual = famAndQf.length == 1 ? new byte[0] : famAndQf[1];
-    return internalQueueIncrement(inc.getTable(), inc.getRow(), famAndQf[0], qual,
+    if (famAndQf.length != 2) return false;
+    return internalQueueIncrement(inc.getTable(), inc.getRow(), famAndQf[0], famAndQf[1],
       inc.getAmmount());
   }
   private boolean internalQueueIncrement(byte[] tableName, byte[] rowKey, byte[] fam,

View File

@@ -652,11 +652,21 @@ public class ThriftServerRunner implements Runnable {
         throws IOError {
       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
       if (famAndQf.length == 1) {
-        return get(tableName, row, famAndQf[0], new byte[0], attributes);
+        return get(tableName, row, famAndQf[0], null, attributes);
       }
+      if (famAndQf.length == 2) {
         return get(tableName, row, famAndQf[0], famAndQf[1], attributes);
+      }
+      throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
     }

+    /**
+     * Note: this internal interface is slightly different from public APIs in regard to handling
+     * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather,
+     * we respect qual == null as a request for the entire column family. The caller
+     * ({@link #get(ByteBuffer, ByteBuffer, ByteBuffer, Map)}) interface IS consistent in that the
+     * column is parsed as usual.
+     */
     protected List<TCell> get(ByteBuffer tableName,
         ByteBuffer row,
         byte[] family,
@@ -666,7 +676,7 @@ public class ThriftServerRunner implements Runnable {
       HTable table = getTable(tableName);
       Get get = new Get(getBytes(row));
       addAttributes(get, attributes);
-      if (qualifier == null || qualifier.length == 0) {
+      if (qualifier == null) {
         get.addFamily(family);
       } else {
         get.addColumn(family, qualifier);
@@ -681,27 +691,38 @@ public class ThriftServerRunner implements Runnable {
     @Deprecated
     @Override
-    public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row,
-        ByteBuffer column, int numVersions,
-        Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
+    public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
+        int numVersions, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
       if(famAndQf.length == 1) {
-        return getVer(tableName, row, famAndQf[0],
-            new byte[0], numVersions, attributes);
+        return getVer(tableName, row, famAndQf[0], null, numVersions, attributes);
       }
-      return getVer(tableName, row,
-          famAndQf[0], famAndQf[1], numVersions, attributes);
+      if (famAndQf.length == 2) {
+        return getVer(tableName, row, famAndQf[0], famAndQf[1], numVersions, attributes);
+      }
+      throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
     }

-    public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row,
-        byte[] family,
-        byte[] qualifier, int numVersions,
-        Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
+    /**
+     * Note: this public interface is slightly different from public Java APIs in regard to
+     * handling of the qualifier. Here we differ from the public Java API in that null != byte[0].
+     * Rather, we respect qual == null as a request for the entire column family. If you want to
+     * access the entire column family, use
+     * {@link #getVer(ByteBuffer, ByteBuffer, ByteBuffer, int, Map)} with a {@code column} value
+     * that lacks a {@code ':'}.
+     */
+    public List<TCell> getVer(ByteBuffer tableName, ByteBuffer row, byte[] family,
+        byte[] qualifier, int numVersions, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
       try {
         HTable table = getTable(tableName);
         Get get = new Get(getBytes(row));
         addAttributes(get, attributes);
+        if (null == qualifier) {
+          get.addFamily(family);
+        } else {
           get.addColumn(family, qualifier);
+        }
         get.setMaxVersions(numVersions);
         Result result = table.get(get);
         return ThriftUtilities.cellFromHBase(result.rawCells());
@@ -713,30 +734,38 @@ public class ThriftServerRunner implements Runnable {
     @Deprecated
     @Override
-    public List<TCell> getVerTs(ByteBuffer tableName,
-        ByteBuffer row,
-        ByteBuffer column,
-        long timestamp,
-        int numVersions,
-        Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
+    public List<TCell> getVerTs(ByteBuffer tableName, ByteBuffer row, ByteBuffer column,
+        long timestamp, int numVersions, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
       if (famAndQf.length == 1) {
-        return getVerTs(tableName, row, famAndQf[0], new byte[0], timestamp,
-            numVersions, attributes);
+        return getVerTs(tableName, row, famAndQf[0], null, timestamp, numVersions, attributes);
       }
-      return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp,
-          numVersions, attributes);
+      if (famAndQf.length == 2) {
+        return getVerTs(tableName, row, famAndQf[0], famAndQf[1], timestamp, numVersions,
+          attributes);
+      }
+      throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
     }

-    protected List<TCell> getVerTs(ByteBuffer tableName,
-        ByteBuffer row, byte [] family,
-        byte [] qualifier, long timestamp, int numVersions,
-        Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
+    /**
+     * Note: this internal interface is slightly different from public APIs in regard to handling
+     * of the qualifier. Here we differ from the public Java API in that null != byte[0]. Rather,
+     * we respect qual == null as a request for the entire column family. The caller
+     * ({@link #getVerTs(ByteBuffer, ByteBuffer, ByteBuffer, long, int, Map)}) interface IS
+     * consistent in that the column is parsed as usual.
+     */
+    protected List<TCell> getVerTs(ByteBuffer tableName, ByteBuffer row, byte[] family,
+        byte[] qualifier, long timestamp, int numVersions, Map<ByteBuffer, ByteBuffer> attributes)
+        throws IOError {
       try {
         HTable table = getTable(tableName);
         Get get = new Get(getBytes(row));
         addAttributes(get, attributes);
+        if (null == qualifier) {
+          get.addFamily(family);
+        } else {
           get.addColumn(family, qualifier);
+        }
         get.setTimeRange(0, timestamp);
         get.setMaxVersions(numVersions);
         Result result = table.get(get);
@@ -1002,9 +1031,8 @@ public class ThriftServerRunner implements Runnable {
             : Durability.SKIP_WAL);
       } else {
         if(famAndQf.length == 1) {
-          put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY,
-              m.value != null ? getBytes(m.value)
-                  : HConstants.EMPTY_BYTE_ARRAY);
+          LOG.warn("No column qualifier specified. Delete is the only mutation supported "
+              + "over the whole column family.");
         } else {
           put.add(famAndQf[0], famAndQf[1],
               m.value != null ? getBytes(m.value)
@@ -1061,13 +1089,15 @@ public class ThriftServerRunner implements Runnable {
             : Durability.SKIP_WAL);
       } else {
         if (famAndQf.length == 1) {
-          put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY,
-              m.value != null ? getBytes(m.value)
-                  : HConstants.EMPTY_BYTE_ARRAY);
-        } else {
+          LOG.warn("No column qualifier specified. Delete is the only mutation supported "
+              + "over the whole column family.");
+        }
+        if (famAndQf.length == 2) {
           put.add(famAndQf[0], famAndQf[1],
               m.value != null ? getBytes(m.value)
                   : HConstants.EMPTY_BYTE_ARRAY);
+        } else {
+          throw new IllegalArgumentException("Invalid famAndQf provided.");
         }
         put.setDurability(m.writeToWAL ? Durability.SYNC_WAL : Durability.SKIP_WAL);
       }
@@ -1102,8 +1132,7 @@ public class ThriftServerRunner implements Runnable {
         throws IOError, IllegalArgument, TException {
       byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
       if(famAndQf.length == 1) {
-        return atomicIncrement(tableName, row, famAndQf[0], new byte[0],
-            amount);
+        return atomicIncrement(tableName, row, famAndQf[0], HConstants.EMPTY_BYTE_ARRAY, amount);
       }
       return atomicIncrement(tableName, row, famAndQf[0], famAndQf[1], amount);
     }
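
The Thrift handlers now treat null and empty qualifiers differently: null (no colon in the specifier) means the whole family, while an empty byte[] (trailing colon) names a real column. A minimal sketch of that convention, assuming the HBase client classes are available (helper and class names are illustrative):

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class NullVsEmptyQualifierDemo {
  static Get buildGet(byte[] row, byte[] family, byte[] qualifier) {
    Get get = new Get(row);
    if (qualifier == null) {
      get.addFamily(family);            // "fam"  -> whole-family read
    } else {
      get.addColumn(family, qualifier); // "fam:" -> the empty-qualifier column
    }
    return get;
  }

  public static void main(String[] args) {
    buildGet(Bytes.toBytes("r"), Bytes.toBytes("fam"), null);
    buildGet(Bytes.toBytes("r"), Bytes.toBytes("fam"), new byte[0]);
  }
}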

View File

@@ -198,9 +198,8 @@ public class ThriftUtilities {
   public static Increment incrementFromThrift(TIncrement tincrement) {
     Increment inc = new Increment(tincrement.getRow());
     byte[][] famAndQf = KeyValue.parseColumn(tincrement.getColumn());
-    if (famAndQf.length <1 ) return null;
-    byte[] qual = famAndQf.length == 1 ? new byte[0]: famAndQf[1];
-    inc.addColumn(famAndQf[0], qual, tincrement.getAmmount());
+    if (famAndQf.length != 2) return null;
+    inc.addColumn(famAndQf[0], famAndQf[1], tincrement.getAmmount());
     return inc;
   }
 }

View File

@@ -28,8 +28,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -54,7 +52,6 @@ import org.junit.experimental.categories.Category;
 @Category(MediumTests.class)
 public class TestRemoteTable {
-  private static final Log LOG = LogFactory.getLog(TestRemoteTable.class);
   private static final String TABLE = "TestRemoteTable";
   private static final byte[] ROW_1 = Bytes.toBytes("testrow1");
   private static final byte[] ROW_2 = Bytes.toBytes("testrow2");
@@ -82,13 +79,18 @@ public class TestRemoteTable {
     TEST_UTIL.startMiniCluster();
     REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
     HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
-    if (!admin.tableExists(TABLE)) {
+    if (admin.tableExists(TABLE)) {
+      if (admin.isTableEnabled(TABLE)) admin.disableTable(TABLE);
+      admin.deleteTable(TABLE);
+    }
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(TABLE));
     htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
     htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
     htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
     admin.createTable(htd);
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+    HTable table = null;
+    try {
+      table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
       Put put = new Put(ROW_1);
       put.add(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
       table.put(put);
@@ -98,6 +100,8 @@ public class TestRemoteTable {
       put.add(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
       table.put(put);
       table.flushCommits();
+    } finally {
+      if (null != table) table.close();
     }
     remoteTable = new RemoteHTable(
       new Client(new Cluster().add("localhost",
@@ -114,9 +118,14 @@ public class TestRemoteTable {
   @Test
   public void testGetTableDescriptor() throws IOException {
-    HTableDescriptor local = new HTable(TEST_UTIL.getConfiguration(),
-        TABLE).getTableDescriptor();
+    HTable table = null;
+    try {
+      table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
+      HTableDescriptor local = table.getTableDescriptor();
       assertEquals(remoteTable.getTableDescriptor(), local);
+    } finally {
+      if (null != table) table.close();
+    }
   }

   @Test

View File

@@ -78,10 +78,10 @@ module Hbase
       assert_nil(qual)
     end

-    define_test "parse_column_name should not return a qualifier for family-only column specifiers" do
+    define_test "parse_column_name should support an empty column qualifier" do
       col, qual = table('hbase:meta').parse_column_name('foo:')
       assert_not_nil(col)
-      assert_nil(qual)
+      assert_not_nil(qual)
     end

     define_test "parse_column_name should return a qualifier for family:qualifier column specifiers" do