HBASE-22590 Remove the deprecated methods in Table interface (#309)
Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
Signed-off-by: Guanghao <zghao@apache.org>
commit 9b413cf262
parent ed30909d27
@@ -116,7 +116,7 @@ public class TestBackupMerge extends TestBackupBase {
       tablesRestoreIncMultiple, tablesMapIncMultiple, true));

     Table hTable = conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+    LOG.debug("After incremental restore: " + hTable.getDescriptor());
     int countRows = TEST_UTIL.countRows(hTable, famName);
     LOG.debug("f1 has " + countRows + " rows");
     Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows);
@@ -334,7 +334,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
       tablesRestoreIncMultiple, tablesMapIncMultiple, true));

     Table hTable = conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+    LOG.debug("After incremental restore: " + hTable.getDescriptor());
     LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
     Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);

@@ -659,22 +659,6 @@ public class HTable implements Table {
         callWithRetries(callable, this.operationTimeoutMs);
   }

-  @Override
-  @Deprecated
-  public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier,
-      final byte [] value, final Put put) throws IOException {
-    return doCheckAndPut(row, family, qualifier, CompareOperator.EQUAL.name(), value, null, put);
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier,
-      final CompareOperator op, final byte [] value, final Put put) throws IOException {
-    // The name of the operators in CompareOperator are intentionally those of the
-    // operators in the filter's CompareOp enum.
-    return doCheckAndPut(row, family, qualifier, op.name(), value, null, put);
-  }
-
   private boolean doCheckAndPut(final byte[] row, final byte[] family, final byte[] qualifier,
       final String opName, final byte[] value, final TimeRange timeRange, final Put put)
       throws IOException {
@@ -695,21 +679,6 @@ public class HTable implements Table {
         .callWithRetries(callable, this.operationTimeoutMs);
   }

-  @Override
-  @Deprecated
-  public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
-      final byte[] value, final Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, CompareOperator.EQUAL.name(), value, null,
-      delete);
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
-      final CompareOperator op, final byte[] value, final Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, op.name(), value, null, delete);
-  }
-
   private boolean doCheckAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
       final String opName, final byte[] value, final TimeRange timeRange, final Delete delete)
       throws IOException {
@@ -801,13 +770,6 @@ public class HTable implements Table {
     return ((Result)results[0]).getExists();
   }

-  @Override
-  @Deprecated
-  public boolean checkAndMutate(final byte [] row, final byte [] family, final byte [] qualifier,
-      final CompareOperator op, final byte [] value, final RowMutations rm) throws IOException {
-    return doCheckAndMutate(row, family, qualifier, op.name(), value, null, rm);
-  }
-
   @Override
   public boolean exists(final Get get) throws IOException {
     Result r = get(get, true);
@@ -981,70 +943,21 @@ public class HTable implements Table {
     return unit.convert(rpcTimeoutMs, TimeUnit.MILLISECONDS);
   }

-  @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    return rpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    setReadRpcTimeout(rpcTimeout);
-    setWriteRpcTimeout(rpcTimeout);
-  }
-
   @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     return unit.convert(readRpcTimeoutMs, TimeUnit.MILLISECONDS);
   }

-  @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    return readRpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {
-    this.readRpcTimeoutMs = readRpcTimeout;
-  }
-
   @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     return unit.convert(writeRpcTimeoutMs, TimeUnit.MILLISECONDS);
   }

-  @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    return writeRpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {
-    this.writeRpcTimeoutMs = writeRpcTimeout;
-  }
-
   @Override
   public long getOperationTimeout(TimeUnit unit) {
     return unit.convert(operationTimeoutMs, TimeUnit.MILLISECONDS);
   }

-  @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    return operationTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    this.operationTimeoutMs = operationTimeout;
-  }
-
   @Override
   public String toString() {
     return tableName + ";" + connection;
@@ -32,7 +32,6 @@ import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.io.TimeRange;
@@ -66,23 +65,6 @@ public interface Table extends Closeable {
    */
   Configuration getConfiguration();

-  /**
-   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
-   * @throws java.io.IOException if a remote or network exception occurs.
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   *             use {@link #getDescriptor()}
-   */
-  @Deprecated
-  default HTableDescriptor getTableDescriptor() throws IOException {
-    TableDescriptor descriptor = getDescriptor();
-
-    if (descriptor instanceof HTableDescriptor) {
-      return (HTableDescriptor)descriptor;
-    } else {
-      return new HTableDescriptor(descriptor);
-    }
-  }
-
   /**
    * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table.
    * @throws java.io.IOException if a remote or network exception occurs.
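
Migration note: callers of the removed default getTableDescriptor() can switch to
getDescriptor(), which returns the TableDescriptor interface. A minimal sketch, assuming a
hypothetical connection "conn" and table name "example":

    try (Table t = conn.getTable(TableName.valueOf("example"))) {
      TableDescriptor descriptor = t.getDescriptor();
      // Only wrap if legacy code still needs the old class, as the removed default did:
      HTableDescriptor legacy = new HTableDescriptor(descriptor);
    }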
@@ -131,24 +113,6 @@ public interface Table extends Closeable {
     throw new NotImplementedException("Add an implementation!");
   }

-  /**
-   * Test for the existence of columns in the table, as specified by the Gets.
-   * This will return an array of booleans. Each value will be true if the related Get matches
-   * one or more keys, false if not.
-   * This is a server-side call so it prevents any data from being transferred to
-   * the client.
-   *
-   * @param gets the Gets
-   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
-   * @throws IOException e
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   *             use {@link #exists(List)}
-   */
-  @Deprecated
-  default boolean[] existsAll(List<Get> gets) throws IOException {
-    return exists(gets);
-  }
-
   /**
    * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
    * The ordering of execution of the actions is not defined. Meaning if you do a Put and a
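
Migration note: existsAll(List) simply delegated to exists(List), so callers substitute the
latter directly; the boolean-per-Get contract is unchanged. A sketch with hypothetical row keys:

    List<Get> gets = new ArrayList<>();
    gets.add(new Get(Bytes.toBytes("row1")));
    gets.add(new Get(Bytes.toBytes("row2")));
    boolean[] found = table.exists(gets);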
@@ -284,55 +248,6 @@ public interface Table extends Closeable {
     throw new NotImplementedException("Add an implementation!");
   }

-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the put. If the passed value is null, the check
-   * is for the lack of column (ie: non-existance)
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param value the expected value
-   * @param put data to put if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
-      throws IOException {
-    return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, put);
-  }
-
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the put. If the passed value is null, the check
-   * is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> add the put.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op comparison operator to use
-   * @param value the expected value
-   * @param put data to put if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
-      byte[] value, Put put) throws IOException {
-    RowMutations mutations = new RowMutations(put.getRow(), 1);
-    mutations.add(put);
-
-    return checkAndMutate(row, family, qualifier, op, value, mutations);
-  }
-
   /**
    * Deletes the specified cells/row.
    *
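
Migration note: per the @deprecated tags above, the removed checkAndPut overloads map onto the
checkAndMutate(byte[], byte[]) builder; row, family, qualifier, op, value and put stand in for
the caller's variables:

    // EQUAL overload
    boolean ok = table.checkAndMutate(row, family)
      .qualifier(qualifier)
      .ifEquals(value)
      .thenPut(put);

    // CompareOperator overload
    boolean ok2 = table.checkAndMutate(row, family)
      .qualifier(qualifier)
      .ifMatches(op, value)
      .thenPut(put);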
@@ -371,55 +286,6 @@
     throw new NotImplementedException("Add an implementation!");
   }

-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the delete. If the passed value is null, the
-   * check is for the lack of column (ie: non-existance)
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param value the expected value
-   * @param delete data to delete if check succeeds
-   * @throws IOException e
-   * @return true if the new delete was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Delete delete) throws IOException {
-    return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, value, delete);
-  }
-
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the delete. If the passed value is null, the
-   * check is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> add the delete.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op comparison operator to use
-   * @param value the expected value
-   * @param delete data to delete if check succeeds
-   * @throws IOException e
-   * @return true if the new delete was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator op, byte[] value, Delete delete) throws IOException {
-    RowMutations mutations = new RowMutations(delete.getRow(), 1);
-    mutations.add(delete);
-
-    return checkAndMutate(row, family, qualifier, op, value, mutations);
-  }
-
   /**
    * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
    * adds the Put/Delete/RowMutations.
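
Migration note: the removed checkAndDelete overloads follow the same builder pattern, ending in
thenDelete (variables are placeholders):

    boolean deleted = table.checkAndMutate(row, family)
      .qualifier(qualifier)
      .ifMatches(op, value) // or ifEquals(value) for the EQUAL overload
      .thenDelete(delete);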
@@ -731,32 +597,6 @@
     throw new NotImplementedException("Add an implementation!");
   }

-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected value.
-   * If it does, it performs the row mutations. If the passed value is null, the check
-   * is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> perform row mutations.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op the comparison operator
-   * @param value the expected value
-   * @param mutation mutations to perform if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
-      byte[] value, RowMutations mutation) throws IOException {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
   /**
    * Get timeout of each rpc request in this Table instance. It will be overridden by a more
    * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
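
Migration note: the RowMutations variant ends in thenMutate (variables are placeholders):

    boolean applied = table.checkAndMutate(row, family)
      .qualifier(qualifier)
      .ifMatches(op, value)
      .thenMutate(mutations);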
@@ -769,36 +609,6 @@
     throw new NotImplementedException("Add an implementation!");
   }

-  /**
-   * Get timeout (millisecond) of each rpc request in this Table instance.
-   *
-   * @return Currently configured read timeout
-   * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or
-   *             {@link #getWriteRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getRpcTimeout() {
-    return (int)getRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc request in operations of this Table instance, will
-   * override the value of hbase.rpc.timeout in configuration.
-   * If a rpc request waiting too long, it will stop waiting and send a new request to retry until
-   * retries exhausted or operation timeout reached.
-   * <p>
-   * NOTE: This will set both the read and write timeout settings to the provided value.
-   *
-   * @param rpcTimeout the timeout of each rpc request in millisecond.
-   *
-   * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
-   */
-  @Deprecated
-  default void setRpcTimeout(int rpcTimeout) {
-    setReadRpcTimeout(rpcTimeout);
-    setWriteRpcTimeout(rpcTimeout);
-  }
-
   /**
    * Get timeout of each rpc read request in this Table instance.
    * @param unit the unit of time the timeout to be represented in
@@ -808,30 +618,6 @@
     throw new NotImplementedException("Add an implementation!");
   }

-  /**
-   * Get timeout (millisecond) of each rpc read request in this Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getReadRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getReadRpcTimeout() {
-    return (int)getReadRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc read request in operations of this Table instance, will
-   * override the value of hbase.rpc.read.timeout in configuration.
-   * If a rpc read request waiting too long, it will stop waiting and send a new request to retry
-   * until retries exhausted or operation timeout reached.
-   *
-   * @param readRpcTimeout the timeout for read rpc request in milliseconds
-   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
-   */
-  @Deprecated
-  default void setReadRpcTimeout(int readRpcTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
   /**
    * Get timeout of each rpc write request in this Table instance.
    * @param unit the unit of time the timeout to be represented in
@@ -841,30 +627,6 @@
     throw new NotImplementedException("Add an implementation!");
   }

-  /**
-   * Get timeout (millisecond) of each rpc write request in this Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getWriteRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getWriteRpcTimeout() {
-    return (int)getWriteRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc write request in operations of this Table instance, will
-   * override the value of hbase.rpc.write.timeout in configuration.
-   * If a rpc write request waiting too long, it will stop waiting and send a new request to retry
-   * until retries exhausted or operation timeout reached.
-   *
-   * @param writeRpcTimeout the timeout for write rpc request in milliseconds
-   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
-   */
-  @Deprecated
-  default void setWriteRpcTimeout(int writeRpcTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
   /**
    * Get timeout of each operation in Table instance.
    * @param unit the unit of time the timeout to be represented in
@@ -873,30 +635,4 @@
   default long getOperationTimeout(TimeUnit unit) {
     throw new NotImplementedException("Add an implementation!");
   }
-
-  /**
-   * Get timeout (millisecond) of each operation for in Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getOperationTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getOperationTimeout() {
-    return (int)getOperationTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each operation in this Table instance, will override the value
-   * of hbase.client.operation.timeout in configuration.
-   * Operation timeout is a top-level restriction that makes sure a blocking method will not be
-   * blocked more than this. In each operation, if rpc request fails because of timeout or
-   * other reason, it will retry until success or throw a RetriesExhaustedException. But if the
-   * total time being blocking reach the operation timeout before retries exhausted, it will break
-   * early and throw SocketTimeoutException.
-   * @param operationTimeout the total timeout of each operation in millisecond.
-   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
-   */
-  @Deprecated
-  default void setOperationTimeout(int operationTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
 }
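
Migration note: per the @deprecated tags, per-table timeouts are now fixed at construction via
TableBuilder and read back through the TimeUnit getters. A sketch, assuming a hypothetical
connection "conn" and executor "pool":

    Table t = conn.getTableBuilder(TableName.valueOf("example"), pool)
      .setOperationTimeout(60000) // replaces Table#setOperationTimeout(int)
      .setReadRpcTimeout(10000)   // replaces Table#setReadRpcTimeout(int)
      .setWriteRpcTimeout(10000)  // replaces Table#setWriteRpcTimeout(int)
      .build();
    long operationTimeoutMs = t.getOperationTimeout(TimeUnit.MILLISECONDS);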
@@ -1353,7 +1353,7 @@ public class TestAsyncProcess {
     ap.previousTimeout = -1;

     try {
-      ht.existsAll(gets);
+      ht.exists(gets);
     } catch (ClassCastException e) {
       // No result response on this test.
     }
@@ -298,7 +298,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
         RegionLocator regionLocator = conn.getRegionLocator(getTablename())) {

       // Configure the partitioner and other things needed for HFileOutputFormat.
-      HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+      HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);

       // Run the job making sure it works.
       assertEquals(true, job.waitForCompletion(true));
@@ -75,6 +75,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
@@ -521,7 +522,7 @@ public class TestHFileOutputFormat2 {
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
     setupMockStartKeys(regionLocator);
     setupMockTableName(regionLocator);
-    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
     assertEquals(job.getNumReduceTasks(), 4);
   }

@@ -631,7 +632,7 @@ public class TestHFileOutputFormat2 {
       assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);

       allTables.put(tableStrSingle, table);
-      tableInfo.add(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), r));
+      tableInfo.add(new HFileOutputFormat2.TableInfo(table.getDescriptor(), r));
     }
     Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
     // Generate the bulk load files
@@ -818,7 +819,7 @@ public class TestHFileOutputFormat2 {
     conf.set(HFileOutputFormat2.COMPRESSION_FAMILIES_CONF_KEY,
       HFileOutputFormat2.serializeColumnFamilyAttribute
         (HFileOutputFormat2.compressionDetails,
-          Arrays.asList(table.getTableDescriptor())));
+          Arrays.asList(table.getDescriptor())));

     // read back family specific compression setting from the configuration
     Map<byte[], Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
@@ -844,7 +845,7 @@ public class TestHFileOutputFormat2 {
         .setBlockCacheEnabled(false)
         .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }

   /**
@@ -890,7 +891,7 @@ public class TestHFileOutputFormat2 {
       familyToBloomType);
     conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY,
       HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails,
-        Arrays.asList(table.getTableDescriptor())));
+        Arrays.asList(table.getDescriptor())));

     // read back family specific data block encoding settings from the
     // configuration
@@ -918,7 +919,7 @@ public class TestHFileOutputFormat2 {
         .setBlockCacheEnabled(false)
         .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }

   /**
@@ -962,7 +963,7 @@ public class TestHFileOutputFormat2 {
     conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY,
       HFileOutputFormat2.serializeColumnFamilyAttribute
         (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table
-          .getTableDescriptor())));
+          .getDescriptor())));

     // read back family specific data block encoding settings from the
     // configuration
@@ -991,7 +992,7 @@ public class TestHFileOutputFormat2 {
         .setBlockCacheEnabled(false)
         .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }

   /**
@@ -1036,7 +1037,7 @@ public class TestHFileOutputFormat2 {
     Table table = Mockito.mock(Table.class);
     setupMockColumnFamiliesForDataBlockEncoding(table,
       familyToDataBlockEncoding);
-    HTableDescriptor tableDescriptor = table.getTableDescriptor();
+    TableDescriptor tableDescriptor = table.getDescriptor();
     conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
       HFileOutputFormat2.serializeColumnFamilyAttribute
         (HFileOutputFormat2.dataBlockEncodingDetails, Arrays
@@ -1068,7 +1069,7 @@ public class TestHFileOutputFormat2 {
         .setBlockCacheEnabled(false)
         .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }

   /**
@@ -1126,7 +1127,7 @@ public class TestHFileOutputFormat2 {
     Table table = Mockito.mock(Table.class);
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
     HTableDescriptor htd = new HTableDescriptor(TABLE_NAMES[0]);
-    Mockito.doReturn(htd).when(table).getTableDescriptor();
+    Mockito.doReturn(htd).when(table).getDescriptor();
     for (HColumnDescriptor hcd: HBaseTestingUtility.generateColumnDescriptors()) {
       htd.addFamily(hcd);
     }
@@ -1146,7 +1147,7 @@ public class TestHFileOutputFormat2 {
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
     job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
     setupRandomGeneratorMapper(job, false);
-    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
     FileOutputFormat.setOutputPath(job, dir);
     context = createTestTaskAttemptContext(job);
     HFileOutputFormat2 hof = new HFileOutputFormat2();
@@ -1248,7 +1249,7 @@ public class TestHFileOutputFormat2 {
     for (int i = 0; i < 2; i++) {
       Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
       runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table
-        .getTableDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false);
+        .getDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false);
       // Perform the actual load
       new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
     }
@@ -1341,7 +1342,7 @@ public class TestHFileOutputFormat2 {

     RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]);
     runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table
-      .getTableDescriptor(), regionLocator)), testDir, false);
+      .getDescriptor(), regionLocator)), testDir, false);

     // Perform the actual load
     new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -16,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.rest;

 import java.io.IOException;
 import java.util.Map;
-
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
@@ -35,20 +32,19 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.UriInfo;
 import javax.xml.namespace.QName;
-
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
 import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 @InterfaceAudience.Private
 public class SchemaResource extends ResourceBase {
@@ -65,21 +61,15 @@ public class SchemaResource extends ResourceBase {

   /**
    * Constructor
-   * @param tableResource
-   * @throws IOException
    */
   public SchemaResource(TableResource tableResource) throws IOException {
     super();
     this.tableResource = tableResource;
   }

-  private HTableDescriptor getTableSchema() throws IOException,
-      TableNotFoundException {
-    Table table = servlet.getTable(tableResource.getName());
-    try {
-      return table.getTableDescriptor();
-    } finally {
-      table.close();
+  private HTableDescriptor getTableSchema() throws IOException, TableNotFoundException {
+    try (Table table = servlet.getTable(tableResource.getName())) {
+      return new HTableDescriptor(table.getDescriptor());
     }
   }

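Note: the rewritten getTableSchema() above adopts try-with-resources so the Table is closed on
every exit path, and wraps the result in HTableDescriptor because the REST models still expect
the legacy class.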
@@ -22,20 +22,27 @@ import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -63,19 +70,9 @@ import org.apache.hadoop.hbase.rest.model.ScannerModel;
 import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
-
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
@@ -94,8 +91,8 @@ public class RemoteHTable implements Table {
   final long sleepTime;

   @SuppressWarnings("rawtypes")
-  protected String buildRowSpec(final byte[] row, final Map familyMap,
-      final long startTime, final long endTime, final int maxVersions) {
+  protected String buildRowSpec(final byte[] row, final Map familyMap, final long startTime,
+    final long endTime, final int maxVersions) {
     StringBuffer sb = new StringBuffer();
     sb.append('/');
     sb.append(Bytes.toString(name));
@@ -106,15 +103,15 @@
     Iterator i = familyMap.entrySet().iterator();
     sb.append('/');
     while (i.hasNext()) {
-      Map.Entry e = (Map.Entry)i.next();
-      Collection quals = (Collection)e.getValue();
+      Map.Entry e = (Map.Entry) i.next();
+      Collection quals = (Collection) e.getValue();
       if (quals == null || quals.isEmpty()) {
         // this is an unqualified family. append the family name and NO ':'
-        sb.append(toURLEncodedBytes((byte[])e.getKey()));
+        sb.append(toURLEncodedBytes((byte[]) e.getKey()));
       } else {
         Iterator ii = quals.iterator();
         while (ii.hasNext()) {
-          sb.append(toURLEncodedBytes((byte[])e.getKey()));
+          sb.append(toURLEncodedBytes((byte[]) e.getKey()));
           Object o = ii.next();
           // Puts use byte[] but Deletes use KeyValue
           if (o instanceof byte[]) {
@@ -165,7 +162,7 @@
       return sb.toString();
     }
     sb.append("?");
-    for(int i=0; i<rows.length; i++) {
+    for (int i = 0; i < rows.length; i++) {
       byte[] rk = rows[i];
       if (i != 0) {
         sb.append('&');
@@ -181,9 +178,9 @@

   protected Result[] buildResultFromModel(final CellSetModel model) {
     List<Result> results = new ArrayList<>();
-    for (RowModel row: model.getRows()) {
+    for (RowModel row : model.getRows()) {
       List<Cell> kvs = new ArrayList<>(row.getCells().size());
-      for (CellModel cell: row.getCells()) {
+      for (CellModel cell : row.getCells()) {
         byte[][] split = CellUtil.parseColumn(cell.getColumn());
         byte[] column = split[0];
         byte[] qualifier = null;
@@ -194,8 +191,8 @@
       } else {
         throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
       }
-      kvs.add(new KeyValue(row.getKey(), column, qualifier,
-        cell.getTimestamp(), cell.getValue()));
+      kvs
+        .add(new KeyValue(row.getKey(), column, qualifier, cell.getTimestamp(), cell.getValue()));
     }
     results.add(Result.create(kvs));
   }
@@ -205,11 +202,10 @@
   protected CellSetModel buildModelFromPut(Put put) {
     RowModel row = new RowModel(put.getRow());
     long ts = put.getTimestamp();
-    for (List<Cell> cells: put.getFamilyCellMap().values()) {
-      for (Cell cell: cells) {
+    for (List<Cell> cells : put.getFamilyCellMap().values()) {
+      for (Cell cell : cells) {
         row.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
-          ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(),
-          CellUtil.cloneValue(cell)));
+          ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(), CellUtil.cloneValue(cell)));
       }
     }
     CellSetModel model = new CellSetModel();
@@ -256,36 +252,6 @@
     return conf;
   }

-  @Override
-  @Deprecated
-  public HTableDescriptor getTableDescriptor() throws IOException {
-    StringBuilder sb = new StringBuilder();
-    sb.append('/');
-    sb.append(Bytes.toString(name));
-    sb.append('/');
-    sb.append("schema");
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
-      int code = response.getCode();
-      switch (code) {
-      case 200:
-        TableSchemaModel schema = new TableSchemaModel();
-        schema.getObjectFromMessage(response.getBody());
-        return schema.getTableDescriptor();
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("schema request returned " + code);
-      }
-    }
-    throw new IOException("schema request timed out");
-  }
-
   @Override
   public void close() throws IOException {
     client.shutdown();
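Note: callers of RemoteHTable should move to the Table interface's getDescriptor(); the
replacement schema fetch is outside this hunk, so this note only flags the removal of the
deprecated entry point.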
@@ -294,8 +260,8 @@
   @Override
   public Result get(Get get) throws IOException {
     TimeRange range = get.getTimeRange();
-    String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
-      range.getMin(), range.getMax(), get.getMaxVersions());
+    String spec = buildRowSpec(get.getRow(), get.getFamilyMap(), range.getMin(), range.getMax(),
+      get.getMaxVersions());
     if (get.getFilter() != null) {
       LOG.warn("filters not supported on gets");
     }
@@ -316,12 +282,13 @@
     int maxVersions = 1;
     int count = 0;

-    for(Get g:gets) {
+    for (Get g : gets) {

-      if ( count == 0 ) {
+      if (count == 0) {
         maxVersions = g.getMaxVersions();
       } else if (g.getMaxVersions() != maxVersions) {
-        LOG.warn("MaxVersions on Gets do not match, using the first in the list ("+maxVersions+")");
+        LOG.warn(
+          "MaxVersions on Gets do not match, using the first in the list (" + maxVersions + ")");
       }

       if (g.getFilter() != null) {
@@ -329,7 +296,7 @@
       }

       rows[count] = g.getRow();
-      count ++;
+      count++;
     }

     String spec = buildMultiRowSpec(rows, maxVersions);
@@ -346,7 +313,7 @@
           CellSetModel model = new CellSetModel();
           model.getObjectFromMessage(response.getBody());
           Result[] results = buildResultFromModel(model);
-          if ( results.length > 0) {
+          if (results.length > 0) {
             return results;
           }
           // fall through
@@ -357,7 +324,7 @@
           try {
             Thread.sleep(sleepTime);
           } catch (InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
           }
           break;
         default:
@@ -393,21 +360,21 @@
     sb.append('/');
     sb.append(toURLEncodedBytes(put.getRow()));
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
-        model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
         case 200:
           return;
         case 509:
           try {
             Thread.sleep(sleepTime);
           } catch (InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
           }
           break;
         default:
           throw new IOException("put request failed with " + code);
       }
     }
     throw new IOException("put request timed out");
@@ -419,24 +386,24 @@
     // ignores the row specification in the URI

     // separate puts by row
-    TreeMap<byte[],List<Cell>> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for (Put put: puts) {
+    TreeMap<byte[], List<Cell>> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (Put put : puts) {
       byte[] row = put.getRow();
       List<Cell> cells = map.get(row);
       if (cells == null) {
         cells = new ArrayList<>();
         map.put(row, cells);
       }
-      for (List<Cell> l: put.getFamilyCellMap().values()) {
+      for (List<Cell> l : put.getFamilyCellMap().values()) {
        cells.addAll(l);
       }
     }

     // build the cell set
     CellSetModel model = new CellSetModel();
-    for (Map.Entry<byte[], List<Cell>> e: map.entrySet()) {
+    for (Map.Entry<byte[], List<Cell>> e : map.entrySet()) {
       RowModel row = new RowModel(e.getKey());
-      for (Cell cell: e.getValue()) {
+      for (Cell cell : e.getValue()) {
         row.addCell(new CellModel(cell));
       }
       model.addRow(row);
@@ -448,21 +415,21 @@ public class RemoteHTable implements Table {
     sb.append(Bytes.toString(name));
     sb.append("/$multiput"); // can be any nonexistent row
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
-        model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
         case 200:
           return;
         case 509:
           try {
             Thread.sleep(sleepTime);
           } catch (InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
           }
           break;
         default:
           throw new IOException("multiput request failed with " + code);
       }
     }
     throw new IOException("multiput request timed out");
@@ -470,23 +437,23 @@ public class RemoteHTable implements Table {
 
   @Override
   public void delete(Delete delete) throws IOException {
-    String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(),
-      delete.getTimestamp(), delete.getTimestamp(), 1);
+    String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), delete.getTimestamp(),
+      delete.getTimestamp(), 1);
     for (int i = 0; i < maxRetries; i++) {
       Response response = client.delete(spec);
       int code = response.getCode();
       switch (code) {
         case 200:
           return;
         case 509:
           try {
             Thread.sleep(sleepTime);
           } catch (InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
           }
           break;
         default:
           throw new IOException("delete request failed with " + code);
       }
     }
     throw new IOException("delete request timed out");

@@ -494,7 +461,7 @@ public class RemoteHTable implements Table {
 
   @Override
   public void delete(List<Delete> deletes) throws IOException {
-    for (Delete delete: deletes) {
+    for (Delete delete : deletes) {
       delete(delete);
     }
   }
@@ -505,7 +472,31 @@ public class RemoteHTable implements Table {
 
   @Override
   public TableDescriptor getDescriptor() throws IOException {
-    return getTableDescriptor();
+    StringBuilder sb = new StringBuilder();
+    sb.append('/');
+    sb.append(Bytes.toString(name));
+    sb.append('/');
+    sb.append("schema");
+    for (int i = 0; i < maxRetries; i++) {
+      Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
+      int code = response.getCode();
+      switch (code) {
+        case 200:
+          TableSchemaModel schema = new TableSchemaModel();
+          schema.getObjectFromMessage(response.getBody());
+          return schema.getTableDescriptor();
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("schema request returned " + code);
+      }
+    }
+    throw new IOException("schema request timed out");
   }
 
   class Scanner implements ResultScanner {
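
getDescriptor() previously delegated to the now-removed getTableDescriptor(); after this change it fetches the table's /schema resource itself and decodes a TableSchemaModel. For callers the migration is a one-line change; a sketch, assuming an already-open Connection and a hypothetical table name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    class DescriptorMigration {
      // Before this commit callers wrote: HTableDescriptor htd = table.getTableDescriptor();
      static TableDescriptor describe(Connection conn, String name) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf(name))) {
          return table.getDescriptor();
        }
      }
    }
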
@@ -525,22 +516,22 @@ public class RemoteHTable implements Table {
       sb.append('/');
       sb.append("scanner");
       for (int i = 0; i < maxRetries; i++) {
-        Response response = client.post(sb.toString(),
-          Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+        Response response =
+          client.post(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
         int code = response.getCode();
         switch (code) {
           case 201:
             uri = response.getLocation();
             return;
           case 509:
             try {
               Thread.sleep(sleepTime);
             } catch (InterruptedException e) {
-              throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+              throw (InterruptedIOException) new InterruptedIOException().initCause(e);
             }
             break;
           default:
             throw new IOException("scan request failed with " + code);
         }
       }
       throw new IOException("scan request timed out");

@@ -552,26 +543,25 @@ public class RemoteHTable implements Table {
       sb.append("?n=");
       sb.append(nbRows);
       for (int i = 0; i < maxRetries; i++) {
-        Response response = client.get(sb.toString(),
-          Constants.MIMETYPE_PROTOBUF);
+        Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
         int code = response.getCode();
         switch (code) {
           case 200:
             CellSetModel model = new CellSetModel();
             model.getObjectFromMessage(response.getBody());
             return buildResultFromModel(model);
           case 204:
           case 206:
             return null;
           case 509:
             try {
               Thread.sleep(sleepTime);
             } catch (InterruptedException e) {
-              throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+              throw (InterruptedIOException) new InterruptedIOException().initCause(e);
             }
             break;
           default:
             throw new IOException("scanner.next request failed with " + code);
         }
       }
       throw new IOException("scanner.next request timed out");
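
The Scanner above holds a server-side scanner resource identified by the Location URI returned on creation. Consumer code is unaffected by this commit, but releasing the scanner promptly still matters; a hedged usage sketch with a hypothetical cell-counting helper:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    class ScannerUsage {
      // ResultScanner is Closeable; try-with-resources releases the underlying
      // scanner (for RemoteHTable, the REST-side scanner resource) even on error.
      static long countCells(Table table) throws IOException {
        long cells = 0;
        try (ResultScanner scanner = table.getScanner(new Scan())) {
          for (Result result : scanner) {
            cells += result.rawCells().length;
          }
        }
        return cells;
      }
    }
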
@@ -660,8 +650,7 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  public ResultScanner getScanner(byte[] family, byte[] qualifier)
-  throws IOException {
+  public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
     Scan scan = new Scan();
     scan.addColumn(family, qualifier);
     return new Scanner(scan);

@@ -671,15 +660,8 @@ public class RemoteHTable implements Table {
     return true;
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Put put) throws IOException {
-    return doCheckAndPut(row, family, qualifier, value, put);
-  }
-
-  private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Put put) throws IOException {
+  private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+      throws IOException {
     // column to check-the-value
     put.add(new KeyValue(row, family, qualifier, value));
 
@@ -692,43 +674,30 @@ public class RemoteHTable implements Table {
     sb.append("?check=put");
 
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(),
-        Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
         case 200:
           return true;
         case 304: // NOT-MODIFIED
           return false;
         case 509:
           try {
             Thread.sleep(sleepTime);
           } catch (final InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
           }
           break;
         default:
           throw new IOException("checkAndPut request failed with " + code);
       }
     }
     throw new IOException("checkAndPut request timed out");
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, Put put) throws IOException {
-    throw new IOException("checkAndPut for non-equal comparison not implemented");
-  }
-
-  @Override
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, value, delete);
-  }
-
-  private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Delete delete) throws IOException {
+  private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
+      Delete delete) throws IOException {
     Put put = new Put(row);
     put.setFamilyCellMap(delete.getFamilyCellMap());
     // column to check-the-value
@@ -742,47 +711,33 @@ public class RemoteHTable implements Table {
     sb.append("?check=delete");
 
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(),
-        Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
         case 200:
           return true;
         case 304: // NOT-MODIFIED
           return false;
         case 509:
           try {
             Thread.sleep(sleepTime);
           } catch (final InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
           }
           break;
         default:
           throw new IOException("checkAndDelete request failed with " + code);
       }
     }
     throw new IOException("checkAndDelete request timed out");
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, Delete delete) throws IOException {
-    throw new IOException("checkAndDelete for non-equal comparison not implemented");
-  }
-
   @Override
   public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
     return new CheckAndMutateBuilderImpl(row, family);
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, RowMutations rm) throws IOException {
-    throw new UnsupportedOperationException("checkAndMutate not implemented");
-  }
-
   @Override
   public Result increment(Increment increment) throws IOException {
     throw new IOException("Increment not supported");
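
With the equal-only checkAndPut/checkAndDelete overloads and the CompareOperator variants gone, the CheckAndMutateBuilder returned by checkAndMutate(row, family) is the single remaining entry point. A migration sketch (the row/family/qualifier/value constants are hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class CheckAndMutateMigration {
      // Hypothetical coordinates, for illustration only.
      static final byte[] ROW = Bytes.toBytes("r1");
      static final byte[] FAMILY = Bytes.toBytes("f");
      static final byte[] QUALIFIER = Bytes.toBytes("q");
      static final byte[] VALUE = Bytes.toBytes("v");

      static boolean migrate(Table table) throws IOException {
        // Before: table.checkAndPut(ROW, FAMILY, QUALIFIER, null, put)  (null value == "not exists")
        Put put = new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE);
        boolean stored = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER)
            .ifNotExists().thenPut(put);

        // Before: table.checkAndDelete(ROW, FAMILY, QUALIFIER, VALUE, delete)
        Delete delete = new Delete(ROW);
        boolean deleted = table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER)
            .ifEquals(VALUE).thenDelete(delete);
        return stored && deleted;
      }
    }
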
@@ -794,14 +749,14 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
-      long amount) throws IOException {
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
+      throws IOException {
     throw new IOException("incrementColumnValue not supported");
   }
 
   @Override
-  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
-      long amount, Durability durability) throws IOException {
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
+      Durability durability) throws IOException {
     throw new IOException("incrementColumnValue not supported");
   }
 

@@ -822,15 +777,14 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service,
-      byte[] startKey, byte[] endKey, Batch.Call<T, R> callable)
-  throws ServiceException, Throwable {
+  public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey,
+      byte[] endKey, Batch.Call<T, R> callable) throws ServiceException, Throwable {
     throw new UnsupportedOperationException("coprocessorService not implemented");
   }
 
   @Override
-  public <T extends Service, R> void coprocessorService(Class<T> service,
-      byte[] startKey, byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback)
+  public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey,
+      byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback)
       throws ServiceException, Throwable {
     throw new UnsupportedOperationException("coprocessorService not implemented");
   }
@@ -842,93 +796,42 @@ public class RemoteHTable implements Table {
 
   @Override
   public <R extends Message> Map<byte[], R> batchCoprocessorService(
-      Descriptors.MethodDescriptor method, Message request,
-      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
+      Descriptors.MethodDescriptor method, Message request, byte[] startKey, byte[] endKey,
+      R responsePrototype) throws ServiceException, Throwable {
     throw new UnsupportedOperationException("batchCoprocessorService not implemented");
   }
 
   @Override
-  public <R extends Message> void batchCoprocessorService(
-      Descriptors.MethodDescriptor method, Message request,
-      byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
+  public <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor method,
+      Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
       throws ServiceException, Throwable {
     throw new UnsupportedOperationException("batchCoprocessorService not implemented");
   }
 
-  @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getOperationTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   /*
-   * Only a small subset of characters are valid in URLs.
-   *
-   * Row keys, column families, and qualifiers cannot be appended to URLs without first URL
-   * escaping. Table names are ok because they can only contain alphanumeric, ".","_", and "-"
-   * which are valid characters in URLs.
+   * Only a small subset of characters are valid in URLs. Row keys, column families, and qualifiers
+   * cannot be appended to URLs without first URL escaping. Table names are ok because they can only
+   * contain alphanumeric, ".","_", and "-" which are valid characters in URLs.
    */
   private static String toURLEncodedBytes(byte[] row) {
     try {
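
The setOperationTimeout/setRpcTimeout family is gone from Table; per-table timeouts are now fixed at construction time through TableBuilder, as the test changes later in this commit show. A sketch of the replacement pattern, assuming an open Connection (the specific millisecond values are hypothetical):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    class TimeoutMigration {
      // Before: table.setOperationTimeout(3000); table.setReadRpcTimeout(600000);
      // After: timeouts are supplied up front; the second argument is the request
      // thread pool (null = the connection's default).
      static Table openWithTimeouts(Connection conn, TableName name) {
        return conn.getTableBuilder(name, null)
            .setOperationTimeout(3000)
            .setReadRpcTimeout(600000)
            .build();
      }
    }
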
@@ -953,7 +856,7 @@ public class RemoteHTable implements Table {
     @Override
     public CheckAndMutateBuilder qualifier(byte[] qualifier) {
       this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" +
-        " an empty byte array, or just do not call this method if you want a null qualifier");
+          " an empty byte array, or just do not call this method if you want a null qualifier");
       return this;
     }
 

@@ -964,8 +867,8 @@ public class RemoteHTable implements Table {
 
     @Override
     public CheckAndMutateBuilder ifNotExists() {
-      throw new UnsupportedOperationException("CheckAndMutate for non-equal comparison "
-          + "not implemented");
+      throw new UnsupportedOperationException(
+          "CheckAndMutate for non-equal comparison " + "not implemented");
     }
 
     @Override

@@ -974,8 +877,8 @@ public class RemoteHTable implements Table {
         this.value = Preconditions.checkNotNull(value, "value is null");
         return this;
       } else {
-        throw new UnsupportedOperationException("CheckAndMutate for non-equal comparison " +
-            "not implemented");
+        throw new UnsupportedOperationException(
+            "CheckAndMutate for non-equal comparison " + "not implemented");
       }
     }
 
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility;
 import org.apache.hadoop.hbase.rest.RESTServlet;
 import org.apache.hadoop.hbase.testclassification.MediumTests;

@@ -152,13 +153,9 @@ public class TestRemoteTable {
 
   @Test
   public void testGetTableDescriptor() throws IOException {
-    Table table = null;
-    try {
-      table = TEST_UTIL.getConnection().getTable(TABLE);
-      HTableDescriptor local = table.getTableDescriptor();
-      assertEquals(remoteTable.getTableDescriptor(), local);
-    } finally {
-      if (null != table) table.close();
+    try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) {
+      TableDescriptor local = table.getDescriptor();
+      assertEquals(remoteTable.getDescriptor(), new HTableDescriptor(local));
     }
   }
 
@@ -505,7 +502,7 @@ public class TestRemoteTable {
     assertTrue(Bytes.equals(VALUE_1, value1));
     assertNull(value2);
     assertTrue(remoteTable.exists(get));
-    assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length);
+    assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length);
     Delete delete = new Delete(ROW_1);
 
     remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1)
@@ -31,6 +31,7 @@
   import="java.util.TreeMap"
   import="org.apache.commons.lang3.StringEscapeUtils"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.hbase.HTableDescriptor"
   import="org.apache.hadoop.hbase.HColumnDescriptor"
   import="org.apache.hadoop.hbase.HConstants"
   import="org.apache.hadoop.hbase.HRegionLocation"

@@ -131,7 +132,7 @@
 if ( fqtn != null ) {
   try {
   table = master.getConnection().getTable(TableName.valueOf(fqtn));
-  if (table.getTableDescriptor().getRegionReplication() > 1) {
+  if (table.getDescriptor().getRegionReplication() > 1) {
     tableHeader = "<h2>Table Regions</h2><table id=\"tableRegionTable\" class=\"tablesorter table table-striped\" style=\"table-layout: fixed; word-wrap: break-word;\"><thead><tr><th>Name</th><th>Region Server</th><th>ReadRequests</th><th>WriteRequests</th><th>StorefileSize</th><th>Num.Storefiles</th><th>MemSize</th><th>Locality</th><th>Start Key</th><th>End Key</th><th>ReplicaID</th></tr></thead>";
     withReplica = true;
   } else {

@@ -365,7 +366,7 @@ if ( fqtn != null ) {
   <th></th>
 </tr>
 <%
-  Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
+  Collection<HColumnDescriptor> families = new HTableDescriptor(table.getDescriptor()).getFamilies();
   for (HColumnDescriptor family: families) {
 %>
 <tr>
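
table.jsp keeps its HColumnDescriptor-based rendering by wrapping the interface-level descriptor in the legacy HTableDescriptor. Where code can change freely, reading ColumnFamilyDescriptors straight off the TableDescriptor avoids the deprecated wrapper; both forms sketched below (class and method names are hypothetical):

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Table;

    class FamilyListing {
      // The bridge the JSP uses: wrap the descriptor so HColumnDescriptor callers keep working.
      static Collection<HColumnDescriptor> legacyFamilies(Table table) throws IOException {
        return new HTableDescriptor(table.getDescriptor()).getFamilies();
      }

      // Preferred where callers can change: no deprecated wrapper involved.
      static ColumnFamilyDescriptor[] families(Table table) throws IOException {
        return table.getDescriptor().getColumnFamilies();
      }
    }
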
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.apache.hadoop.hbase.HBaseTestingUtility.countRows;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;

@@ -59,7 +60,6 @@ import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.PrivateCellUtil;
-import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;

@@ -100,7 +100,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.NonRepeatedEnvironmentEdge;
-import org.apache.hadoop.hbase.util.Pair;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
@@ -180,31 +179,24 @@ public class TestFromClientSide {
     // Client will retry beacuse rpc timeout is small than the sleep time of first rpc call
     c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500);
 
-    Connection connection = ConnectionFactory.createConnection(c);
-    try (Table t = connection.getTable(TableName.valueOf(name.getMethodName()))) {
-      if (t instanceof HTable) {
-        HTable table = (HTable) t;
-        table.setOperationTimeout(3 * 1000);
-
-        try {
-          Append append = new Append(ROW);
-          append.addColumn(HBaseTestingUtility.fam1, QUALIFIER, VALUE);
-          Result result = table.append(append);
+    try (Connection connection = ConnectionFactory.createConnection(c);
+        Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null)
+            .setOperationTimeout(3 * 1000).build()) {
+      Append append = new Append(ROW);
+      append.addColumn(HBaseTestingUtility.fam1, QUALIFIER, VALUE);
+      Result result = table.append(append);
 
       // Verify expected result
       Cell[] cells = result.rawCells();
       assertEquals(1, cells.length);
       assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
 
       // Verify expected result again
       Result readResult = table.get(new Get(ROW));
       cells = readResult.rawCells();
       assertEquals(1, cells.length);
       assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE);
-        } finally {
-          connection.close();
-        }
-      }
     }
   }
 
@@ -484,7 +476,7 @@ public class TestFromClientSide {
       byte[] endKey = regions.get(0).getRegion().getEndKey();
       // Count rows with a filter that stops us before passed 'endKey'.
       // Should be count of rows in first region.
-      int endKeyCount = TEST_UTIL.countRows(t, createScanWithRowFilter(endKey));
+      int endKeyCount = countRows(t, createScanWithRowFilter(endKey));
       assertTrue(endKeyCount < rowCount);
 
       // How do I know I did not got to second region? Thats tough. Can't really

@@ -496,30 +488,29 @@ public class TestFromClientSide {
       // New test. Make it so scan goes into next region by one and then two.
       // Make sure count comes out right.
       byte[] key = new byte[]{endKey[0], endKey[1], (byte) (endKey[2] + 1)};
-      int plusOneCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key));
+      int plusOneCount = countRows(t, createScanWithRowFilter(key));
       assertEquals(endKeyCount + 1, plusOneCount);
       key = new byte[]{endKey[0], endKey[1], (byte) (endKey[2] + 2)};
-      int plusTwoCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key));
+      int plusTwoCount = countRows(t, createScanWithRowFilter(key));
       assertEquals(endKeyCount + 2, plusTwoCount);
 
       // New test. Make it so I scan one less than endkey.
       key = new byte[]{endKey[0], endKey[1], (byte) (endKey[2] - 1)};
-      int minusOneCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key));
+      int minusOneCount = countRows(t, createScanWithRowFilter(key));
       assertEquals(endKeyCount - 1, minusOneCount);
       // For above test... study logs. Make sure we do "Finished with scanning.."
       // in first region and that we do not fall into the next region.
 
-      key = new byte[]{'a', 'a', 'a'};
-      int countBBB = TEST_UTIL.countRows(t,
-        createScanWithRowFilter(key, null, CompareOperator.EQUAL));
+      key = new byte[] { 'a', 'a', 'a' };
+      int countBBB = countRows(t, createScanWithRowFilter(key, null, CompareOperator.EQUAL));
       assertEquals(1, countBBB);
 
-      int countGreater = TEST_UTIL.countRows(t, createScanWithRowFilter(endKey, null,
-        CompareOperator.GREATER_OR_EQUAL));
+      int countGreater =
+          countRows(t, createScanWithRowFilter(endKey, null, CompareOperator.GREATER_OR_EQUAL));
       // Because started at start of table.
       assertEquals(0, countGreater);
-      countGreater = TEST_UTIL.countRows(t, createScanWithRowFilter(endKey, endKey,
-        CompareOperator.GREATER_OR_EQUAL));
+      countGreater =
+          countRows(t, createScanWithRowFilter(endKey, endKey, CompareOperator.GREATER_OR_EQUAL));
       assertEquals(rowCount - endKeyCount, countGreater);
     }
   }
@@ -551,16 +542,14 @@ public class TestFromClientSide {
     return s;
   }
 
-  private void assertRowCount(final Table t, final int expected)
-  throws IOException {
-    assertEquals(expected, TEST_UTIL.countRows(t, new Scan()));
+  private void assertRowCount(final Table t, final int expected) throws IOException {
+    assertEquals(expected, countRows(t, new Scan()));
   }
 
-  /*
+  /**
    * Split table into multiple regions.
    * @param t Table to split.
    * @return Map of regions to servers.
-   * @throws IOException
    */
   private List<HRegionLocation> splitTable(final Table t)
   throws IOException, InterruptedException {
@@ -4374,7 +4363,7 @@ public class TestFromClientSide {
     // Test user metadata
     try (Admin admin = TEST_UTIL.getAdmin()) {
       // make a modifiable descriptor
-      HTableDescriptor desc = new HTableDescriptor(a.getTableDescriptor());
+      HTableDescriptor desc = new HTableDescriptor(a.getDescriptor());
       // offline the table
       admin.disableTable(tableAname);
       // add a user attribute to HTD

@@ -4390,7 +4379,7 @@ public class TestFromClientSide {
     }
 
     // Test that attribute changes were applied
-    HTableDescriptor desc = a.getTableDescriptor();
+    HTableDescriptor desc = new HTableDescriptor(a.getDescriptor());
     assertEquals("wrong table descriptor returned", desc.getTableName(), tableAname);
     // check HTD attribute
     value = desc.getValue(attrName);
@@ -6445,19 +6434,6 @@ public class TestFromClientSide {
     }
   }
 
-  private static Pair<byte[][], byte[][]> getStartEndKeys(List<RegionLocations> regions) {
-    final byte[][] startKeyList = new byte[regions.size()][];
-    final byte[][] endKeyList = new byte[regions.size()][];
-
-    for (int i = 0; i < regions.size(); i++) {
-      RegionInfo region = regions.get(i).getRegionLocation().getRegion();
-      startKeyList[i] = region.getStartKey();
-      endKeyList[i] = region.getEndKey();
-    }
-
-    return new Pair<>(startKeyList, endKeyList);
-  }
-
   @Test
   public void testFilterAllRecords() throws IOException {
     Scan scan = new Scan();
@@ -311,7 +311,7 @@ public class TestFromClientSide3 {
 
       // change the compaction.min config option for this table to 5
       LOG.info("hbase.hstore.compaction.min should now be 5");
-      HTableDescriptor htd = new HTableDescriptor(table.getTableDescriptor());
+      HTableDescriptor htd = new HTableDescriptor(table.getDescriptor());
       htd.setValue("hbase.hstore.compaction.min", String.valueOf(5));
       admin.modifyTable(htd);
       LOG.info("alter status finished");

@@ -368,8 +368,8 @@ public class TestFromClientSide3 {
       htd.modifyFamily(hcd);
       admin.modifyTable(htd);
       LOG.info("alter status finished");
-      assertNull(table.getTableDescriptor().getFamily(FAMILY).getValue(
-        "hbase.hstore.compaction.min"));
+      assertNull(table.getDescriptor().getColumnFamily(FAMILY)
+        .getValue(Bytes.toBytes("hbase.hstore.compaction.min")));
     }
   }
 }
@@ -541,7 +541,7 @@ public class TestFromClientSide3 {
     getList.add(get);
     getList.add(get2);
 
-    boolean[] exists = table.existsAll(getList);
+    boolean[] exists = table.exists(getList);
     assertEquals(true, exists[0]);
     assertEquals(true, exists[1]);
 

@@ -593,7 +593,7 @@ public class TestFromClientSide3 {
     gets.add(new Get(Bytes.add(ANOTHERROW, new byte[]{0x00})));
 
     LOG.info("Calling exists");
-    boolean[] results = table.existsAll(gets);
+    boolean[] results = table.exists(gets);
     assertFalse(results[0]);
     assertFalse(results[1]);
     assertTrue(results[2]);

@@ -607,7 +607,7 @@ public class TestFromClientSide3 {
     gets = new ArrayList<>();
     gets.add(new Get(new byte[]{0x00}));
     gets.add(new Get(new byte[]{0x00, 0x00}));
-    results = table.existsAll(gets);
+    results = table.exists(gets);
     assertTrue(results[0]);
     assertFalse(results[1]);
 

@@ -620,7 +620,7 @@ public class TestFromClientSide3 {
     gets.add(new Get(new byte[]{(byte) 0xff}));
     gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff}));
     gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff, (byte) 0xff}));
-    results = table.existsAll(gets);
+    results = table.exists(gets);
     assertFalse(results[0]);
     assertTrue(results[1]);
     assertFalse(results[2]);
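
existsAll(List<Get>) has been folded into an exists(List<Get>) overload with identical semantics: one boolean per Get, in input order, without shipping row data back to the client. A minimal sketch with hypothetical row keys:

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class ExistsMigration {
      static boolean[] probe(Table table) throws IOException {
        // Hypothetical row keys, for illustration only.
        List<Get> gets =
            Arrays.asList(new Get(Bytes.toBytes("row1")), new Get(Bytes.toBytes("row2")));
        // Before: boolean[] results = table.existsAll(gets);
        return table.exists(gets); // one flag per Get, in input order
      }
    }
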
@@ -112,30 +112,22 @@ public class TestIncrementsFromClientSide {
     // Client will retry beacuse rpc timeout is small than the sleep time of first rpc call
     c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500);
 
-    Connection connection = ConnectionFactory.createConnection(c);
-    Table t = connection.getTable(TableName.valueOf(name.getMethodName()));
-    if (t instanceof HTable) {
-      HTable table = (HTable) t;
-      table.setOperationTimeout(3 * 1000);
-
-      try {
-        Increment inc = new Increment(ROW);
-        inc.addColumn(TEST_UTIL.fam1, QUALIFIER, 1);
-        Result result = table.increment(inc);
-
-        Cell [] cells = result.rawCells();
-        assertEquals(1, cells.length);
-        assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
-
-        // Verify expected result
-        Result readResult = table.get(new Get(ROW));
-        cells = readResult.rawCells();
-        assertEquals(1, cells.length);
-        assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1);
-      } finally {
-        table.close();
-        connection.close();
-      }
+    try (Connection connection = ConnectionFactory.createConnection(c);
+        Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null)
+            .setOperationTimeout(3 * 1000).build()) {
+      Increment inc = new Increment(ROW);
+      inc.addColumn(HBaseTestingUtility.fam1, QUALIFIER, 1);
+      Result result = table.increment(inc);
+
+      Cell[] cells = result.rawCells();
+      assertEquals(1, cells.length);
+      assertIncrementKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, 1);
+
+      // Verify expected result
+      Result readResult = table.get(new Get(ROW));
+      cells = readResult.rawCells();
+      assertEquals(1, cells.length);
+      assertIncrementKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, 1);
     }
   }
 
@@ -156,10 +156,11 @@ public class TestPassCustomCellViaRegionObserver {
       table.get(new Get(ROW)).isEmpty());
     assertObserverHasExecuted();
 
-    assertTrue(table.checkAndPut(ROW, FAMILY, QUALIFIER, null, put));
+    assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put));
    assertObserverHasExecuted();
 
-    assertTrue(table.checkAndDelete(ROW, FAMILY, QUALIFIER, VALUE, delete));
+    assertTrue(
+      table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenDelete(delete));
     assertObserverHasExecuted();
 
     assertTrue(table.get(new Get(ROW)).isEmpty());
 
@@ -258,7 +258,7 @@ public class TestMultiRowRangeFilter {
     generateRows(numRows, ht, family, qf, value);
 
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
 
     List<RowRange> ranges = new ArrayList<>();
     ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));

@@ -286,7 +286,7 @@ public class TestMultiRowRangeFilter {
     generateRows(numRows, ht, family, qf, value);
 
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
 
     List<RowRange> ranges = new ArrayList<>();
     ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));

@@ -312,7 +312,7 @@ public class TestMultiRowRangeFilter {
     Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
     generateRows(numRows, ht, family, qf, value);
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
 
     List<RowRange> ranges = new ArrayList<>();
     ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false));

@@ -334,7 +334,7 @@ public class TestMultiRowRangeFilter {
     Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
     generateRows(numRows, ht, family, qf, value);
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
 
     List<RowRange> ranges = new ArrayList<>();
     ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false));

@@ -356,7 +356,7 @@ public class TestMultiRowRangeFilter {
     generateRows(numRows, ht, family, qf, value);
 
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
 
     List<RowRange> ranges = new ArrayList<>();
     ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));

@@ -381,29 +381,28 @@ public class TestMultiRowRangeFilter {
   public void testMultiRowRangeFilterWithExclusive() throws IOException {
     tableName = TableName.valueOf(name.getMethodName());
     TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 6000000);
-    Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
-    ht.setReadRpcTimeout(600000);
-    ht.setOperationTimeout(6000000);
+    TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
+    try (Table ht = TEST_UTIL.getConnection().getTableBuilder(tableName, null)
+        .setReadRpcTimeout(600000).setOperationTimeout(6000000).build()) {
       generateRows(numRows, ht, family, qf, value);
 
       Scan scan = new Scan();
-      scan.setMaxVersions();
+      scan.readAllVersions();
 
       List<RowRange> ranges = new ArrayList<>();
       ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
       ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(40), false));
       ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false));
 
       MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
       scan.setFilter(filter);
       int resultsSize = getResultsSize(ht, scan);
       LOG.info("found " + resultsSize + " results");
       List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht);
       List<Cell> results2 = getScanResult(Bytes.toBytes(65), Bytes.toBytes(75), ht);
 
       assertEquals((results1.size() - 1) + results2.size(), resultsSize);
-    ht.close();
+    }
   }
 
   @Test

@@ -413,7 +412,7 @@ public class TestMultiRowRangeFilter {
     generateRows(numRows, ht, family, qf, value);
 
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
 
     List<RowRange> ranges1 = new ArrayList<>();
     ranges1.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));

@@ -448,7 +447,7 @@ public class TestMultiRowRangeFilter {
     generateRows(numRows, ht, family, qf, value);
 
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
 
     List<RowRange> ranges1 = new ArrayList<>();
     ranges1.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));

@@ -648,12 +647,12 @@ public class TestMultiRowRangeFilter {
 
   private List<Cell> getScanResult(byte[] startRow, byte[] stopRow, Table ht) throws IOException {
     Scan scan = new Scan();
-    scan.setMaxVersions();
+    scan.readAllVersions();
     if(!Bytes.toString(startRow).isEmpty()) {
-      scan.setStartRow(startRow);
+      scan.withStartRow(startRow);
     }
     if(!Bytes.toString(stopRow).isEmpty()) {
-      scan.setStopRow(stopRow);
+      scan.withStopRow(stopRow);
     }
     ResultScanner scanner = ht.getScanner(scan);
     List<Cell> kvList = new ArrayList<>();
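
The filter tests also pick up the new Scan API: setMaxVersions() becomes readAllVersions(), and setStartRow/setStopRow become withStartRow/withStopRow. A small sketch of the replacement calls:

    import org.apache.hadoop.hbase.client.Scan;

    class ScanMigration {
      static Scan rangeScan(byte[] startRow, byte[] stopRow) {
        Scan scan = new Scan();
        scan.readAllVersions();      // before: scan.setMaxVersions();
        scan.withStartRow(startRow); // before: scan.setStartRow(startRow);
        scan.withStopRow(stopRow);   // before: scan.setStopRow(stopRow);
        return scan;
      }
    }
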
@@ -24,7 +24,6 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;

@@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.client.CompactionState;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.LargeTests;

@@ -139,11 +139,10 @@ public class TestWarmupRegion {
       RegionInfo info = region.getRegionInfo();
 
       try {
-        HTableDescriptor htd = table.getTableDescriptor();
+        TableDescriptor htd = table.getDescriptor();
         for (int i = 0; i < 10; i++) {
           warmupHRegion(info, htd, rs.getWAL(info), rs.getConfiguration(), rs, null);
         }
 
       } catch (IOException ie) {
         LOG.error("Failed warming up region " + info.getRegionNameAsString(), ie);
       }
@@ -29,8 +29,6 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
@@ -81,12 +79,6 @@ public class RegionAsTable implements Table {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public HTableDescriptor getTableDescriptor() throws IOException {
-    return new HTableDescriptor(this.region.getTableDescriptor());
-  }
-
   @Override
   public TableDescriptor getDescriptor() throws IOException {
     return this.region.getTableDescriptor();
@@ -211,21 +203,6 @@ public class RegionAsTable implements Table {
     for (Put put: puts) put(put);
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
-      throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, Put put)
-      throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public void delete(Delete delete) throws IOException {
     this.region.delete(delete);
@@ -236,21 +213,6 @@ public class RegionAsTable implements Table {
     for(Delete delete: deletes) delete(delete);
   }
 
-  @Override
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
-      Delete delete)
-      throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, Delete delete)
-      throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
     throw new UnsupportedOperationException();
@@ -325,77 +287,26 @@ public class RegionAsTable implements Table {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, RowMutations mutation) throws IOException {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {throw new UnsupportedOperationException(); }
-
   @Override
   public long getOperationTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {throw new UnsupportedOperationException(); }
-
   @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public RegionLocator getRegionLocator() throws IOException {
     throw new UnsupportedOperationException();
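The checkAndPut/checkAndDelete/checkAndMutate overloads removed above are subsumed by the CheckAndMutateBuilder that Table already exposes. A hedged sketch of the replacement call shape, assuming the HBase 2.x client (the helper class and method names are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class CheckAndMutateSketch {
  // was: table.checkAndPut(row, family, qualifier, expected, put)
  static boolean putIfEquals(Table table, byte[] row, byte[] family, byte[] qualifier,
      byte[] expected, Put put) throws IOException {
    return table.checkAndMutate(row, family).qualifier(qualifier)
        .ifEquals(expected).thenPut(put);
  }

  // was: table.checkAndDelete(row, family, qualifier, CompareOperator.GREATER, value, delete)
  static boolean deleteIfGreater(Table table, byte[] row, byte[] family, byte[] qualifier,
      byte[] value, Delete delete) throws IOException {
    return table.checkAndMutate(row, family).qualifier(qualifier)
        .ifMatches(CompareOperator.GREATER, value).thenDelete(delete);
  }
}
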
@@ -82,7 +82,7 @@ public class TestNewVersionBehaviorFromClientSide {
     fam.setNewVersionBehavior(true);
     fam.setMaxVersions(3);
     table.addFamily(fam);
-    TEST_UTIL.getHBaseAdmin().createTable(table);
+    TEST_UTIL.getAdmin().createTable(table);
     return TEST_UTIL.getConnection().getTable(tableName);
   }
 
@@ -310,10 +310,11 @@ public class TestNewVersionBehaviorFromClientSide {
   }
 
   @Test
-  public void testgetColumnHint() throws IOException {
-    try (Table t = createTable()) {
-      t.setOperationTimeout(10000);
-      t.setRpcTimeout(10000);
+  public void testGetColumnHint() throws IOException {
+    createTable();
+    try (Table t =
+        TEST_UTIL.getConnection().getTableBuilder(TableName.valueOf(name.getMethodName()), null)
+            .setOperationTimeout(10000).setRpcTimeout(10000).build()) {
       t.put(new Put(ROW).addColumn(FAMILY, col1, 100, value));
       t.put(new Put(ROW).addColumn(FAMILY, col1, 101, value));
       t.put(new Put(ROW).addColumn(FAMILY, col1, 102, value));
@@ -353,13 +353,6 @@ public class TestPerColumnFamilyFlush {
     TEST_UTIL.getAdmin().createNamespace(
       NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build());
     Table table = TEST_UTIL.createTable(TABLENAME, FAMILIES);
-    HTableDescriptor htd = table.getTableDescriptor();
-
-    for (byte[] family : FAMILIES) {
-      if (!htd.hasFamily(family)) {
-        htd.addFamily(new HColumnDescriptor(family));
-      }
-    }
 
     // Add 100 edits for CF1, 20 for CF2, 20 for CF3.
     // These will all be interleaved in the log.
@@ -107,12 +107,10 @@ public class TestSettingTimeoutOnBlockingPoint {
       }
     });
     Thread getThread = new Thread(() -> {
-      try {
-        try( Table table = TEST_UTIL.getConnection().getTable(tableName)) {
-          table.setRpcTimeout(1000);
-          Delete delete = new Delete(ROW1);
-          table.delete(delete);
-        }
+      try (Table table =
+          TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) {
+        Delete delete = new Delete(ROW1);
+        table.delete(delete);
       } catch (IOException e) {
         Assert.fail(e.getMessage());
       }
@@ -122,12 +120,12 @@ public class TestSettingTimeoutOnBlockingPoint {
     Threads.sleep(1000);
     getThread.start();
     Threads.sleep(2000);
-    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
+    try (Table table =
+        TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) {
       // We have only two handlers. The first thread will get a write lock for row1 and occupy
       // the first handler. The second thread need a read lock for row1, it should quit after 1000
       // ms and give back the handler because it can not get the lock in time.
       // So we can get the value using the second handler.
-      table.setRpcTimeout(1000);
       table.get(new Get(ROW2)); // Will throw exception if the timeout checking is failed
     } finally {
       incrementThread.interrupt();
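With the per-Table timeout setters gone, timeouts are fixed when the Table is built, as the rewritten test does. A minimal sketch of the TableBuilder pattern under the HBase 2.x client (the connection, table name, and helper name are assumed for illustration; null selects the default executor pool):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableBuilderTimeoutSketch {
  static void getWithTightTimeouts(Connection connection, TableName tableName)
      throws IOException {
    try (Table table = connection.getTableBuilder(tableName, null)
        .setOperationTimeout(10000) // ms: total budget for one operation including retries
        .setRpcTimeout(1000)        // ms: budget for each individual RPC
        .build()) {
      table.get(new Get(Bytes.toBytes("row")));
    }
  }
}
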
@@ -28,17 +28,14 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -302,11 +299,6 @@ public class TestWALEntrySinkFilter {
       return configuration;
     }
 
-    @Override
-    public HTableDescriptor getTableDescriptor() throws IOException {
-      return null;
-    }
-
     @Override
     public TableDescriptor getDescriptor() throws IOException {
       return null;
@@ -372,16 +364,6 @@ public class TestWALEntrySinkFilter {
 
     }
 
-    @Override
-    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException {
-      return false;
-    }
-
-    @Override
-    public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put) throws IOException {
-      return false;
-    }
-
     @Override
     public void delete(Delete delete) throws IOException {
 
@@ -392,16 +374,6 @@ public class TestWALEntrySinkFilter {
 
     }
 
-    @Override
-    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete) throws IOException {
-      return false;
-    }
-
-    @Override
-    public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Delete delete) throws IOException {
-      return false;
-    }
-
     @Override
     public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
       return null;
@@ -462,70 +434,26 @@ public class TestWALEntrySinkFilter {
 
     }
 
-    @Override
-    public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, RowMutations mutation) throws IOException {
-      return false;
-    }
-
     @Override
     public long getRpcTimeout(TimeUnit unit) {
       return 0;
     }
 
-    @Override
-    public int getRpcTimeout() {
-      return 0;
-    }
-
-    @Override
-    public void setRpcTimeout(int rpcTimeout) {
-
-    }
-
     @Override
     public long getReadRpcTimeout(TimeUnit unit) {
       return 0;
     }
 
-    @Override
-    public int getReadRpcTimeout() {
-      return 0;
-    }
-
-    @Override
-    public void setReadRpcTimeout(int readRpcTimeout) {
-
-    }
-
     @Override
     public long getWriteRpcTimeout(TimeUnit unit) {
       return 0;
     }
 
-    @Override
-    public int getWriteRpcTimeout() {
-      return 0;
-    }
-
-    @Override
-    public void setWriteRpcTimeout(int writeRpcTimeout) {
-
-    }
-
     @Override
     public long getOperationTimeout(TimeUnit unit) {
       return 0;
     }
 
-    @Override
-    public int getOperationTimeout() {
-      return 0;
-    }
-
-    @Override
-    public void setOperationTimeout(int operationTimeout) {
-    }
-
     @Override
     public RegionLocator getRegionLocator() throws IOException {
       return null;
@@ -110,7 +110,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
     UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
     Connection connection = ConnectionFactory.createConnection(conf);
     Table t = connection.getTable(TEST_TABLE);
-    HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor());
+    HTableDescriptor htd = new HTableDescriptor(t.getDescriptor());
     htd.addCoprocessor("net.clayb.hbase.coprocessor.NotWhitelisted",
       new Path(coprocessorPath),
       Coprocessor.PRIORITY_USER, null);
@@ -122,7 +122,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
       // swallow exception from coprocessor
     }
     LOG.info("Done Modifying Table");
-    assertEquals(0, t.getTableDescriptor().getCoprocessors().size());
+    assertEquals(0, t.getDescriptor().getCoprocessorDescriptors().size());
   }
 
   /**
@@ -155,7 +155,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
     // coprocessor file
     admin.disableTable(TEST_TABLE);
     Table t = connection.getTable(TEST_TABLE);
-    HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor());
+    HTableDescriptor htd = new HTableDescriptor(t.getDescriptor());
     htd.addCoprocessor("net.clayb.hbase.coprocessor.Whitelisted",
       new Path(coprocessorPath),
       Coprocessor.PRIORITY_USER, null);
@@ -321,6 +321,6 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil {
     // ensure table was created and coprocessor is added to table
     LOG.info("Done Creating Table");
     Table t = connection.getTable(TEST_TABLE);
-    assertEquals(1, t.getTableDescriptor().getCoprocessors().size());
+    assertEquals(1, t.getDescriptor().getCoprocessorDescriptors().size());
   }
 }
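These hunks all follow one pattern: Table.getDescriptor() returns the read-only TableDescriptor, and getCoprocessorDescriptors() replaces getCoprocessors(). A short sketch under the HBase 2.x client (the helper class and method are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorMigrationSketch {
  // was: table.getTableDescriptor().getCoprocessors().size()
  static int coprocessorCount(Table table) throws IOException {
    TableDescriptor htd = table.getDescriptor();
    return htd.getCoprocessorDescriptors().size();
  }
}
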
@@ -127,7 +127,7 @@ public class TestRegionSnapshotTask {
     Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
     final SnapshotManifest manifest =
         SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
-    manifest.addTableDescriptor(table.getTableDescriptor());
+    manifest.addTableDescriptor(table.getDescriptor());
 
     if (!fs.exists(workingDir)) {
       fs.mkdirs(workingDir);
@@ -137,7 +137,7 @@ public class OfflineMetaRebuildTestCore {
     return this.connection.getTable(tablename);
   }
 
-  private void dumpMeta(HTableDescriptor htd) throws IOException {
+  private void dumpMeta(TableDescriptor htd) throws IOException {
     List<byte[]> metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName());
     for (byte[] row : metaRows) {
       LOG.info(Bytes.toString(row));
@@ -162,7 +162,7 @@ public class OfflineMetaRebuildTestCore {
       byte[] startKey, byte[] endKey) throws IOException {
 
     LOG.info("Before delete:");
-    HTableDescriptor htd = tbl.getTableDescriptor();
+    TableDescriptor htd = tbl.getDescriptor();
     dumpMeta(htd);
 
     List<HRegionLocation> regions;
@@ -203,7 +203,7 @@ public class OfflineMetaRebuildTestCore {
   protected RegionInfo createRegion(Configuration conf, final Table htbl,
       byte[] startKey, byte[] endKey) throws IOException {
     Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
-    HTableDescriptor htd = htbl.getTableDescriptor();
+    TableDescriptor htd = htbl.getDescriptor();
     RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName())
         .setStartKey(startKey)
         .setEndKey(endKey)
@@ -30,7 +30,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilder;
@@ -47,6 +46,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
@@ -103,16 +103,13 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb
 
   /**
    * Returns a list of all the column families for a given Table.
-   *
    * @param table table
-   * @throws IOException
    */
   byte[][] getAllColumns(Table table) throws IOException {
-    HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies();
+    ColumnFamilyDescriptor[] cds = table.getDescriptor().getColumnFamilies();
     byte[][] columns = new byte[cds.length][];
     for (int i = 0; i < cds.length; i++) {
-      columns[i] = Bytes.add(cds[i].getName(),
-          KeyValue.COLUMN_FAMILY_DELIM_ARRAY);
+      columns[i] = Bytes.add(cds[i].getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY);
     }
     return columns;
   }
@@ -1090,7 +1087,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb
       TreeMap<ByteBuffer, ColumnDescriptor> columns = new TreeMap<>();
 
       table = getTable(tableName);
-      HTableDescriptor desc = table.getTableDescriptor();
+      HTableDescriptor desc = new HTableDescriptor(table.getDescriptor());
 
       for (HColumnDescriptor e : desc.getFamilies()) {
         ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e);
@@ -227,7 +227,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements TH
   public List<Boolean> existsAll(ByteBuffer table, List<TGet> gets) throws TIOError, TException {
     Table htable = getTable(table);
     try {
-      boolean[] exists = htable.existsAll(getsFromThrift(gets));
+      boolean[] exists = htable.exists(getsFromThrift(gets));
       List<Boolean> result = new ArrayList<>(exists.length);
       for (boolean exist : exists) {
         result.add(exist);
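Table.exists(List<Get>) now covers what the removed existsAll did, with the same boolean-per-Get semantics. A one-method sketch (the helper class and method names are illustrative):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;

public class ExistsMigrationSketch {
  // was: boolean[] flags = table.existsAll(gets);
  static boolean[] probe(Table table, List<Get> gets) throws IOException {
    return table.exists(gets); // one boolean per Get, in input order
  }
}
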
@@ -29,7 +29,6 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Queue;
 import java.util.concurrent.TimeUnit;
-
 import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompareOperator;
@@ -409,15 +408,13 @@ public class ThriftTable implements Table {
     }
   }
 
-  @Override
-  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+  private boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
       byte[] value, RowMutations mutation) throws IOException {
     try {
-      ByteBuffer valueBuffer = value == null? null : ByteBuffer.wrap(value);
+      ByteBuffer valueBuffer = value == null ? null : ByteBuffer.wrap(value);
       return client.checkAndMutate(tableNameInBytes, ByteBuffer.wrap(row), ByteBuffer.wrap(family),
         ByteBuffer.wrap(qualifier), ThriftUtilities.compareOpFromHBase(op), valueBuffer,
         ThriftUtilities.rowMutationsFromHBase(mutation));
     } catch (TException e) {
       throw new IOException(e);
     }
@@ -29,7 +29,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -614,7 +613,7 @@ public class TestThriftConnection {
     assertTrue(Bytes.equals(VALUE_1, value1));
     assertNull(value2);
     assertTrue(table.exists(get));
-    assertEquals(1, table.existsAll(Collections.singletonList(get)).length);
+    assertEquals(1, table.exists(Collections.singletonList(get)).length);
     Delete delete = new Delete(ROW_1);
 
     table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1)