From 9b413cf262eff3e3039265c5cb086cbf43dc3c7c Mon Sep 17 00:00:00 2001
From: Apache9
Date: Mon, 17 Jun 2019 10:18:18 +0800
Subject: [PATCH] HBASE-22590 Remove the deprecated methods in Table interface
 (#309)

Signed-off-by: Jan Hentschel
Signed-off-by: Guanghao
---
 .../hadoop/hbase/backup/TestBackupMerge.java  |   2 +-
 ...estIncrementalBackupMergeWithFailures.java |   2 +-
 .../apache/hadoop/hbase/client/HTable.java    |  87 ----
 .../org/apache/hadoop/hbase/client/Table.java | 264 ----------
 .../hadoop/hbase/client/TestAsyncProcess.java |   2 +-
 .../mapreduce/IntegrationTestBulkLoad.java    |   2 +-
 .../mapreduce/TestHFileOutputFormat2.java     |  29 +-
 .../hadoop/hbase/rest/SchemaResource.java     |  24 +-
 .../hbase/rest/client/RemoteHTable.java       | 485 +++++++-----------
 .../hbase/rest/client/TestRemoteTable.java    |  13 +-
 .../resources/hbase-webapps/master/table.jsp  |   5 +-
 .../hbase/client/TestFromClientSide.java      |  86 ++--
 .../hbase/client/TestFromClientSide3.java     |  14 +-
 .../client/TestIncrementsFromClientSide.java  |  36 +-
 .../TestPassCustomCellViaRegionObserver.java  |   5 +-
 .../hbase/filter/TestMultiRowRangeFilter.java |  57 +-
 .../hadoop/hbase/master/TestWarmupRegion.java |   5 +-
 .../hbase/regionserver/RegionAsTable.java     |  89 ----
 .../TestNewVersionBehaviorFromClientSide.java |  11 +-
 .../TestPerColumnFamilyFlush.java             |   7 -
 .../TestSettingTimeoutOnBlockingPoint.java    |  14 +-
 .../regionserver/TestWALEntrySinkFilter.java  |  72 ---
 ...estCoprocessorWhitelistMasterObserver.java |   8 +-
 .../snapshot/TestRegionSnapshotTask.java      |   2 +-
 .../util/hbck/OfflineMetaRebuildTestCore.java |   6 +-
 .../thrift/ThriftHBaseServiceHandler.java     |  11 +-
 .../thrift2/ThriftHBaseServiceHandler.java    |   2 +-
 .../hbase/thrift2/client/ThriftTable.java     |  11 +-
 .../hbase/thrift2/TestThriftConnection.java   |   3 +-
 29 files changed, 343 insertions(+), 1011 deletions(-)

diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
index 9603c9dfa36..1d241716a1c 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupMerge.java
@@ -116,7 +116,7 @@ public class TestBackupMerge extends TestBackupBase {
       tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     Table hTable = conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+    LOG.debug("After incremental restore: " + hTable.getDescriptor());
     int countRows = TEST_UTIL.countRows(hTable, famName);
     LOG.debug("f1 has " + countRows + " rows");
     Assert.assertEquals(NB_ROWS_IN_BATCH + 2 * ADD_ROWS, countRows);
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 57bdc464099..ba1e0de274b 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -334,7 +334,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
       tablesRestoreIncMultiple, tablesMapIncMultiple, true));
 
     Table hTable = conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getTableDescriptor());
+    LOG.debug("After incremental restore: " + hTable.getDescriptor());
    LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + 2 * ADD_ROWS);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index b83bcd15322..e6ee5aa8f42 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -659,22 +659,6 @@ public class HTable implements Table {
         callWithRetries(callable, this.operationTimeoutMs);
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier,
-      final byte [] value, final Put put) throws IOException {
-    return doCheckAndPut(row, family, qualifier, CompareOperator.EQUAL.name(), value, null, put);
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier,
-      final CompareOperator op, final byte [] value, final Put put) throws IOException {
-    // The name of the operators in CompareOperator are intentionally those of the
-    // operators in the filter's CompareOp enum.
-    return doCheckAndPut(row, family, qualifier, op.name(), value, null, put);
-  }
-
   private boolean doCheckAndPut(final byte[] row, final byte[] family, final byte[] qualifier,
       final String opName, final byte[] value, final TimeRange timeRange, final Put put)
       throws IOException {
@@ -695,21 +679,6 @@ public class HTable implements Table {
         .callWithRetries(callable, this.operationTimeoutMs);
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
-      final byte[] value, final Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, CompareOperator.EQUAL.name(), value, null,
-      delete);
-  }
-
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
-      final CompareOperator op, final byte[] value, final Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, op.name(), value, null, delete);
-  }
-
   private boolean doCheckAndDelete(final byte[] row, final byte[] family, final byte[] qualifier,
       final String opName, final byte[] value, final TimeRange timeRange, final Delete delete)
       throws IOException {
@@ -801,13 +770,6 @@ public class HTable implements Table {
     return ((Result)results[0]).getExists();
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndMutate(final byte [] row, final byte [] family, final byte [] qualifier,
-      final CompareOperator op, final byte [] value, final RowMutations rm) throws IOException {
-    return doCheckAndMutate(row, family, qualifier, op.name(), value, null, rm);
-  }
-
   @Override
   public boolean exists(final Get get) throws IOException {
     Result r = get(get, true);
@@ -981,70 +943,21 @@ public class HTable implements Table {
     return unit.convert(rpcTimeoutMs, TimeUnit.MILLISECONDS);
   }
 
-  @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    return rpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    setReadRpcTimeout(rpcTimeout);
-    setWriteRpcTimeout(rpcTimeout);
-  }
-
   @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     return unit.convert(readRpcTimeoutMs, TimeUnit.MILLISECONDS);
   }
 
-  @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    return readRpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {
-    this.readRpcTimeoutMs = readRpcTimeout;
-  }
-
   @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     return unit.convert(writeRpcTimeoutMs, TimeUnit.MILLISECONDS);
   }
 
-  @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    return writeRpcTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {
-    this.writeRpcTimeoutMs = writeRpcTimeout;
-  }
-
   @Override
   public long getOperationTimeout(TimeUnit unit) {
     return unit.convert(operationTimeoutMs, TimeUnit.MILLISECONDS);
   }
 
-  @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    return operationTimeoutMs;
-  }
-
-  @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    this.operationTimeoutMs = operationTimeout;
-  }
-
   @Override
   public String toString() {
     return tableName + ";" + connection;
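Not part of the patch, but for context: callers of the removed checkAndPut/checkAndDelete
overloads move to the CheckAndMutateBuilder returned by Table.checkAndMutate(row, family),
which HTable already backs with the doCheckAndPut/doCheckAndDelete methods kept above. A
rough migration sketch; the helper class, method names and arguments are placeholders, not
code from this change:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Hypothetical migration helpers; not part of HBase itself.
    public final class CheckAndMutateMigration {
      // Before: table.checkAndPut(row, family, qualifier, expected, put);
      static boolean putIfEquals(Table table, byte[] row, byte[] family, byte[] qualifier,
          byte[] expected, Put put) throws IOException {
        return table.checkAndMutate(row, family)
            .qualifier(qualifier)
            .ifEquals(expected) // the CompareOperator.EQUAL case of the removed overload
            .thenPut(put);
      }

      // Before: table.checkAndDelete(row, family, qualifier, expected, delete);
      static boolean deleteIfEquals(Table table, byte[] row, byte[] family, byte[] qualifier,
          byte[] expected, Delete delete) throws IOException {
        return table.checkAndMutate(row, family)
            .qualifier(qualifier)
            .ifEquals(expected)
            .thenDelete(delete);
      }
    }

For non-EQUAL comparisons, ifMatches(CompareOperator, value) replaces the
CompareOperator-taking overloads removed below.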
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 9268b13dfeb..6e28d243216 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -32,7 +32,6 @@ import org.apache.commons.lang3.NotImplementedException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CompareOperator;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.io.TimeRange;
@@ -66,23 +65,6 @@ public interface Table extends Closeable {
    */
   Configuration getConfiguration();
 
-  /**
-   * Gets the {@link org.apache.hadoop.hbase.HTableDescriptor table descriptor} for this table.
-   * @throws java.io.IOException if a remote or network exception occurs.
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   *             use {@link #getDescriptor()}
-   */
-  @Deprecated
-  default HTableDescriptor getTableDescriptor() throws IOException {
-    TableDescriptor descriptor = getDescriptor();
-
-    if (descriptor instanceof HTableDescriptor) {
-      return (HTableDescriptor)descriptor;
-    } else {
-      return new HTableDescriptor(descriptor);
-    }
-  }
-
   /**
    * Gets the {@link org.apache.hadoop.hbase.client.TableDescriptor table descriptor} for this table.
    * @throws java.io.IOException if a remote or network exception occurs.
@@ -131,24 +113,6 @@ public interface Table extends Closeable {
     throw new NotImplementedException("Add an implementation!");
   }
 
-  /**
-   * Test for the existence of columns in the table, as specified by the Gets.
-   * This will return an array of booleans. Each value will be true if the related Get matches
-   * one or more keys, false if not.
-   * This is a server-side call so it prevents any data from being transferred to
-   * the client.
-   *
-   * @param gets the Gets
-   * @return Array of boolean. True if the specified Get matches one or more keys, false if not.
-   * @throws IOException e
-   * @deprecated since 2.0 version and will be removed in 3.0 version.
-   *             use {@link #exists(List)}
-   */
-  @Deprecated
-  default boolean[] existsAll(List<Get> gets) throws IOException {
-    return exists(gets);
-  }
-
   /**
    * Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
    * The ordering of execution of the actions is not defined. Meaning if you do a Put and a
@@ -284,55 +248,6 @@ public interface Table extends Closeable {
     throw new NotImplementedException("Add an implementation!");
   }
 
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the put. If the passed value is null, the check
-   * is for the lack of column (ie: non-existance)
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param value the expected value
-   * @param put data to put if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
-      throws IOException {
-    return checkAndPut(row, family, qualifier, CompareOperator.EQUAL, value, put);
-  }
-
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the put. If the passed value is null, the check
-   * is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> add the put.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op comparison operator to use
-   * @param value the expected value
-   * @param put data to put if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
-      byte[] value, Put put) throws IOException {
-    RowMutations mutations = new RowMutations(put.getRow(), 1);
-    mutations.add(put);
-
-    return checkAndMutate(row, family, qualifier, op, value, mutations);
-  }
-
   /**
    * Deletes the specified cells/row.
    *
@@ -371,55 +286,6 @@ public interface Table extends Closeable {
     throw new NotImplementedException("Add an implementation!");
   }
 
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the delete. If the passed value is null, the
-   * check is for the lack of column (ie: non-existance)
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param value the expected value
-   * @param delete data to delete if check succeeds
-   * @throws IOException e
-   * @return true if the new delete was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-    byte[] value, Delete delete) throws IOException {
-    return checkAndDelete(row, family, qualifier, CompareOperator.EQUAL, value, delete);
-  }
-
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected
-   * value. If it does, it adds the delete. If the passed value is null, the
-   * check is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> add the delete.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op comparison operator to use
-   * @param value the expected value
-   * @param delete data to delete if check succeeds
-   * @throws IOException e
-   * @return true if the new delete was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-    CompareOperator op, byte[] value, Delete delete) throws IOException {
-    RowMutations mutations = new RowMutations(delete.getRow(), 1);
-    mutations.add(delete);
-
-    return checkAndMutate(row, family, qualifier, op, value, mutations);
-  }
-
   /**
    * Atomically checks if a row/family/qualifier value matches the expected value. If it does, it
    * adds the Put/Delete/RowMutations.
@@ -731,32 +597,6 @@ public interface Table extends Closeable {
     throw new NotImplementedException("Add an implementation!");
   }
 
-  /**
-   * Atomically checks if a row/family/qualifier value matches the expected value.
-   * If it does, it performs the row mutations. If the passed value is null, the check
-   * is for the lack of column (ie: non-existence)
-   *
-   * The expected value argument of this call is on the left and the current
-   * value of the cell is on the right side of the comparison operator.
-   *
-   * Ie. eg. GREATER operator means expected value > existing <=> perform row mutations.
-   *
-   * @param row to check
-   * @param family column family to check
-   * @param qualifier column qualifier to check
-   * @param op the comparison operator
-   * @param value the expected value
-   * @param mutation mutations to perform if check succeeds
-   * @throws IOException e
-   * @return true if the new put was executed, false otherwise
-   * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #checkAndMutate(byte[], byte[])}
-   */
-  @Deprecated
-  default boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
-      byte[] value, RowMutations mutation) throws IOException {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
   /**
    * Get timeout of each rpc request in this Table instance. It will be overridden by a more
    * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
@@ -769,36 +609,6 @@ public interface Table extends Closeable {
    * @param unit the unit of time the timeout to be represented in
    * @return rpc timeout in the specified time unit
    */
   default long getRpcTimeout(TimeUnit unit) {
     throw new NotImplementedException("Add an implementation!");
   }
 
-  /**
-   * Get timeout (millisecond) of each rpc request in this Table instance.
-   *
-   * @return Currently configured read timeout
-   * @deprecated use {@link #getReadRpcTimeout(TimeUnit)} or
-   *             {@link #getWriteRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getRpcTimeout() {
-    return (int)getRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc request in operations of this Table instance, will
-   * override the value of hbase.rpc.timeout in configuration.
-   * If a rpc request waiting too long, it will stop waiting and send a new request to retry until
-   * retries exhausted or operation timeout reached.
-   * <p>
-   * NOTE: This will set both the read and write timeout settings to the provided value.
-   *
-   * @param rpcTimeout the timeout of each rpc request in millisecond.
-   *
-   * @deprecated Use setReadRpcTimeout or setWriteRpcTimeout instead
-   */
-  @Deprecated
-  default void setRpcTimeout(int rpcTimeout) {
-    setReadRpcTimeout(rpcTimeout);
-    setWriteRpcTimeout(rpcTimeout);
-  }
-
   /**
    * Get timeout of each rpc read request in this Table instance.
    * @param unit the unit of time the timeout to be represented in
@@ -808,30 +618,6 @@ public interface Table extends Closeable {
    * @return read rpc timeout in the specified time unit
    */
   default long getReadRpcTimeout(TimeUnit unit) {
     throw new NotImplementedException("Add an implementation!");
   }
 
-  /**
-   * Get timeout (millisecond) of each rpc read request in this Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getReadRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getReadRpcTimeout() {
-    return (int)getReadRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc read request in operations of this Table instance, will
-   * override the value of hbase.rpc.read.timeout in configuration.
-   * If a rpc read request waiting too long, it will stop waiting and send a new request to retry
-   * until retries exhausted or operation timeout reached.
-   *
-   * @param readRpcTimeout the timeout for read rpc request in milliseconds
-   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
-   */
-  @Deprecated
-  default void setReadRpcTimeout(int readRpcTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
   /**
    * Get timeout of each rpc write request in this Table instance.
    * @param unit the unit of time the timeout to be represented in
@@ -841,30 +627,6 @@ public interface Table extends Closeable {
    * @return write rpc timeout in the specified time unit
    */
   default long getWriteRpcTimeout(TimeUnit unit) {
     throw new NotImplementedException("Add an implementation!");
   }
 
-  /**
-   * Get timeout (millisecond) of each rpc write request in this Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getWriteRpcTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getWriteRpcTimeout() {
-    return (int)getWriteRpcTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each rpc write request in operations of this Table instance, will
-   * override the value of hbase.rpc.write.timeout in configuration.
-   * If a rpc write request waiting too long, it will stop waiting and send a new request to retry
-   * until retries exhausted or operation timeout reached.
-   *
-   * @param writeRpcTimeout the timeout for write rpc request in milliseconds
-   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
-   */
-  @Deprecated
-  default void setWriteRpcTimeout(int writeRpcTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
-
   /**
    * Get timeout of each operation in Table instance.
    * @param unit the unit of time the timeout to be represented in
@@ -873,30 +635,4 @@ public interface Table extends Closeable {
   default long getOperationTimeout(TimeUnit unit) {
     throw new NotImplementedException("Add an implementation!");
   }
-
-  /**
-   * Get timeout (millisecond) of each operation for in Table instance.
-   * @deprecated since 2.0 and will be removed in 3.0 version
-   *             use {@link #getOperationTimeout(TimeUnit)} instead
-   */
-  @Deprecated
-  default int getOperationTimeout() {
-    return (int)getOperationTimeout(TimeUnit.MILLISECONDS);
-  }
-
-  /**
-   * Set timeout (millisecond) of each operation in this Table instance, will override the value
-   * of hbase.client.operation.timeout in configuration.
-   * Operation timeout is a top-level restriction that makes sure a blocking method will not be
-   * blocked more than this. In each operation, if rpc request fails because of timeout or
-   * other reason, it will retry until success or throw a RetriesExhaustedException. But if the
-   * total time being blocking reach the operation timeout before retries exhausted, it will break
-   * early and throw SocketTimeoutException.
-   * @param operationTimeout the total timeout of each operation in millisecond.
-   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
-   */
-  @Deprecated
-  default void setOperationTimeout(int operationTimeout) {
-    throw new NotImplementedException("Add an implementation!");
-  }
 }
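Not part of the patch: with every deprecated default method gone from the Table interface
above, descriptors and existence checks go through the TableDescriptor/TimeUnit-based API,
and per-table timeouts are fixed at construction time through TableBuilder instead of
setters. A sketch under those assumptions; the table name, gets, and literal timeouts are
placeholders:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    // Hypothetical caller code; "my_table" is a placeholder table name.
    public final class TableMigrationSketch {
      static void example(Connection conn, List<Get> gets) throws IOException {
        // Before: table.setReadRpcTimeout(30000); table.setOperationTimeout(60000);
        try (Table table = conn.getTableBuilder(TableName.valueOf("my_table"), null)
            .setReadRpcTimeout(30000)
            .setOperationTimeout(60000)
            .build()) {
          TableDescriptor htd = table.getDescriptor(); // replaces getTableDescriptor()
          boolean[] found = table.exists(gets);        // replaces existsAll(gets)
          System.out.println(htd.getTableName() + ": " + found.length + " existence results");
        }
      }
    }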
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index d5e8dccf479..894f6f94b75 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -1353,7 +1353,7 @@ public class TestAsyncProcess {
     ap.previousTimeout = -1;
 
     try {
-      ht.existsAll(gets);
+      ht.exists(gets);
     } catch (ClassCastException e) {
       // No result response on this test.
     }
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
index a28c9f69a99..51497fc86d3 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestBulkLoad.java
@@ -298,7 +298,7 @@ public class IntegrationTestBulkLoad extends IntegrationTestBase {
         RegionLocator regionLocator = conn.getRegionLocator(getTablename())) {
 
       // Configure the partitioner and other things needed for HFileOutputFormat.
-      HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+      HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
 
       // Run the job making sure it works.
      assertEquals(true, job.waitForCompletion(true));
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 5c0bb2b62ee..b75e0f9cb1f 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
@@ -521,7 +522,7 @@ public class TestHFileOutputFormat2 {
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
     setupMockStartKeys(regionLocator);
     setupMockTableName(regionLocator);
-    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
     assertEquals(job.getNumReduceTasks(), 4);
   }
 
@@ -631,7 +632,7 @@ public class TestHFileOutputFormat2 {
       assertEquals("Should make " + regionNum + " regions", numRegions, regionNum);
 
       allTables.put(tableStrSingle, table);
-      tableInfo.add(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), r));
+      tableInfo.add(new HFileOutputFormat2.TableInfo(table.getDescriptor(), r));
     }
     Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
     // Generate the bulk load files
@@ -818,7 +819,7 @@ public class TestHFileOutputFormat2 {
     conf.set(HFileOutputFormat2.COMPRESSION_FAMILIES_CONF_KEY,
         HFileOutputFormat2.serializeColumnFamilyAttribute
            (HFileOutputFormat2.compressionDetails,
-            Arrays.asList(table.getTableDescriptor())));
+            Arrays.asList(table.getDescriptor())));
 
     // read back family specific compression setting from the configuration
     Map<String, Algorithm> retrievedFamilyToCompressionMap = HFileOutputFormat2
@@ -844,7 +845,7 @@ public class TestHFileOutputFormat2 {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -890,7 +891,7 @@ public class TestHFileOutputFormat2 {
         familyToBloomType);
     conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY,
         HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails,
-        Arrays.asList(table.getTableDescriptor())));
+        Arrays.asList(table.getDescriptor())));
 
     // read back family specific data block encoding settings from the
     // configuration
@@ -918,7 +919,7 @@ public class TestHFileOutputFormat2 {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -962,7 +963,7 @@ public class TestHFileOutputFormat2 {
     conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY,
         HFileOutputFormat2.serializeColumnFamilyAttribute
            (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table
-                .getTableDescriptor())));
+                .getDescriptor())));
 
     // read back family specific data block encoding settings from the
     // configuration
@@ -991,7 +992,7 @@ public class TestHFileOutputFormat2 {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -1036,7 +1037,7 @@ public class TestHFileOutputFormat2 {
     Table table = Mockito.mock(Table.class);
     setupMockColumnFamiliesForDataBlockEncoding(table,
        familyToDataBlockEncoding);
-    HTableDescriptor tableDescriptor = table.getTableDescriptor();
+    TableDescriptor tableDescriptor = table.getDescriptor();
     conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY,
         HFileOutputFormat2.serializeColumnFamilyAttribute
            (HFileOutputFormat2.dataBlockEncodingDetails, Arrays
@@ -1068,7 +1069,7 @@ public class TestHFileOutputFormat2 {
           .setBlockCacheEnabled(false)
           .setTimeToLive(0));
     }
-    Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor();
+    Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
   }
 
   /**
@@ -1126,7 +1127,7 @@ public class TestHFileOutputFormat2 {
     Table table = Mockito.mock(Table.class);
     RegionLocator regionLocator = Mockito.mock(RegionLocator.class);
     HTableDescriptor htd = new HTableDescriptor(TABLE_NAMES[0]);
-    Mockito.doReturn(htd).when(table).getTableDescriptor();
+    Mockito.doReturn(htd).when(table).getDescriptor();
     for (HColumnDescriptor hcd: HBaseTestingUtility.generateColumnDescriptors()) {
       htd.addFamily(hcd);
     }
@@ -1146,7 +1147,7 @@ public class TestHFileOutputFormat2 {
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
     job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilySettings"));
     setupRandomGeneratorMapper(job, false);
-    HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator);
+    HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator);
     FileOutputFormat.setOutputPath(job, dir);
     context = createTestTaskAttemptContext(job);
     HFileOutputFormat2 hof = new HFileOutputFormat2();
@@ -1248,7 +1249,7 @@ public class TestHFileOutputFormat2 {
       for (int i = 0; i < 2; i++) {
         Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
         runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table
-                .getTableDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false);
+                .getDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false);
         // Perform the actual load
         new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
       }
@@ -1341,7 +1342,7 @@ public class TestHFileOutputFormat2 {
       RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]);
       runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table
-              .getTableDescriptor(), regionLocator)), testDir, false);
+              .getDescriptor(), regionLocator)), testDir, false);
 
       // Perform the actual load
       new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
index 786fcb60537..1260101eef9 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -16,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 import java.util.Map;
-
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
@@ -35,20 +32,19 @@ import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.UriInfo;
 import javax.xml.namespace.QName;
-
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;
 import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 @InterfaceAudience.Private
 public class SchemaResource extends ResourceBase {
@@ -65,21 +61,15 @@ public class SchemaResource extends ResourceBase {
 
   /**
    * Constructor
-   * @param tableResource
-   * @throws IOException
    */
   public SchemaResource(TableResource tableResource) throws IOException {
     super();
     this.tableResource = tableResource;
   }
 
-  private HTableDescriptor getTableSchema() throws IOException,
-      TableNotFoundException {
-    Table table = servlet.getTable(tableResource.getName());
-    try {
-      return table.getTableDescriptor();
-    } finally {
-      table.close();
+  private HTableDescriptor getTableSchema() throws IOException, TableNotFoundException {
+    try (Table table = servlet.getTable(tableResource.getName())) {
+      return new HTableDescriptor(table.getDescriptor());
     }
   }
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index bdb383856ef..8d4ce934ce9 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -22,20 +22,27 @@ import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
-
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
 import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
@@ -63,19 +70,9 @@ import org.apache.hadoop.hbase.rest.model.ScannerModel;
 import org.apache.hadoop.hbase.rest.model.TableSchemaModel;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 
@@ -94,8 +91,8 @@ public class RemoteHTable implements Table {
   final long sleepTime;
 
   @SuppressWarnings("rawtypes")
-  protected String buildRowSpec(final byte[] row, final Map familyMap,
-      final long startTime, final long endTime, final int maxVersions) {
+  protected String buildRowSpec(final byte[] row, final Map familyMap, final long startTime,
+      final long endTime, final int maxVersions) {
     StringBuffer sb = new StringBuffer();
     sb.append('/');
     sb.append(Bytes.toString(name));
@@ -106,15 +103,15 @@ public class RemoteHTable implements Table {
       Iterator i = familyMap.entrySet().iterator();
       sb.append('/');
       while (i.hasNext()) {
-        Map.Entry e = (Map.Entry)i.next();
-        Collection quals = (Collection)e.getValue();
+        Map.Entry e = (Map.Entry) i.next();
+        Collection quals = (Collection) e.getValue();
         if (quals == null || quals.isEmpty()) {
           // this is an unqualified family. append the family name and NO ':'
-          sb.append(toURLEncodedBytes((byte[])e.getKey()));
+          sb.append(toURLEncodedBytes((byte[]) e.getKey()));
         } else {
           Iterator ii = quals.iterator();
           while (ii.hasNext()) {
-            sb.append(toURLEncodedBytes((byte[])e.getKey()));
+            sb.append(toURLEncodedBytes((byte[]) e.getKey()));
             Object o = ii.next();
             // Puts use byte[] but Deletes use KeyValue
             if (o instanceof byte[]) {
@@ -165,7 +162,7 @@ public class RemoteHTable implements Table {
       return sb.toString();
     }
     sb.append("?");
-    for(int i=0; i<rows.length; i++) {
+    for (int i = 0; i < rows.length; i++) {
       byte[] rk = rows[i];
       if (i != 0) {
         sb.append('&');
@@ -181,9 +178,9 @@ public class RemoteHTable implements Table {
   protected Result[] buildResultFromModel(final CellSetModel model) {
     List<Result> results = new ArrayList<>();
-    for (RowModel row: model.getRows()) {
+    for (RowModel row : model.getRows()) {
       List<Cell> kvs = new ArrayList<>(row.getCells().size());
-      for (CellModel cell: row.getCells()) {
+      for (CellModel cell : row.getCells()) {
         byte[][] split = CellUtil.parseColumn(cell.getColumn());
         byte[] column = split[0];
         byte[] qualifier = null;
@@ -194,8 +191,8 @@ public class RemoteHTable implements Table {
       } else {
         throw new IllegalArgumentException("Invalid familyAndQualifier provided.");
       }
-      kvs.add(new KeyValue(row.getKey(), column, qualifier,
-          cell.getTimestamp(), cell.getValue()));
+      kvs
+        .add(new KeyValue(row.getKey(), column, qualifier, cell.getTimestamp(), cell.getValue()));
     }
     results.add(Result.create(kvs));
   }
@@ -205,11 +202,10 @@ public class RemoteHTable implements Table {
   protected CellSetModel buildModelFromPut(Put put) {
     RowModel row = new RowModel(put.getRow());
     long ts = put.getTimestamp();
-    for (List<Cell> cells: put.getFamilyCellMap().values()) {
-      for (Cell cell: cells) {
+    for (List<Cell> cells : put.getFamilyCellMap().values()) {
+      for (Cell cell : cells) {
         row.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
-          ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(),
-          CellUtil.cloneValue(cell)));
+          ts != HConstants.LATEST_TIMESTAMP ? ts : cell.getTimestamp(), CellUtil.cloneValue(cell)));
       }
     }
     CellSetModel model = new CellSetModel();
@@ -256,36 +252,6 @@ public class RemoteHTable implements Table {
     return conf;
   }
 
-  @Override
-  @Deprecated
-  public HTableDescriptor getTableDescriptor() throws IOException {
-    StringBuilder sb = new StringBuilder();
-    sb.append('/');
-    sb.append(Bytes.toString(name));
-    sb.append('/');
-    sb.append("schema");
-    for (int i = 0; i < maxRetries; i++) {
-      Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
-      int code = response.getCode();
-      switch (code) {
-      case 200:
-        TableSchemaModel schema = new TableSchemaModel();
-        schema.getObjectFromMessage(response.getBody());
-        return schema.getTableDescriptor();
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("schema request returned " + code);
-      }
-    }
-    throw new IOException("schema request timed out");
-  }
-
   @Override
   public void close() throws IOException {
     client.shutdown();
@@ -294,8 +260,8 @@ public class RemoteHTable implements Table {
   @Override
   public Result get(Get get) throws IOException {
     TimeRange range = get.getTimeRange();
-    String spec = buildRowSpec(get.getRow(), get.getFamilyMap(),
-      range.getMin(), range.getMax(), get.getMaxVersions());
+    String spec = buildRowSpec(get.getRow(), get.getFamilyMap(), range.getMin(), range.getMax(),
+      get.getMaxVersions());
     if (get.getFilter() != null) {
       LOG.warn("filters not supported on gets");
     }
@@ -316,12 +282,13 @@ public class RemoteHTable implements Table {
     int maxVersions = 1;
     int count = 0;
 
-    for(Get g:gets) {
+    for (Get g : gets) {
 
-      if ( count == 0 ) {
+      if (count == 0) {
         maxVersions = g.getMaxVersions();
       } else if (g.getMaxVersions() != maxVersions) {
-        LOG.warn("MaxVersions on Gets do not match, using the first in the list ("+maxVersions+")");
+        LOG.warn(
+          "MaxVersions on Gets do not match, using the first in the list (" + maxVersions + ")");
       }
 
       if (g.getFilter() != null) {
@@ -329,7 +296,7 @@ public class RemoteHTable implements Table {
       }
 
       rows[count] = g.getRow();
-      count ++;
+      count++;
     }
 
     String spec = buildMultiRowSpec(rows, maxVersions);
@@ -346,7 +313,7 @@ public class RemoteHTable implements Table {
           CellSetModel model = new CellSetModel();
           model.getObjectFromMessage(response.getBody());
           Result[] results = buildResultFromModel(model);
-          if ( results.length > 0) {
+          if (results.length > 0) {
             return results;
           }
           // fall through
@@ -357,7 +324,7 @@ public class RemoteHTable implements Table {
         try {
           Thread.sleep(sleepTime);
         } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+          throw (InterruptedIOException) new InterruptedIOException().initCause(e);
         }
         break;
       default:
@@ -393,21 +360,21 @@ public class RemoteHTable implements Table {
     sb.append('/');
     sb.append(toURLEncodedBytes(put.getRow()));
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
-        model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
-      case 200:
-        return;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("put request failed with " + code);
+        case 200:
+          return;
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("put request failed with " + code);
       }
     }
     throw new IOException("put request timed out");
@@ -419,24 +386,24 @@ public class RemoteHTable implements Table {
     // ignores the row specification in the URI
 
     // separate puts by row
-    TreeMap<byte[],List<Cell>> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
-    for (Put put: puts) {
+    TreeMap<byte[], List<Cell>> map = new TreeMap<>(Bytes.BYTES_COMPARATOR);
+    for (Put put : puts) {
       byte[] row = put.getRow();
       List<Cell> cells = map.get(row);
       if (cells == null) {
         cells = new ArrayList<>();
         map.put(row, cells);
       }
-      for (List<Cell> l: put.getFamilyCellMap().values()) {
+      for (List<Cell> l : put.getFamilyCellMap().values()) {
         cells.addAll(l);
       }
     }
 
     // build the cell set
     CellSetModel model = new CellSetModel();
-    for (Map.Entry<byte[], List<Cell>> e: map.entrySet()) {
+    for (Map.Entry<byte[], List<Cell>> e : map.entrySet()) {
       RowModel row = new RowModel(e.getKey());
-      for (Cell cell: e.getValue()) {
+      for (Cell cell : e.getValue()) {
         row.addCell(new CellModel(cell));
       }
       model.addRow(row);
@@ -448,21 +415,21 @@ public class RemoteHTable implements Table {
     sb.append(Bytes.toString(name));
     sb.append("/$multiput"); // can be any nonexistent row
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF,
-        model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
-      case 200:
-        return;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("multiput request failed with " + code);
+        case 200:
+          return;
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("multiput request failed with " + code);
       }
     }
     throw new IOException("multiput request timed out");
@@ -470,23 +437,23 @@ public class RemoteHTable implements Table {
 
   @Override
   public void delete(Delete delete) throws IOException {
-    String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(),
-      delete.getTimestamp(), delete.getTimestamp(), 1);
+    String spec = buildRowSpec(delete.getRow(), delete.getFamilyCellMap(), delete.getTimestamp(),
+      delete.getTimestamp(), 1);
     for (int i = 0; i < maxRetries; i++) {
       Response response = client.delete(spec);
       int code = response.getCode();
       switch (code) {
-      case 200:
-        return;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("delete request failed with " + code);
+        case 200:
+          return;
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("delete request failed with " + code);
       }
     }
     throw new IOException("delete request timed out");
@@ -494,7 +461,7 @@ public class RemoteHTable implements Table {
 
   @Override
   public void delete(List<Delete> deletes) throws IOException {
-    for (Delete delete: deletes) {
+    for (Delete delete : deletes) {
       delete(delete);
     }
   }
@@ -505,7 +472,31 @@ public class RemoteHTable implements Table {
 
   @Override
   public TableDescriptor getDescriptor() throws IOException {
-    return getTableDescriptor();
+    StringBuilder sb = new StringBuilder();
+    sb.append('/');
+    sb.append(Bytes.toString(name));
+    sb.append('/');
+    sb.append("schema");
+    for (int i = 0; i < maxRetries; i++) {
+      Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
+      int code = response.getCode();
+      switch (code) {
+        case 200:
+          TableSchemaModel schema = new TableSchemaModel();
+          schema.getObjectFromMessage(response.getBody());
+          return schema.getTableDescriptor();
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("schema request returned " + code);
+      }
+    }
+    throw new IOException("schema request timed out");
   }
 
   class Scanner implements ResultScanner {
@@ -525,22 +516,22 @@ public class RemoteHTable implements Table {
       sb.append('/');
       sb.append("scanner");
       for (int i = 0; i < maxRetries; i++) {
-        Response response = client.post(sb.toString(),
-          Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+        Response response =
+          client.post(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
         int code = response.getCode();
         switch (code) {
-        case 201:
-          uri = response.getLocation();
-          return;
-        case 509:
-          try {
-            Thread.sleep(sleepTime);
-          } catch (InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-          }
-          break;
-        default:
-          throw new IOException("scan request failed with " + code);
+          case 201:
+            uri = response.getLocation();
+            return;
+          case 509:
+            try {
+              Thread.sleep(sleepTime);
+            } catch (InterruptedException e) {
+              throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+            }
+            break;
+          default:
+            throw new IOException("scan request failed with " + code);
         }
       }
       throw new IOException("scan request timed out");
@@ -552,26 +543,25 @@ public class RemoteHTable implements Table {
       sb.append("?n=");
       sb.append(nbRows);
       for (int i = 0; i < maxRetries; i++) {
-        Response response = client.get(sb.toString(),
-          Constants.MIMETYPE_PROTOBUF);
+        Response response = client.get(sb.toString(), Constants.MIMETYPE_PROTOBUF);
         int code = response.getCode();
         switch (code) {
-        case 200:
-          CellSetModel model = new CellSetModel();
-          model.getObjectFromMessage(response.getBody());
-          return buildResultFromModel(model);
-        case 204:
-        case 206:
-          return null;
-        case 509:
-          try {
-            Thread.sleep(sleepTime);
-          } catch (InterruptedException e) {
-            throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-          }
-          break;
-        default:
-          throw new IOException("scanner.next request failed with " + code);
+          case 200:
+            CellSetModel model = new CellSetModel();
+            model.getObjectFromMessage(response.getBody());
+            return buildResultFromModel(model);
+          case 204:
+          case 206:
+            return null;
+          case 509:
+            try {
+              Thread.sleep(sleepTime);
+            } catch (InterruptedException e) {
+              throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+            }
+            break;
+          default:
+            throw new IOException("scanner.next request failed with " + code);
         }
       }
       throw new IOException("scanner.next request timed out");
@@ -660,8 +650,7 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  public ResultScanner getScanner(byte[] family, byte[] qualifier)
-      throws IOException {
+  public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
     Scan scan = new Scan();
     scan.addColumn(family, qualifier);
     return new Scanner(scan);
@@ -671,15 +660,8 @@ public class RemoteHTable implements Table {
     return true;
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Put put) throws IOException {
-    return doCheckAndPut(row, family, qualifier, value, put);
-  }
-
-  private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Put put) throws IOException {
+  private boolean doCheckAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+      throws IOException {
     // column to check-the-value
     put.add(new KeyValue(row, family, qualifier, value));
 
     CellSetModel model = buildModelFromPut(put);
     StringBuffer sb = new StringBuffer();
     sb.append('/');
     sb.append(Bytes.toString(name));
     sb.append('/');
     sb.append(toURLEncodedBytes(row));
     sb.append("?check=put");
@@ -692,43 +674,30 @@ public class RemoteHTable implements Table {
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(),
-        Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
-      case 200:
-        return true;
-      case 304: // NOT-MODIFIED
-        return false;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (final InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("checkAndPut request failed with " + code);
+        case 200:
+          return true;
+        case 304: // NOT-MODIFIED
+          return false;
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (final InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("checkAndPut request failed with " + code);
       }
     }
     throw new IOException("checkAndPut request timed out");
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, Put put) throws IOException {
-    throw new IOException("checkAndPut for non-equal comparison not implemented");
-  }
-
-  @Override
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Delete delete) throws IOException {
-    return doCheckAndDelete(row, family, qualifier, value, delete);
-  }
-
-  private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      byte[] value, Delete delete) throws IOException {
+  private boolean doCheckAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
+      Delete delete) throws IOException {
     Put put = new Put(row);
     put.setFamilyCellMap(delete.getFamilyCellMap());
     // column to check-the-value
@@ -742,47 +711,33 @@ public class RemoteHTable implements Table {
     sb.append("?check=delete");
     for (int i = 0; i < maxRetries; i++) {
-      Response response = client.put(sb.toString(),
-        Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
+      Response response =
+        client.put(sb.toString(), Constants.MIMETYPE_PROTOBUF, model.createProtobufOutput());
       int code = response.getCode();
       switch (code) {
-      case 200:
-        return true;
-      case 304: // NOT-MODIFIED
-        return false;
-      case 509:
-        try {
-          Thread.sleep(sleepTime);
-        } catch (final InterruptedException e) {
-          throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        }
-        break;
-      default:
-        throw new IOException("checkAndDelete request failed with " + code);
+        case 200:
+          return true;
+        case 304: // NOT-MODIFIED
+          return false;
+        case 509:
+          try {
+            Thread.sleep(sleepTime);
+          } catch (final InterruptedException e) {
+            throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+          }
+          break;
+        default:
+          throw new IOException("checkAndDelete request failed with " + code);
       }
     }
     throw new IOException("checkAndDelete request timed out");
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, Delete delete) throws IOException {
-    throw new IOException("checkAndDelete for non-equal comparison not implemented");
-  }
-
   @Override
   public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
     return new CheckAndMutateBuilderImpl(row, family);
   }
 
-  @Override
-  @Deprecated
-  public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
-      CompareOperator compareOp, byte[] value, RowMutations rm) throws IOException {
-    throw new UnsupportedOperationException("checkAndMutate not implemented");
-  }
-
   @Override
   public Result increment(Increment increment) throws IOException {
     throw new IOException("Increment not supported");
   }
@@ -794,14 +749,14 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
-      long amount) throws IOException {
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount)
+      throws IOException {
     throw new IOException("incrementColumnValue not supported");
   }
 
   @Override
-  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
-      long amount, Durability durability) throws IOException {
+  public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount,
+      Durability durability) throws IOException {
     throw new IOException("incrementColumnValue not supported");
   }
 
@@ -822,15 +777,14 @@ public class RemoteHTable implements Table {
   }
 
   @Override
-  public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service,
-      byte[] startKey, byte[] endKey, Batch.Call<T, R> callable)
-      throws ServiceException, Throwable {
+  public <T extends Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey,
+      byte[] endKey, Batch.Call<T, R> callable) throws ServiceException, Throwable {
     throw new UnsupportedOperationException("coprocessorService not implemented");
   }
 
   @Override
-  public <T extends Service, R> void coprocessorService(Class<T> service,
-      byte[] startKey, byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback)
+  public <T extends Service, R> void coprocessorService(Class<T> service, byte[] startKey,
+      byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback)
       throws ServiceException, Throwable {
     throw new UnsupportedOperationException("coprocessorService not implemented");
   }
@@ -842,93 +796,42 @@ public class RemoteHTable implements Table {
 
   @Override
   public <R extends Message> Map<byte[], R> batchCoprocessorService(
-      Descriptors.MethodDescriptor method, Message request,
-      byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable {
+      Descriptors.MethodDescriptor method, Message request, byte[] startKey, byte[] endKey,
+      R responsePrototype) throws ServiceException, Throwable {
     throw new UnsupportedOperationException("batchCoprocessorService not implemented");
   }
 
   @Override
-  public <R extends Message> void batchCoprocessorService(
-      Descriptors.MethodDescriptor method, Message request,
-      byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
+  public <R extends Message> void batchCoprocessorService(Descriptors.MethodDescriptor method,
+      Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback)
       throws ServiceException, Throwable {
     throw new UnsupportedOperationException("batchCoprocessorService not implemented");
   }
 
-  @Override
-  @Deprecated
-  public void setOperationTimeout(int operationTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public int getOperationTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setRpcTimeout(int rpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getReadRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getReadRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setReadRpcTimeout(int readRpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
-  @Override
-  @Deprecated
-  public int getWriteRpcTimeout() {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  @Deprecated
-  public void setWriteRpcTimeout(int writeRpcTimeout) {
-    throw new UnsupportedOperationException();
-  }
-
   @Override
   public long getOperationTimeout(TimeUnit unit) {
     throw new UnsupportedOperationException();
   }
 
   /*
-   * Only a small subset of characters are valid in URLs.
-   *
-   * Row keys, column families, and qualifiers cannot be appended to URLs without first URL
-   * escaping. Table names are ok because they can only contain alphanumeric, ".","_", and "-"
-   * which are valid characters in URLs.
+   * Only a small subset of characters are valid in URLs. Row keys, column families, and qualifiers
+   * cannot be appended to URLs without first URL escaping. Table names are ok because they can only
+   * contain alphanumeric, ".","_", and "-" which are valid characters in URLs.
    */
   private static String toURLEncodedBytes(byte[] row) {
     try {
Consider using" + - " an empty byte array, or just do not call this method if you want a null qualifier"); + " an empty byte array, or just do not call this method if you want a null qualifier"); return this; } @@ -964,8 +867,8 @@ public class RemoteHTable implements Table { @Override public CheckAndMutateBuilder ifNotExists() { - throw new UnsupportedOperationException("CheckAndMutate for non-equal comparison " - + "not implemented"); + throw new UnsupportedOperationException( + "CheckAndMutate for non-equal comparison " + "not implemented"); } @Override @@ -974,8 +877,8 @@ public class RemoteHTable implements Table { this.value = Preconditions.checkNotNull(value, "value is null"); return this; } else { - throw new UnsupportedOperationException("CheckAndMutate for non-equal comparison " + - "not implemented"); + throw new UnsupportedOperationException( + "CheckAndMutate for non-equal comparison " + "not implemented"); } } diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java index 3dae90cd11e..78c7f0009a8 100644 --- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java +++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.rest.HBaseRESTTestingUtility; import org.apache.hadoop.hbase.rest.RESTServlet; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -152,13 +153,9 @@ public class TestRemoteTable { @Test public void testGetTableDescriptor() throws IOException { - Table table = null; - try { - table = TEST_UTIL.getConnection().getTable(TABLE); - HTableDescriptor local = table.getTableDescriptor(); - assertEquals(remoteTable.getTableDescriptor(), local); - } finally { - if (null != table) table.close(); + try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) { + TableDescriptor local = table.getDescriptor(); + assertEquals(remoteTable.getDescriptor(), new HTableDescriptor(local)); } } @@ -505,7 +502,7 @@ public class TestRemoteTable { assertTrue(Bytes.equals(VALUE_1, value1)); assertNull(value2); assertTrue(remoteTable.exists(get)); - assertEquals(1, remoteTable.existsAll(Collections.singletonList(get)).length); + assertEquals(1, remoteTable.exists(Collections.singletonList(get)).length); Delete delete = new Delete(ROW_1); remoteTable.checkAndMutate(ROW_1, COLUMN_1).qualifier(QUALIFIER_1) diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 8c60e636ba4..ec388b0b7c8 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -31,6 +31,7 @@ import="java.util.TreeMap" import="org.apache.commons.lang3.StringEscapeUtils" import="org.apache.hadoop.conf.Configuration" + import="org.apache.hadoop.hbase.HTableDescriptor" import="org.apache.hadoop.hbase.HColumnDescriptor" import="org.apache.hadoop.hbase.HConstants" import="org.apache.hadoop.hbase.HRegionLocation" @@ -131,7 +132,7 @@ if ( fqtn != null ) { try { table = master.getConnection().getTable(TableName.valueOf(fqtn)); - if 
(table.getTableDescriptor().getRegionReplication() > 1) { + if (table.getDescriptor().getRegionReplication() > 1) { tableHeader = "
<h2>Table Regions</h2><table class=\"table table-striped\"><tr><th>Name</th><th>Region Server</th><th>ReadRequests</th><th>WriteRequests</th><th>StorefileSize</th><th>Num.Storefiles</th><th>MemSize</th><th>Locality</th><th>Start Key</th><th>End Key</th><th>ReplicaID</th></tr>
"; withReplica = true; } else { @@ -365,7 +366,7 @@ if ( fqtn != null ) { <% - Collection families = table.getTableDescriptor().getFamilies(); + Collection families = new HTableDescriptor(table.getDescriptor()).getFamilies(); for (HColumnDescriptor family: families) { %> diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 7b208b864b3..f3e0981dd42 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.client; +import static org.apache.hadoop.hbase.HBaseTestingUtility.countRows; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -59,7 +60,6 @@ import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.PrivateCellUtil; -import org.apache.hadoop.hbase.RegionLocations; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; @@ -100,7 +100,6 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.NonRepeatedEnvironmentEdge; -import org.apache.hadoop.hbase.util.Pair; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -180,31 +179,24 @@ public class TestFromClientSide { // Client will retry beacuse rpc timeout is small than the sleep time of first rpc call c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); - Connection connection = ConnectionFactory.createConnection(c); - try (Table t = connection.getTable(TableName.valueOf(name.getMethodName()))) { - if (t instanceof HTable) { - HTable table = (HTable) t; - table.setOperationTimeout(3 * 1000); - try { - Append append = new Append(ROW); - append.addColumn(HBaseTestingUtility.fam1, QUALIFIER, VALUE); - Result result = table.append(append); + try (Connection connection = ConnectionFactory.createConnection(c); + Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null) + .setOperationTimeout(3 * 1000).build()) { + Append append = new Append(ROW); + append.addColumn(HBaseTestingUtility.fam1, QUALIFIER, VALUE); + Result result = table.append(append); - // Verify expected result - Cell[] cells = result.rawCells(); - assertEquals(1, cells.length); - assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE); + // Verify expected result + Cell[] cells = result.rawCells(); + assertEquals(1, cells.length); + assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE); - // Verify expected result again - Result readResult = table.get(new Get(ROW)); - cells = readResult.rawCells(); - assertEquals(1, cells.length); - assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE); - } finally { - connection.close(); - } - } + // Verify expected result again + Result readResult = table.get(new Get(ROW)); + cells = readResult.rawCells(); + assertEquals(1, cells.length); + assertKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, VALUE); } } @@ -484,7 +476,7 @@ public class TestFromClientSide { byte[] endKey = regions.get(0).getRegion().getEndKey(); 
// Count rows with a filter that stops us before passed 'endKey'. // Should be count of rows in first region. - int endKeyCount = TEST_UTIL.countRows(t, createScanWithRowFilter(endKey)); + int endKeyCount = countRows(t, createScanWithRowFilter(endKey)); assertTrue(endKeyCount < rowCount); // How do I know I did not got to second region? Thats tough. Can't really @@ -496,30 +488,29 @@ public class TestFromClientSide { // New test. Make it so scan goes into next region by one and then two. // Make sure count comes out right. byte[] key = new byte[]{endKey[0], endKey[1], (byte) (endKey[2] + 1)}; - int plusOneCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key)); + int plusOneCount = countRows(t, createScanWithRowFilter(key)); assertEquals(endKeyCount + 1, plusOneCount); key = new byte[]{endKey[0], endKey[1], (byte) (endKey[2] + 2)}; - int plusTwoCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key)); + int plusTwoCount = countRows(t, createScanWithRowFilter(key)); assertEquals(endKeyCount + 2, plusTwoCount); // New test. Make it so I scan one less than endkey. key = new byte[]{endKey[0], endKey[1], (byte) (endKey[2] - 1)}; - int minusOneCount = TEST_UTIL.countRows(t, createScanWithRowFilter(key)); + int minusOneCount = countRows(t, createScanWithRowFilter(key)); assertEquals(endKeyCount - 1, minusOneCount); // For above test... study logs. Make sure we do "Finished with scanning.." // in first region and that we do not fall into the next region. - key = new byte[]{'a', 'a', 'a'}; - int countBBB = TEST_UTIL.countRows(t, - createScanWithRowFilter(key, null, CompareOperator.EQUAL)); + key = new byte[] { 'a', 'a', 'a' }; + int countBBB = countRows(t, createScanWithRowFilter(key, null, CompareOperator.EQUAL)); assertEquals(1, countBBB); - int countGreater = TEST_UTIL.countRows(t, createScanWithRowFilter(endKey, null, - CompareOperator.GREATER_OR_EQUAL)); + int countGreater = + countRows(t, createScanWithRowFilter(endKey, null, CompareOperator.GREATER_OR_EQUAL)); // Because started at start of table. assertEquals(0, countGreater); - countGreater = TEST_UTIL.countRows(t, createScanWithRowFilter(endKey, endKey, - CompareOperator.GREATER_OR_EQUAL)); + countGreater = + countRows(t, createScanWithRowFilter(endKey, endKey, CompareOperator.GREATER_OR_EQUAL)); assertEquals(rowCount - endKeyCount, countGreater); } } @@ -551,16 +542,14 @@ public class TestFromClientSide { return s; } - private void assertRowCount(final Table t, final int expected) - throws IOException { - assertEquals(expected, TEST_UTIL.countRows(t, new Scan())); + private void assertRowCount(final Table t, final int expected) throws IOException { + assertEquals(expected, countRows(t, new Scan())); } - /* + /** * Split table into multiple regions. * @param t Table to split. * @return Map of regions to servers. 
- * @throws IOException */ private List splitTable(final Table t) throws IOException, InterruptedException { @@ -4374,7 +4363,7 @@ public class TestFromClientSide { // Test user metadata try (Admin admin = TEST_UTIL.getAdmin()) { // make a modifiable descriptor - HTableDescriptor desc = new HTableDescriptor(a.getTableDescriptor()); + HTableDescriptor desc = new HTableDescriptor(a.getDescriptor()); // offline the table admin.disableTable(tableAname); // add a user attribute to HTD @@ -4390,7 +4379,7 @@ public class TestFromClientSide { } // Test that attribute changes were applied - HTableDescriptor desc = a.getTableDescriptor(); + HTableDescriptor desc = new HTableDescriptor(a.getDescriptor()); assertEquals("wrong table descriptor returned", desc.getTableName(), tableAname); // check HTD attribute value = desc.getValue(attrName); @@ -6445,19 +6434,6 @@ public class TestFromClientSide { } } - private static Pair getStartEndKeys(List regions) { - final byte[][] startKeyList = new byte[regions.size()][]; - final byte[][] endKeyList = new byte[regions.size()][]; - - for (int i = 0; i < regions.size(); i++) { - RegionInfo region = regions.get(i).getRegionLocation().getRegion(); - startKeyList[i] = region.getStartKey(); - endKeyList[i] = region.getEndKey(); - } - - return new Pair<>(startKeyList, endKeyList); - } - @Test public void testFilterAllRecords() throws IOException { Scan scan = new Scan(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 94a2c54bda2..7e05b924f28 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -311,7 +311,7 @@ public class TestFromClientSide3 { // change the compaction.min config option for this table to 5 LOG.info("hbase.hstore.compaction.min should now be 5"); - HTableDescriptor htd = new HTableDescriptor(table.getTableDescriptor()); + HTableDescriptor htd = new HTableDescriptor(table.getDescriptor()); htd.setValue("hbase.hstore.compaction.min", String.valueOf(5)); admin.modifyTable(htd); LOG.info("alter status finished"); @@ -368,8 +368,8 @@ public class TestFromClientSide3 { htd.modifyFamily(hcd); admin.modifyTable(htd); LOG.info("alter status finished"); - assertNull(table.getTableDescriptor().getFamily(FAMILY).getValue( - "hbase.hstore.compaction.min")); + assertNull(table.getDescriptor().getColumnFamily(FAMILY) + .getValue(Bytes.toBytes("hbase.hstore.compaction.min"))); } } } @@ -541,7 +541,7 @@ public class TestFromClientSide3 { getList.add(get); getList.add(get2); - boolean[] exists = table.existsAll(getList); + boolean[] exists = table.exists(getList); assertEquals(true, exists[0]); assertEquals(true, exists[1]); @@ -593,7 +593,7 @@ public class TestFromClientSide3 { gets.add(new Get(Bytes.add(ANOTHERROW, new byte[]{0x00}))); LOG.info("Calling exists"); - boolean[] results = table.existsAll(gets); + boolean[] results = table.exists(gets); assertFalse(results[0]); assertFalse(results[1]); assertTrue(results[2]); @@ -607,7 +607,7 @@ public class TestFromClientSide3 { gets = new ArrayList<>(); gets.add(new Get(new byte[]{0x00})); gets.add(new Get(new byte[]{0x00, 0x00})); - results = table.existsAll(gets); + results = table.exists(gets); assertTrue(results[0]); assertFalse(results[1]); @@ -620,7 +620,7 @@ public class TestFromClientSide3 { gets.add(new Get(new byte[]{(byte) 0xff})); 
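
The surrounding TestFromClientSide3 hunks swap the removed Table.existsAll(List) for the List-taking exists overload, which returns the same boolean[], one flag per Get in request order. A small sketch; the Table instance and row keys are assumed for illustration:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ExistsSketch {
  static void checkRows(Table table) throws IOException {
    List<Get> gets = new ArrayList<>();
    gets.add(new Get(Bytes.toBytes("row-a")));
    gets.add(new Get(Bytes.toBytes("row-b")));
    boolean[] found = table.exists(gets); // was: table.existsAll(gets)
    for (int i = 0; i < found.length; i++) {
      System.out.println("get " + i + " exists: " + found[i]);
    }
  }
}
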
gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff})); gets.add(new Get(new byte[]{(byte) 0xff, (byte) 0xff, (byte) 0xff})); - results = table.existsAll(gets); + results = table.exists(gets); assertFalse(results[0]); assertTrue(results[1]); assertFalse(results[2]); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java index b1aba6a11b9..f9bcf88655a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIncrementsFromClientSide.java @@ -112,30 +112,22 @@ public class TestIncrementsFromClientSide { // Client will retry beacuse rpc timeout is small than the sleep time of first rpc call c.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 1500); - Connection connection = ConnectionFactory.createConnection(c); - Table t = connection.getTable(TableName.valueOf(name.getMethodName())); - if (t instanceof HTable) { - HTable table = (HTable) t; - table.setOperationTimeout(3 * 1000); + try (Connection connection = ConnectionFactory.createConnection(c); + Table table = connection.getTableBuilder(TableName.valueOf(name.getMethodName()), null) + .setOperationTimeout(3 * 1000).build()) { + Increment inc = new Increment(ROW); + inc.addColumn(HBaseTestingUtility.fam1, QUALIFIER, 1); + Result result = table.increment(inc); - try { - Increment inc = new Increment(ROW); - inc.addColumn(TEST_UTIL.fam1, QUALIFIER, 1); - Result result = table.increment(inc); + Cell[] cells = result.rawCells(); + assertEquals(1, cells.length); + assertIncrementKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, 1); - Cell [] cells = result.rawCells(); - assertEquals(1, cells.length); - assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1); - - // Verify expected result - Result readResult = table.get(new Get(ROW)); - cells = readResult.rawCells(); - assertEquals(1, cells.length); - assertIncrementKey(cells[0], ROW, TEST_UTIL.fam1, QUALIFIER, 1); - } finally { - table.close(); - connection.close(); - } + // Verify expected result + Result readResult = table.get(new Get(ROW)); + cells = readResult.rawCells(); + assertEquals(1, cells.length); + assertIncrementKey(cells[0], ROW, HBaseTestingUtility.fam1, QUALIFIER, 1); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java index d55e8e0182e..69b9132d642 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestPassCustomCellViaRegionObserver.java @@ -156,10 +156,11 @@ public class TestPassCustomCellViaRegionObserver { table.get(new Get(ROW)).isEmpty()); assertObserverHasExecuted(); - assertTrue(table.checkAndPut(ROW, FAMILY, QUALIFIER, null, put)); + assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put)); assertObserverHasExecuted(); - assertTrue(table.checkAndDelete(ROW, FAMILY, QUALIFIER, VALUE, delete)); + assertTrue( + table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenDelete(delete)); assertObserverHasExecuted(); assertTrue(table.get(new Get(ROW)).isEmpty()); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java index 4cfc02c5fe8..a7cff685fd9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultiRowRangeFilter.java @@ -258,7 +258,7 @@ public class TestMultiRowRangeFilter { generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); @@ -286,7 +286,7 @@ public class TestMultiRowRangeFilter { generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); @@ -312,7 +312,7 @@ public class TestMultiRowRangeFilter { Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false)); @@ -334,7 +334,7 @@ public class TestMultiRowRangeFilter { Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(""), false)); @@ -356,7 +356,7 @@ public class TestMultiRowRangeFilter { generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); List ranges = new ArrayList<>(); ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); @@ -381,29 +381,28 @@ public class TestMultiRowRangeFilter { public void testMultiRowRangeFilterWithExclusive() throws IOException { tableName = TableName.valueOf(name.getMethodName()); TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 6000000); - Table ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); - ht.setReadRpcTimeout(600000); - ht.setOperationTimeout(6000000); - generateRows(numRows, ht, family, qf, value); + TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE); + try (Table ht = TEST_UTIL.getConnection().getTableBuilder(tableName, null) + .setReadRpcTimeout(600000).setOperationTimeout(6000000).build()) { + generateRows(numRows, ht, family, qf, value); - Scan scan = new Scan(); - scan.setMaxVersions(); + Scan scan = new Scan(); + scan.readAllVersions(); - List ranges = new ArrayList<>(); - ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); - ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(40), false)); - ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false)); + List ranges = new ArrayList<>(); + ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); + ranges.add(new RowRange(Bytes.toBytes(20), false, Bytes.toBytes(40), false)); + ranges.add(new RowRange(Bytes.toBytes(65), true, Bytes.toBytes(75), false)); - MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges); - scan.setFilter(filter); - int resultsSize = getResultsSize(ht, scan); - LOG.info("found " + resultsSize + " 
results"); - List results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht); - List results2 = getScanResult(Bytes.toBytes(65), Bytes.toBytes(75), ht); + MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges); + scan.setFilter(filter); + int resultsSize = getResultsSize(ht, scan); + LOG.info("found " + resultsSize + " results"); + List results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(40), ht); + List results2 = getScanResult(Bytes.toBytes(65), Bytes.toBytes(75), ht); - assertEquals((results1.size() - 1) + results2.size(), resultsSize); - - ht.close(); + assertEquals((results1.size() - 1) + results2.size(), resultsSize); + } } @Test @@ -413,7 +412,7 @@ public class TestMultiRowRangeFilter { generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); List ranges1 = new ArrayList<>(); ranges1.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false)); @@ -448,7 +447,7 @@ public class TestMultiRowRangeFilter { generateRows(numRows, ht, family, qf, value); Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); List ranges1 = new ArrayList<>(); ranges1.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false)); @@ -648,12 +647,12 @@ public class TestMultiRowRangeFilter { private List getScanResult(byte[] startRow, byte[] stopRow, Table ht) throws IOException { Scan scan = new Scan(); - scan.setMaxVersions(); + scan.readAllVersions(); if(!Bytes.toString(startRow).isEmpty()) { - scan.setStartRow(startRow); + scan.withStartRow(startRow); } if(!Bytes.toString(stopRow).isEmpty()) { - scan.setStopRow(stopRow); + scan.withStopRow(stopRow); } ResultScanner scanner = ht.getScanner(scan); List kvList = new ArrayList<>(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java index 3babd2ea79f..437ddfc3c5d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java @@ -24,7 +24,6 @@ import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Waiter; @@ -32,6 +31,7 @@ import org.apache.hadoop.hbase.client.CompactionState; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -139,11 +139,10 @@ public class TestWarmupRegion { RegionInfo info = region.getRegionInfo(); try { - HTableDescriptor htd = table.getTableDescriptor(); + TableDescriptor htd = table.getDescriptor(); for (int i = 0; i < 10; i++) { warmupHRegion(info, htd, rs.getWAL(info), rs.getConfiguration(), rs, null); } - } catch (IOException ie) { LOG.error("Failed warming up region " + info.getRegionNameAsString(), ie); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java index 6a520d1145c..07b834bef32 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/RegionAsTable.java @@ -29,8 +29,6 @@ import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CompareOperator; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; @@ -81,12 +79,6 @@ public class RegionAsTable implements Table { throw new UnsupportedOperationException(); } - @Override - @Deprecated - public HTableDescriptor getTableDescriptor() throws IOException { - return new HTableDescriptor(this.region.getTableDescriptor()); - } - @Override public TableDescriptor getDescriptor() throws IOException { return this.region.getTableDescriptor(); @@ -211,21 +203,6 @@ public class RegionAsTable implements Table { for (Put put: puts) put(put); } - @Override - @Deprecated - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, - CompareOperator compareOp, byte[] value, Put put) - throws IOException { - throw new UnsupportedOperationException(); - } - @Override public void delete(Delete delete) throws IOException { this.region.delete(delete); @@ -236,21 +213,6 @@ public class RegionAsTable implements Table { for(Delete delete: deletes) delete(delete); } - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, - Delete delete) - throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, - CompareOperator compareOp, byte[] value, Delete delete) - throws IOException { - throw new UnsupportedOperationException(); - } - @Override public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { throw new UnsupportedOperationException(); @@ -325,77 +287,26 @@ public class RegionAsTable implements Table { throw new UnsupportedOperationException(); } - @Override - @Deprecated - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, - CompareOperator compareOp, byte[] value, RowMutations mutation) throws IOException { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public void setOperationTimeout(int operationTimeout) { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public int getOperationTimeout() { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public void setRpcTimeout(int rpcTimeout) { - throw new UnsupportedOperationException(); - } - @Override public long getReadRpcTimeout(TimeUnit unit) { throw new UnsupportedOperationException(); } - @Override - @Deprecated - public void setWriteRpcTimeout(int writeRpcTimeout) {throw new UnsupportedOperationException(); } - @Override public long getOperationTimeout(TimeUnit unit) { throw new UnsupportedOperationException(); } - @Override - @Deprecated - public void setReadRpcTimeout(int readRpcTimeout) {throw new UnsupportedOperationException(); } - 
@Override public long getWriteRpcTimeout(TimeUnit unit) { throw new UnsupportedOperationException(); } - @Override - @Deprecated - public int getRpcTimeout() { - throw new UnsupportedOperationException(); - } - @Override public long getRpcTimeout(TimeUnit unit) { throw new UnsupportedOperationException(); } - @Override - @Deprecated - public int getWriteRpcTimeout() { - throw new UnsupportedOperationException(); - } - - @Override - @Deprecated - public int getReadRpcTimeout() { - throw new UnsupportedOperationException(); - } - @Override public RegionLocator getRegionLocator() throws IOException { throw new UnsupportedOperationException(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java index d0bc3737366..10c745ea000 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestNewVersionBehaviorFromClientSide.java @@ -82,7 +82,7 @@ public class TestNewVersionBehaviorFromClientSide { fam.setNewVersionBehavior(true); fam.setMaxVersions(3); table.addFamily(fam); - TEST_UTIL.getHBaseAdmin().createTable(table); + TEST_UTIL.getAdmin().createTable(table); return TEST_UTIL.getConnection().getTable(tableName); } @@ -310,10 +310,11 @@ public class TestNewVersionBehaviorFromClientSide { } @Test - public void testgetColumnHint() throws IOException { - try (Table t = createTable()) { - t.setOperationTimeout(10000); - t.setRpcTimeout(10000); + public void testGetColumnHint() throws IOException { + createTable(); + try (Table t = + TEST_UTIL.getConnection().getTableBuilder(TableName.valueOf(name.getMethodName()), null) + .setOperationTimeout(10000).setRpcTimeout(10000).build()) { t.put(new Put(ROW).addColumn(FAMILY, col1, 100, value)); t.put(new Put(ROW).addColumn(FAMILY, col1, 101, value)); t.put(new Put(ROW).addColumn(FAMILY, col1, 102, value)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index 0e7c019de66..68ba2e04b47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -353,13 +353,6 @@ public class TestPerColumnFamilyFlush { TEST_UTIL.getAdmin().createNamespace( NamespaceDescriptor.create(TABLENAME.getNamespaceAsString()).build()); Table table = TEST_UTIL.createTable(TABLENAME, FAMILIES); - HTableDescriptor htd = table.getTableDescriptor(); - - for (byte[] family : FAMILIES) { - if (!htd.hasFamily(family)) { - htd.addFamily(new HColumnDescriptor(family)); - } - } // Add 100 edits for CF1, 20 for CF2, 20 for CF3. // These will all be interleaved in the log. 
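
Across the hunks above (TestWarmupRegion, RegionAsTable, and later the thrift handlers), the removed Table.getTableDescriptor(), which returned a mutable HTableDescriptor, gives way to Table.getDescriptor(), which returns an immutable TableDescriptor. Where legacy code still needs the old type, the patch wraps the result in an HTableDescriptor, as table.jsp does. A short sketch; the Table instance is assumed:

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class DescriptorSketch {
  static void dumpFamilies(Table table) throws IOException {
    TableDescriptor td = table.getDescriptor(); // immutable replacement
    for (ColumnFamilyDescriptor cfd : td.getColumnFamilies()) {
      System.out.println(cfd.getNameAsString());
    }
    // Only where a mutable legacy descriptor is still required:
    HTableDescriptor legacy = new HTableDescriptor(td);
    System.out.println(legacy.getTableName());
  }
}
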
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java index 130b6514ac0..3885312c018 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSettingTimeoutOnBlockingPoint.java @@ -107,12 +107,10 @@ public class TestSettingTimeoutOnBlockingPoint { } }); Thread getThread = new Thread(() -> { - try { - try( Table table = TEST_UTIL.getConnection().getTable(tableName)) { - table.setRpcTimeout(1000); - Delete delete = new Delete(ROW1); - table.delete(delete); - } + try (Table table = + TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) { + Delete delete = new Delete(ROW1); + table.delete(delete); } catch (IOException e) { Assert.fail(e.getMessage()); } @@ -122,12 +120,12 @@ public class TestSettingTimeoutOnBlockingPoint { Threads.sleep(1000); getThread.start(); Threads.sleep(2000); - try (Table table = TEST_UTIL.getConnection().getTable(tableName)) { + try (Table table = + TEST_UTIL.getConnection().getTableBuilder(tableName, null).setRpcTimeout(1000).build()) { // We have only two handlers. The first thread will get a write lock for row1 and occupy // the first handler. The second thread need a read lock for row1, it should quit after 1000 // ms and give back the handler because it can not get the lock in time. // So we can get the value using the second handler. - table.setRpcTimeout(1000); table.get(new Get(ROW2)); // Will throw exception if the timeout checking is failed } finally { incrementThread.interrupt(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java index ff46a985568..d74008b856c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java @@ -28,17 +28,14 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilder; import org.apache.hadoop.hbase.CellBuilderFactory; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; @@ -302,11 +299,6 @@ public class TestWALEntrySinkFilter { return configuration; } - @Override - public HTableDescriptor getTableDescriptor() throws IOException { - return null; - } - @Override public TableDescriptor getDescriptor() throws IOException { return null; @@ -372,16 +364,6 @@ public class TestWALEntrySinkFilter { } - @Override - public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException { - return false; - } - - @Override - 
public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put) throws IOException { - return false; - } - @Override public void delete(Delete delete) throws IOException { @@ -392,16 +374,6 @@ public class TestWALEntrySinkFilter { } - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete) throws IOException { - return false; - } - - @Override - public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Delete delete) throws IOException { - return false; - } - @Override public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) { return null; @@ -462,70 +434,26 @@ public class TestWALEntrySinkFilter { } - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, RowMutations mutation) throws IOException { - return false; - } - @Override public long getRpcTimeout(TimeUnit unit) { return 0; } - @Override - public int getRpcTimeout() { - return 0; - } - - @Override - public void setRpcTimeout(int rpcTimeout) { - - } - @Override public long getReadRpcTimeout(TimeUnit unit) { return 0; } - @Override - public int getReadRpcTimeout() { - return 0; - } - - @Override - public void setReadRpcTimeout(int readRpcTimeout) { - - } - @Override public long getWriteRpcTimeout(TimeUnit unit) { return 0; } - @Override - public int getWriteRpcTimeout() { - return 0; - } - - @Override - public void setWriteRpcTimeout(int writeRpcTimeout) { - - } - @Override public long getOperationTimeout(TimeUnit unit) { return 0; } - @Override - public int getOperationTimeout() { - return 0; - } - - @Override - public void setOperationTimeout(int operationTimeout) { - } - @Override public RegionLocator getRegionLocator() throws IOException { return null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java index 53ad1fdd467..a13b189c73e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestCoprocessorWhitelistMasterObserver.java @@ -110,7 +110,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil { UTIL.waitUntilAllRegionsAssigned(TEST_TABLE); Connection connection = ConnectionFactory.createConnection(conf); Table t = connection.getTable(TEST_TABLE); - HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor()); + HTableDescriptor htd = new HTableDescriptor(t.getDescriptor()); htd.addCoprocessor("net.clayb.hbase.coprocessor.NotWhitelisted", new Path(coprocessorPath), Coprocessor.PRIORITY_USER, null); @@ -122,7 +122,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil { // swallow exception from coprocessor } LOG.info("Done Modifying Table"); - assertEquals(0, t.getTableDescriptor().getCoprocessors().size()); + assertEquals(0, t.getDescriptor().getCoprocessorDescriptors().size()); } /** @@ -155,7 +155,7 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil { // coprocessor file admin.disableTable(TEST_TABLE); Table t = connection.getTable(TEST_TABLE); - HTableDescriptor htd = new HTableDescriptor(t.getTableDescriptor()); + HTableDescriptor htd = new HTableDescriptor(t.getDescriptor()); 
htd.addCoprocessor("net.clayb.hbase.coprocessor.Whitelisted", new Path(coprocessorPath), Coprocessor.PRIORITY_USER, null); @@ -321,6 +321,6 @@ public class TestCoprocessorWhitelistMasterObserver extends SecureTestUtil { // ensure table was created and coprocessor is added to table LOG.info("Done Creating Table"); Table t = connection.getTable(TEST_TABLE); - assertEquals(1, t.getTableDescriptor().getCoprocessors().size()); + assertEquals(1, t.getDescriptor().getCoprocessorDescriptors().size()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java index 17674af16af..fb7da123273 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRegionSnapshotTask.java @@ -127,7 +127,7 @@ public class TestRegionSnapshotTask { Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf); final SnapshotManifest manifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor); - manifest.addTableDescriptor(table.getTableDescriptor()); + manifest.addTableDescriptor(table.getDescriptor()); if (!fs.exists(workingDir)) { fs.mkdirs(workingDir); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java index 8f498d38eb4..c3735ecfa20 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java @@ -137,7 +137,7 @@ public class OfflineMetaRebuildTestCore { return this.connection.getTable(tablename); } - private void dumpMeta(HTableDescriptor htd) throws IOException { + private void dumpMeta(TableDescriptor htd) throws IOException { List metaRows = TEST_UTIL.getMetaTableRows(htd.getTableName()); for (byte[] row : metaRows) { LOG.info(Bytes.toString(row)); @@ -162,7 +162,7 @@ public class OfflineMetaRebuildTestCore { byte[] startKey, byte[] endKey) throws IOException { LOG.info("Before delete:"); - HTableDescriptor htd = tbl.getTableDescriptor(); + TableDescriptor htd = tbl.getDescriptor(); dumpMeta(htd); List regions; @@ -203,7 +203,7 @@ public class OfflineMetaRebuildTestCore { protected RegionInfo createRegion(Configuration conf, final Table htbl, byte[] startKey, byte[] endKey) throws IOException { Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME); - HTableDescriptor htd = htbl.getTableDescriptor(); + TableDescriptor htd = htbl.getDescriptor(); RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName()) .setStartKey(startKey) .setEndKey(endKey) diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java index 5b6132d339e..7052d079da8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftHBaseServiceHandler.java @@ -30,7 +30,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilder; @@ -47,6 +46,7 @@ 
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -103,16 +103,13 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb /** * Returns a list of all the column families for a given Table. - * * @param table table - * @throws IOException */ byte[][] getAllColumns(Table table) throws IOException { - HColumnDescriptor[] cds = table.getTableDescriptor().getColumnFamilies(); + ColumnFamilyDescriptor[] cds = table.getDescriptor().getColumnFamilies(); byte[][] columns = new byte[cds.length][]; for (int i = 0; i < cds.length; i++) { - columns[i] = Bytes.add(cds[i].getName(), - KeyValue.COLUMN_FAMILY_DELIM_ARRAY); + columns[i] = Bytes.add(cds[i].getName(), KeyValue.COLUMN_FAMILY_DELIM_ARRAY); } return columns; } @@ -1090,7 +1087,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements Hb TreeMap columns = new TreeMap<>(); table = getTable(tableName); - HTableDescriptor desc = table.getTableDescriptor(); + HTableDescriptor desc = new HTableDescriptor(table.getDescriptor()); for (HColumnDescriptor e : desc.getFamilies()) { ColumnDescriptor col = ThriftUtilities.colDescFromHbase(e); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java index 8b1be58c250..565a9c733f8 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftHBaseServiceHandler.java @@ -227,7 +227,7 @@ public class ThriftHBaseServiceHandler extends HBaseServiceHandler implements TH public List existsAll(ByteBuffer table, List gets) throws TIOError, TException { Table htable = getTable(table); try { - boolean[] exists = htable.existsAll(getsFromThrift(gets)); + boolean[] exists = htable.exists(getsFromThrift(gets)); List result = new ArrayList<>(exists.length); for (boolean exist : exists) { result.add(exist); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java index 6db9474b6ab..2bae6851f08 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftTable.java @@ -29,7 +29,6 @@ import java.util.Arrays; import java.util.List; import java.util.Queue; import java.util.concurrent.TimeUnit; - import org.apache.commons.lang3.NotImplementedException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompareOperator; @@ -409,15 +408,13 @@ public class ThriftTable implements Table { } } - - @Override - public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, + private boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, RowMutations mutation) throws IOException { try { - ByteBuffer valueBuffer = value == null? null : ByteBuffer.wrap(value); + ByteBuffer valueBuffer = value == null ? 
null : ByteBuffer.wrap(value); return client.checkAndMutate(tableNameInBytes, ByteBuffer.wrap(row), ByteBuffer.wrap(family), - ByteBuffer.wrap(qualifier), ThriftUtilities.compareOpFromHBase(op), valueBuffer, - ThriftUtilities.rowMutationsFromHBase(mutation)); + ByteBuffer.wrap(qualifier), ThriftUtilities.compareOpFromHBase(op), valueBuffer, + ThriftUtilities.rowMutationsFromHBase(mutation)); } catch (TException e) { throw new IOException(e); } diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java index d947a86beff..953bf5b5e02 100644 --- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java +++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftConnection.java @@ -29,7 +29,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -614,7 +613,7 @@ public class TestThriftConnection { assertTrue(Bytes.equals(VALUE_1, value1)); assertNull(value2); assertTrue(table.exists(get)); - assertEquals(1, table.existsAll(Collections.singletonList(get)).length); + assertEquals(1, table.exists(Collections.singletonList(get)).length); Delete delete = new Delete(ROW_1); table.checkAndMutate(ROW_1, FAMILYA).qualifier(QUALIFIER_1)
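
The most common rewrite in this change is from the removed checkAndPut/checkAndDelete/checkAndMutate overloads to the fluent CheckAndMutateBuilder, as the RemoteHTable, TestPassCustomCellViaRegionObserver, TestRemoteTable, and TestThriftConnection hunks show. A sketch of the builder idiom; the Table instance and the row/family/qualifier/value bytes are assumed for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  static void example(Table table) throws IOException {
    byte[] row = Bytes.toBytes("row");
    byte[] family = Bytes.toBytes("f");
    byte[] qualifier = Bytes.toBytes("q");
    byte[] value = Bytes.toBytes("v");

    // was: table.checkAndPut(row, family, qualifier, null, put)
    Put put = new Put(row).addColumn(family, qualifier, value);
    boolean putDone = table.checkAndMutate(row, family)
        .qualifier(qualifier).ifNotExists().thenPut(put);

    // was: table.checkAndDelete(row, family, qualifier, value, delete)
    Delete delete = new Delete(row);
    boolean deleteDone = table.checkAndMutate(row, family)
        .qualifier(qualifier).ifEquals(value).thenDelete(delete);

    System.out.println(putDone + " " + deleteDone);
  }
}
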