diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index adf14968491..99feb14db9d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -170,10 +170,10 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   // be waiting for the master lock => deadlock.
   private final Object masterAndZKLock = new Object();
 
-  // thread executor shared by all HTableInterface instances created
+  // thread executor shared by all Table instances created
   // by this connection
   private volatile ExecutorService batchPool = null;
-  // meta thread executor shared by all HTableInterface instances created
+  // meta thread executor shared by all Table instances created
   // by this connection
   private volatile ExecutorService metaLookupPool = null;
   private volatile boolean cleanupPool = false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
deleted file mode 100644
index 9d41218e1a4..00000000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-
-/**
- * Used to communicate with a single HBase table.
- * Obtain an instance from a {@link Connection}.
- *
- * @since 0.21.0
- * @deprecated use {@link org.apache.hadoop.hbase.client.Table} instead
- */
-@Deprecated
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public interface HTableInterface extends Table {
-
-  /**
-   * Gets the name of this table.
-   *
-   * @return the table name.
-   * @deprecated Use {@link #getName()} instead
-   */
-  @Deprecated
-  byte[] getTableName();
-
-  /**
-   * Turns 'auto-flush' on or off.
-   * <p>
-   * When enabled (default), {@link Put} operations don't get buffered/delayed
-   * and are immediately executed. Failed operations are not retried. This is
-   * slower but safer.
-   * <p>
-   * Turning off {@code #autoFlush} means that multiple {@link Put}s will be
-   * accepted before any RPC is actually sent to do the write operations. If the
-   * application dies before pending writes get flushed to HBase, data will be
-   * lost.
-   * <p>
-   * When you turn {@code #autoFlush} off, you should also consider the
-   * {@code #clearBufferOnFail} option. By default, asynchronous {@link Put}
-   * requests will be retried on failure until successful. However, this can
-   * pollute the writeBuffer and slow down batching performance. Additionally,
-   * you may want to issue a number of Put requests and call
-   * {@link #flushCommits()} as a barrier. In both use cases, consider setting
-   * clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
-   * has been called, regardless of success.
-   * <p>
-   * In other words, if you call {@code #setAutoFlush(false)}; HBase will retry N time for each
-   * flushCommit, including the last one when closing the table. This is NOT recommended,
-   * most of the time you want to call {@code #setAutoFlush(false, true)}.
-   *
-   * @param autoFlush
-   *          Whether or not to enable 'auto-flush'.
-   * @param clearBufferOnFail
-   *          Whether to keep Put failures in the writeBuffer. If autoFlush is true, then
-   *          the value of this parameter is ignored and clearBufferOnFail is set to true.
-   *          Setting clearBufferOnFail to false is deprecated since 0.96.
-   * @deprecated in 0.99 since setting clearBufferOnFail is deprecated.
-   * @see BufferedMutator#flush()
-   */
-  @Deprecated
-  void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);
-
-  /**
-   * Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail}.
-   * @deprecated in 0.99 since setting clearBufferOnFail is deprecated. Move on to
-   *             {@link BufferedMutator}
-   */
-  @Deprecated
-  void setAutoFlushTo(boolean autoFlush);
-
-  /**
-   * Tells whether or not 'auto-flush' is turned on.
-   *
-   * @return {@code true} if 'auto-flush' is enabled (default), meaning
-   *         {@link Put} operations don't get buffered/delayed and are immediately
-   *         executed.
-   * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator}
-   */
-  @Deprecated
-  boolean isAutoFlush();
-
-  /**
-   * Executes all the buffered {@link Put} operations.
-   * <p>
-   * This method gets called once automatically for every {@link Put} or batch
-   * of {@link Put}s (when put(List<Put>) is used) when
-   * {@link #isAutoFlush} is {@code true}.
-   * @throws IOException if a remote or network exception occurs.
-   * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator#flush()}
-   */
-  @Deprecated
-  void flushCommits() throws IOException;
-
-  /**
-   * Returns the maximum size in bytes of the write buffer for this HTable.
-   * <p>
-   * The default value comes from the configuration parameter
-   * {@code hbase.client.write.buffer}.
-   * @return The size of the write buffer in bytes.
-   * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator#getWriteBufferSize()}
-   */
-  @Deprecated
-  long getWriteBufferSize();
-
-  /**
-   * Sets the size of the buffer in bytes.
-   * <p>
-   * If the new size is less than the current amount of data in the
-   * write buffer, the buffer gets flushed.
-   * @param writeBufferSize The new write buffer size, in bytes.
-   * @throws IOException if a remote or network exception occurs.
-   * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator} and
-   *             {@link BufferedMutatorParams#writeBufferSize(long)}
-   */
-  @Deprecated
-  void setWriteBufferSize(long writeBufferSize) throws IOException;
-}
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index 9768c96d30e..773d23729ef 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -227,8 +227,6 @@ try (Connection connection = ConnectionFactory.createConnection(conf)) {
 ----
 ====
 
-Constructing HTableInterface implementation is very lightweight and resources are controlled.
-
 .`HTablePool` is Deprecated
 [WARNING]
 ====
@@ -398,7 +396,7 @@ Example: Find all columns in a row and family that start with "abc"
 
 [source,java]
 ----
-HTableInterface t = ...;
+Table t = ...;
 byte[] row = ...;
 byte[] family = ...;
 byte[] prefix = Bytes.toBytes("abc");
@@ -428,7 +426,7 @@ Example: Find all columns in a row and family that start with "abc" or "xyz"
 
 [source,java]
 ----
-HTableInterface t = ...;
+Table t = ...;
 byte[] row = ...;
 byte[] family = ...;
 byte[][] prefixes = new byte[][] {Bytes.toBytes("abc"), Bytes.toBytes("xyz")};
@@ -463,7 +461,7 @@ Example: Find all columns in a row and family between "bbbb" (inclusive) and "bb
 
 [source,java]
 ----
-HTableInterface t = ...;
+Table t = ...;
 byte[] row = ...;
 byte[] family = ...;
 byte[] startColumn = Bytes.toBytes("bbbb");
diff --git a/src/main/asciidoc/_chapters/cp.adoc b/src/main/asciidoc/_chapters/cp.adoc
index 47f92bb05b0..d3fcd47baa3 100644
--- a/src/main/asciidoc/_chapters/cp.adoc
+++ b/src/main/asciidoc/_chapters/cp.adoc
@@ -180,8 +180,7 @@ In contrast to observer coprocessors, where your code is run transparently, endp
 oint coprocessors must be explicitly invoked using the
 link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html#coprocessorService%28java.lang.Class,%20byte%5B%5D,%20byte%5B%5D,%20org.apache.hadoop.hbase.client.coprocessor.Batch.Call%29[CoprocessorService()]
 method available in
-link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html[Table],
-link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTableInterface.html[HTableInterface],
+link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/Table.html[Table]
 or
 link:https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/client/HTable.html[HTable].
 
diff --git a/src/main/asciidoc/_chapters/unit_testing.adoc b/src/main/asciidoc/_chapters/unit_testing.adoc
index 0c4d812f94a..6131d5a9c62 100644
--- a/src/main/asciidoc/_chapters/unit_testing.adoc
+++ b/src/main/asciidoc/_chapters/unit_testing.adoc
@@ -295,28 +295,28 @@ public class MyHBaseIntegrationTest {
 
     @Before
    public void setup() throws Exception {
-    	utility = new HBaseTestingUtility();
-    	utility.startMiniCluster();
+        utility = new HBaseTestingUtility();
+        utility.startMiniCluster();
    }
 
    @Test
-    public void testInsert() throws Exception { 
-    	HTableInterface table = utility.createTable(Bytes.toBytes("MyTest"), CF);
-    	HBaseTestObj obj = new HBaseTestObj();
-    	obj.setRowKey("ROWKEY-1");
-    	obj.setData1("DATA-1");
-    	obj.setData2("DATA-2");
-    	MyHBaseDAO.insertRecord(table, obj);
-    	Get get1 = new Get(Bytes.toBytes(obj.getRowKey()));
-    	get1.addColumn(CF, CQ1);
-    	Result result1 = table.get(get1);
-    	assertEquals(Bytes.toString(result1.getRow()), obj.getRowKey());
-    	assertEquals(Bytes.toString(result1.value()), obj.getData1());
-    	Get get2 = new Get(Bytes.toBytes(obj.getRowKey()));
-    	get2.addColumn(CF, CQ2);
-    	Result result2 = table.get(get2);
-    	assertEquals(Bytes.toString(result2.getRow()), obj.getRowKey());
-    	assertEquals(Bytes.toString(result2.value()), obj.getData2());
+    public void testInsert() throws Exception {
+        Table table = utility.createTable(Bytes.toBytes("MyTest"), CF);
+        HBaseTestObj obj = new HBaseTestObj();
+        obj.setRowKey("ROWKEY-1");
+        obj.setData1("DATA-1");
+        obj.setData2("DATA-2");
+        MyHBaseDAO.insertRecord(table, obj);
+        Get get1 = new Get(Bytes.toBytes(obj.getRowKey()));
+        get1.addColumn(CF, CQ1);
+        Result result1 = table.get(get1);
+        assertEquals(Bytes.toString(result1.getRow()), obj.getRowKey());
+        assertEquals(Bytes.toString(result1.value()), obj.getData1());
+        Get get2 = new Get(Bytes.toBytes(obj.getRowKey()));
+        get2.addColumn(CF, CQ2);
+        Result result2 = table.get(get2);
+        assertEquals(Bytes.toString(result2.getRow()), obj.getRowKey());
+        assertEquals(Bytes.toString(result2.value()), obj.getData2());
    }
 }
----
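
For context, a minimal migration sketch (not part of the patch), assuming the standard HBase client API that the deprecation notes above point to: obtain a `Table` from `Connection.getTable()` where code previously held an `HTableInterface`, and use `BufferedMutator` for the buffered-write pattern previously reached via `setAutoFlush(false)`/`flushCommits()`. The table, family, and qualifier names below are placeholders, echoing the unit-test example in this patch.

[source,java]
----
import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableMigrationSketch {
  public static void main(String[] args) throws IOException {
    // Placeholder names, for illustration only.
    TableName name = TableName.valueOf("MyTest");
    byte[] cf = Bytes.toBytes("CF");

    try (Connection connection = ConnectionFactory.createConnection()) {
      // Immediate, unbuffered writes: Table replaces HTableInterface.
      try (Table table = connection.getTable(name)) {
        Put put = new Put(Bytes.toBytes("ROWKEY-1"));
        put.addColumn(cf, Bytes.toBytes("CQ-1"), Bytes.toBytes("DATA-1"));
        table.put(put);
      }

      // Buffered writes, the old setAutoFlush(false) pattern:
      // BufferedMutator replaces the HTableInterface write buffer.
      try (BufferedMutator mutator = connection.getBufferedMutator(name)) {
        Put put = new Put(Bytes.toBytes("ROWKEY-2"));
        put.addColumn(cf, Bytes.toBytes("CQ-2"), Bytes.toBytes("DATA-2"));
        mutator.mutate(put);
        mutator.flush(); // explicit barrier, replacing flushCommits()
      }
    }
  }
}
----

Code that tuned the old `setWriteBufferSize(long)` would instead pass `BufferedMutatorParams#writeBufferSize(long)` to `Connection.getBufferedMutator(BufferedMutatorParams)`.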