From 682b8ab8a542a903e5807053282693e3a96bad2d Mon Sep 17 00:00:00 2001
From: Gábor Lipták
Date: Sun, 26 Apr 2015 21:07:45 -0400
Subject: [PATCH] HBASE-13569 Correct Javadoc (for Java8)
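
JDK 8 runs the javadoc tool with doclint enabled by default, so Javadoc that
was merely untidy under JDK 7 now fails the build: bare '<', '>' and '&'
characters are malformed HTML, list items that sit outside an enclosing list
are rejected, and @throws tags that name an undeclared or unresolvable
exception are errors. The hunks below mechanically escape the offending
characters as &lt;, &gt; and &amp;, wrap loose <li> items in <ul>...</ul>,
fully qualify exception references, and drop @throws clauses for exceptions
the methods do not throw. The changes are confined to comments and
whitespace.
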
Signed-off-by: Sean Busbey
---
.../hadoop/hbase/HColumnDescriptor.java | 6 +--
.../org/apache/hadoop/hbase/HRegionInfo.java | 7 ++-
.../apache/hadoop/hbase/HTableDescriptor.java | 2 +-
.../hadoop/hbase/MetaTableAccessor.java | 4 +-
.../hbase/client/AbstractClientScanner.java | 4 +-
.../org/apache/hadoop/hbase/client/Admin.java | 12 ++---
.../client/ClientSmallReversedScanner.java | 3 +-
.../hadoop/hbase/client/Durability.java | 4 +-
.../hadoop/hbase/client/HBaseAdmin.java | 3 +-
.../hadoop/hbase/client/HTableInterface.java | 2 +-
.../hbase/client/HTableMultiplexer.java | 3 --
.../org/apache/hadoop/hbase/client/Put.java | 2 +-
.../apache/hadoop/hbase/client/Result.java | 10 ++--
.../hadoop/hbase/client/ResultScanner.java | 2 +-
.../hadoop/hbase/client/RetryingCallable.java | 4 +-
.../RpcRetryingCallerWithReadReplicas.java | 3 +-
.../org/apache/hadoop/hbase/client/Scan.java | 11 ++---
.../client/coprocessor/AggregationClient.java | 41 ++++++++--------
.../hadoop/hbase/client/package-info.java | 5 --
.../hbase/coprocessor/ColumnInterpreter.java | 31 ++++++------
.../hadoop/hbase/filter/FamilyFilter.java | 7 +--
.../apache/hadoop/hbase/filter/Filter.java | 1 -
.../hadoop/hbase/filter/FilterBase.java | 18 +++----
.../hadoop/hbase/filter/FilterList.java | 8 ++--
.../hbase/filter/MultiRowRangeFilter.java | 2 +-
.../hadoop/hbase/filter/PageFilter.java | 4 +-
.../hadoop/hbase/filter/SkipFilter.java | 4 +-
.../hadoop/hbase/ipc/AsyncRpcClient.java | 1 -
.../apache/hadoop/hbase/ipc/ConnectionId.java | 2 +-
.../hadoop/hbase/ipc/ServerRpcController.java | 3 +-
.../hadoop/hbase/protobuf/ProtobufUtil.java | 6 +--
.../hadoop/hbase/quotas/QuotaTableUtil.java | 10 ++--
.../hadoop/hbase/regionserver/BloomType.java | 2 +-
.../hbase/replication/ReplicationPeers.java | 2 +-
.../security/visibility/CellVisibility.java | 7 +--
.../apache/hadoop/hbase/zookeeper/ZKUtil.java | 3 --
.../hbase/zookeeper/ZooKeeperWatcher.java | 1 -
.../java/org/apache/hadoop/hbase/Cell.java | 47 +++++++++++--------
.../apache/hadoop/hbase/CellComparator.java | 4 +-
.../apache/hadoop/hbase/CellScannable.java | 2 +-
.../org/apache/hadoop/hbase/CellScanner.java | 2 +-
.../org/apache/hadoop/hbase/HConstants.java | 3 +-
.../org/apache/hadoop/hbase/KeyValue.java | 27 ++++++-----
.../org/apache/hadoop/hbase/KeyValueUtil.java | 2 +-
.../org/apache/hadoop/hbase/ServerName.java | 11 +++--
.../org/apache/hadoop/hbase/TableName.java | 4 +-
.../hadoop/hbase/io/CellOutputStream.java | 2 +-
.../org/apache/hadoop/hbase/io/TimeRange.java | 2 +-
.../hbase/io/crypto/KeyStoreKeyProvider.java | 2 +-
.../hadoopbackport/ThrottledInputStream.java | 8 ++--
.../hadoop/hbase/io/util/Dictionary.java | 4 +-
.../apache/hadoop/hbase/security/User.java | 1 -
.../org/apache/hadoop/hbase/types/Struct.java | 6 +--
.../hadoop/hbase/util/AbstractByteRange.java | 4 +-
.../apache/hadoop/hbase/util/Addressing.java | 8 ++--
.../org/apache/hadoop/hbase/util/Base64.java | 2 +-
.../apache/hadoop/hbase/util/ByteRange.java | 4 +-
.../org/apache/hadoop/hbase/util/Bytes.java | 8 ++--
.../apache/hadoop/hbase/util/ClassSize.java | 4 +-
.../hbase/util/DefaultEnvironmentEdge.java | 3 +-
.../util/IncrementingEnvironmentEdge.java | 3 +-
.../apache/hadoop/hbase/util/JenkinsHash.java | 4 +-
.../apache/hadoop/hbase/util/KeyLocker.java | 5 +-
.../hadoop/hbase/util/OrderedBytes.java | 9 ++--
.../org/apache/hadoop/hbase/util/Sleeper.java | 4 +-
.../hadoop/hbase/metrics/BaseSource.java | 2 +-
.../codec/prefixtree/PrefixTreeCodec.java | 3 +-
.../codec/prefixtree/PrefixTreeSeeker.java | 8 ++--
.../prefixtree/decode/ArraySearcherPool.java | 3 +-
.../decode/PrefixTreeArraySearcher.java | 11 +++--
.../prefixtree/encode/PrefixTreeEncoder.java | 15 +++---
.../encode/column/ColumnNodeWriter.java | 7 ++-
.../encode/column/ColumnSectionWriter.java | 6 ++-
.../prefixtree/encode/row/RowNodeWriter.java | 3 +-
.../prefixtree/encode/tokenize/Tokenizer.java | 2 +
.../encode/tokenize/TokenizerNode.java | 32 +++++++------
.../prefixtree/scanner/CellSearcher.java | 34 +++++++++-----
.../scanner/ReversibleCellScanner.java | 4 +-
.../hadoop/hbase/util/vint/UFIntTool.java | 8 ++--
.../hadoop/hbase/util/vint/UVIntTool.java | 4 +-
.../hadoop/hbase/util/vint/UVLongTool.java | 4 +-
.../hadoop/hbase/procedure2/Procedure.java | 6 +--
.../hbase/procedure2/SequentialProcedure.java | 4 +-
.../protobuf/HBaseZeroCopyByteString.java | 6 +++
.../apache/hadoop/hbase/InterProcessLock.java | 2 +-
.../example/HFileArchiveTableMonitor.java | 2 +-
.../hbase/constraint/ConstraintException.java | 2 +-
.../hadoop/hbase/constraint/package-info.java | 6 +--
.../SplitLogManagerCoordination.java | 2 +-
.../ZKSplitLogManagerCoordination.java | 5 +-
.../coprocessor/AggregateImplementation.java | 11 ++---
.../coprocessor/MultiRowMutationEndpoint.java | 10 ++--
.../hbase/coprocessor/RegionObserver.java | 8 ++--
.../hbase/coprocessor/package-info.java | 36 +++++++-------
.../hbase/errorhandling/TimeoutException.java | 3 +-
.../apache/hadoop/hbase/http/HttpServer.java | 13 ++---
.../hadoop/hbase/http/package-info.java | 3 +-
.../org/apache/hadoop/hbase/io/FileLink.java | 2 +-
.../org/apache/hadoop/hbase/io/HFileLink.java | 1 -
.../org/apache/hadoop/hbase/io/WALLink.java | 4 +-
.../apache/hadoop/hbase/io/hfile/HFile.java | 5 +-
.../hadoop/hbase/io/hfile/HFileBlock.java | 12 ++---
.../hbase/io/hfile/HFileReaderImpl.java | 2 +-
.../hadoop/hbase/io/hfile/HFileScanner.java | 18 +++----
.../hadoop/hbase/io/hfile/LruBlockCache.java | 2 +-
.../hbase/io/hfile/LruCachedBlockQueue.java | 2 +-
.../hadoop/hbase/io/hfile/package-info.java | 2 +-
.../hadoop/hbase/ipc/RpcCallContext.java | 2 +-
.../mapreduce/MultiTableInputFormat.java | 2 +-
.../hbase/mapreduce/TableMapReduceUtil.java | 9 ++--
.../mapreduce/TableRecordReaderImpl.java | 3 +-
.../apache/hadoop/hbase/master/HMaster.java | 2 +-
.../hbase/master/MasterRpcServices.java | 2 +-
.../hadoop/hbase/master/RegionStateStore.java | 2 +-
.../master/balancer/SimpleLoadBalancer.java | 4 +-
.../balancer/StochasticLoadBalancer.java | 4 +-
.../hbase/regionserver/ColumnTracker.java | 14 +++---
.../regionserver/CompactionRequestor.java | 4 +-
.../hbase/regionserver/DefaultMemStore.java | 2 +-
.../hbase/regionserver/DeleteTracker.java | 7 +--
.../regionserver/ExplicitColumnTracker.java | 7 +--
.../hbase/regionserver/HeapMemoryManager.java | 2 +-
...creasingToUpperBoundRegionSplitPolicy.java | 4 +-
.../hadoop/hbase/regionserver/LruHashMap.java | 6 +--
.../hbase/regionserver/MemStoreChunkPool.java | 2 +-
.../regionserver/MetricsRegionServer.java | 3 +-
.../MiniBatchOperationInProgress.java | 2 +-
.../NonReversedNonLazyKeyValueScanner.java | 2 +-
.../hbase/regionserver/RSRpcServices.java | 8 ++--
.../hadoop/hbase/regionserver/Region.java | 3 +-
.../hbase/regionserver/RegionScanner.java | 4 +-
.../hbase/regionserver/ScanDeleteTracker.java | 7 +--
.../hadoop/hbase/regionserver/Store.java | 7 +--
.../hbase/regionserver/StoreScanner.java | 2 +-
.../hbase/regionserver/wal/ReplayHLogKey.java | 1 -
.../hbase/regionserver/wal/WALEdit.java | 14 +++---
.../regionserver/wal/WALEditsReplaySink.java | 4 +-
.../replication/HBaseReplicationEndpoint.java | 1 -
.../HBaseInterClusterReplicationEndpoint.java | 3 +-
.../regionserver/ReplicationSink.java | 7 +--
.../regionserver/ReplicationSource.java | 4 +-
.../ReplicationSourceManager.java | 2 +
.../regionserver/ReplicationThrottler.java | 2 +-
.../security/access/AccessControlLists.java | 1 +
.../security/access/AccessController.java | 2 +
.../security/visibility/VisibilityUtils.java | 2 +-
.../snapshot/SnapshotDescriptionUtils.java | 14 +++---
.../hadoop/hbase/util/BloomFilterWriter.java | 2 +-
.../org/apache/hadoop/hbase/util/FSUtils.java | 9 ++--
.../apache/hadoop/hbase/util/HBaseFsck.java | 2 +-
.../hadoop/hbase/util/HFileV1Detector.java | 6 ++-
.../hadoop/hbase/util/MultiHConnection.java | 1 -
.../hadoop/hbase/util/RegionSplitter.java | 14 +++---
.../hbase/util/ServerRegionReplicaUtil.java | 2 +-
.../org/apache/hadoop/hbase/wal/WALKey.java | 4 +-
.../hadoop/hbase/zookeeper/ZKSplitLog.java | 8 ++--
.../hbase/thrift/ThriftServerRunner.java | 3 +-
.../hadoop/hbase/thrift/generated/Hbase.java | 4 --
158 files changed, 510 insertions(+), 458 deletions(-)
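
Reviewer note (commentary only; git am ignores text between the --- marker
and the first diff header): the failures fixed here reproduce under a JDK 8
toolchain with a plain Javadoc build, for example "mvn javadoc:javadoc", and
passing "-Xdoclint:none" to javadoc suppresses them. A minimal sketch of the
two most common patterns, using a hypothetical class rather than anything
from this change set:

    public class DoclintSketch {
      /**
       * Fails JDK 8 doclint: the bare '<' is malformed HTML, and the
       * list item has no enclosing list.
       * <li> valid when start < end
       */
      void before() {}

      /**
       * Passes doclint: the bracket is escaped and the item is wrapped
       * in a list.
       * <ul>
       * <li> valid when start &lt; end
       * </ul>
       */
      void after() {}
    }
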
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 20cfbeff898..3da018a0e39 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -85,8 +85,8 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
/**
* Key for cache data into L1 if cache is set up with more than one tier.
* To set in the shell, do something like this:
- * hbase(main):003:0> create 't',
- * {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}}
+ * hbase(main):003:0&gt; create 't',
+ * {NAME =&gt; 't', CONFIGURATION =&gt; {CACHE_DATA_IN_L1 =&gt; 'true'}}
*/
public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
@@ -115,7 +115,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
/**
* Retain all cells across flushes and compactions even if they fall behind
* a delete tombstone. To see all retained cells, do a 'raw' scan; see
- * Scan#setRaw or pass RAW => true attribute in the shell.
+ * Scan#setRaw or pass RAW =&gt; true attribute in the shell.
*/
public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS";
public static final String COMPRESS_TAGS = "COMPRESS_TAGS";
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index adca3d7f1da..c134063c9a0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -54,17 +54,21 @@ import com.google.protobuf.InvalidProtocolBufferException;
* about the region.
*
* The region has a unique name which consists of the following fields:
+ * <ul>
* <li> tableName : The name of the table
* <li> startKey : The startKey for the region.
* <li> regionId : A timestamp when the region is created.
* <li> replicaId : An id starting from 0 to differentiate replicas of the same region range
* but hosted in separated servers. The same region range can be hosted in multiple locations.
* <li> encodedName : An MD5 encoded string for the region name.
+ * </ul>
*
* Other than the fields in the region name, region info contains:
+ * <ul>
* <li> endKey : the endKey for the region (exclusive)
* <li> split : Whether the region is split
* <li> offline : Whether the region is offline
+ * </ul>
*
* In 0.98 or before, a list of table's regions would fully cover the total keyspace, and at any
* point in time, a row key always belongs to a single region, which is hosted in a single server.
@@ -647,7 +651,7 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* by this region. For example, if the region is foo,a,g and this is
* passed ["b","c"] or ["a","c"] it will return true, but if this is passed
* ["b","z"] it will return false.
- * @throws IllegalArgumentException if the range passed is invalid (ie end < start)
+ * @throws IllegalArgumentException if the range passed is invalid (ie. end &lt; start)
*/
public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) {
@@ -1098,7 +1102,6 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
* @param r Result to pull from
* @return A pair of the {@link HRegionInfo} and the {@link ServerName}
* (or null for server address if no address set in hbase:meta).
- * @throws IOException
* @deprecated use MetaTableAccessor methods for interacting with meta layouts
*/
@Deprecated
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index f2f00773e68..58067ead0fc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -179,7 +179,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
/**
* INTERNAL flag to indicate whether or not the memstore should be replicated
- * for read-replicas (CONSISTENCY => TIMELINE).
+ * for read-replicas (CONSISTENCY =&gt; TIMELINE).
*/
public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION";
private static final Bytes REGION_MEMSTORE_REPLICATION_KEY =
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index c53f99800f5..86e8d464994 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -180,7 +180,7 @@ public class MetaTableAccessor {
}
}
- /** The delimiter for meta columns for replicaIds > 0 */
+ /** The delimiter for meta columns for replicaIds &gt; 0 */
protected static final char META_REPLICA_ID_DELIMITER = '_';
/** A regex for parsing server columns from meta. See above javadoc for meta layout */
@@ -1080,7 +1080,7 @@ public class MetaTableAccessor {
/**
* Fetch table states from META table
* @param conn connection to use
- * @return map {tableName -> state}
+ * @return map {tableName -&gt; state}
* @throws IOException
*/
public static Map<TableName, TableState> getTableStates(Connection conn)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
index dc325a3cfe1..7658faff8b6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java
@@ -54,11 +54,11 @@ public abstract class AbstractClientScanner implements ResultScanner {
}
/**
- * Get nbRows rows.
+ * Get nbRows rows.
* How many RPCs are made is determined by the {@link Scan#setCaching(int)}
* setting (or hbase.client.scanner.caching in hbase-site.xml).
* @param nbRows number of rows to return
- * @return Between zero and nbRows RowResults. Scan is done
+ * @return Between zero and nbRows rowResults. Scan is done
* if returned array is of zero-length (We never return null).
* @throws IOException
*/
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f00d6c918b3..fcc0cae4e3d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -199,7 +199,7 @@ public interface Admin extends Abortable, Closeable {
*
* @param desc table descriptor for table
* @throws IllegalArgumentException if the table name is reserved
- * @throws MasterNotRunningException if master is not running
+ * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
* @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
* threads, the table may have been created between test-for-existence and attempt-at-creation).
* @throws IOException if a remote or network exception occurs
@@ -218,7 +218,7 @@ public interface Admin extends Abortable, Closeable {
* @param endKey end of key range
* @param numRegions the total number of regions to create
* @throws IllegalArgumentException if the table name is reserved
- * @throws MasterNotRunningException if master is not running
+ * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
* @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
* threads, the table may have been created between test-for-existence and attempt-at-creation).
* @throws IOException
@@ -235,7 +235,7 @@ public interface Admin extends Abortable, Closeable {
* @param splitKeys array of split keys for the initial regions of the table
* @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated
* and if the split key has empty byte array.
- * @throws MasterNotRunningException if master is not running
+ * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
* @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent
* threads, the table may have been created between test-for-existence and attempt-at-creation).
* @throws IOException
@@ -248,11 +248,11 @@ public interface Admin extends Abortable, Closeable {
* It may throw ExecutionException if there was an error while executing the operation
* or TimeoutException in case the wait timeout was not long enough to allow the
* operation to complete.
+ * Throws IllegalArgumentException Bad table name, if the split keys
+ * are repeated and if the split key has empty byte array.
*
* @param desc table descriptor for table
* @param splitKeys keys to check if the table has been created with all split keys
- * @throws IllegalArgumentException Bad table name, if the split keys
- * are repeated and if the split key has empty byte array.
* @throws IOException if a remote or network exception occurs
* @return the result of the async creation. You can use Future.get(long, TimeUnit)
* to wait on the operation to complete.
@@ -727,7 +727,7 @@ public interface Admin extends Abortable, Closeable {
* @param destServerName The servername of the destination regionserver. If passed the empty byte
* array we'll assign to a random server. A server name is made of host, port and startcode.
* Here is an example: host187.example.com,60020,1289493121758
- * @throws UnknownRegionException Thrown if we can't find a region named
+ * @throws IOException if we can't find a region named
* encodedRegionName
*/
void move(final byte[] encodedRegionName, final byte[] destServerName)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
index 28502dc0244..5fac93a232b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java
@@ -39,10 +39,11 @@ import org.apache.hadoop.hbase.util.Bytes;
import com.google.common.annotations.VisibleForTesting;
/**
+ *
* Client scanner for small reversed scan. Generally, only one RPC is called to fetch the
* scan results, unless the results cross multiple regions or the row count of
* results exceed the caching.
- *
+ *
* For small scan, it will get better performance than {@link ReversedClientScanner}
*/
@InterfaceAudience.Private
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java
index bc9a4ed8894..9b35e04e261 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java
@@ -47,13 +47,13 @@ public enum Durability {
* Write the Mutation to the WAL synchronously.
* The data is flushed to the filesystem implementation, but not necessarily to disk.
* For HDFS this will flush the data to the designated number of DataNodes.
- * See HADOOP-6313
+ * See HADOOP-6313
*/
SYNC_WAL,
/**
* Write the Mutation to the WAL synchronously and force the entries to disk.
* (Note: this is currently not supported and will behave identical to {@link #SYNC_WAL})
- * See HADOOP-6313
+ * See HADOOP-6313
*/
FSYNC_WAL
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 7047714ba51..a06fb2cef1d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -231,7 +231,8 @@ public class HBaseAdmin implements Admin {
* The connection to master will be created when required by admin functions.
*
* @param connection The Connection instance to use
- * @throws MasterNotRunningException, ZooKeeperConnectionException are not
+ * @throws MasterNotRunningException
+ * @throws ZooKeeperConnectionException are not
* thrown anymore but kept into the interface for backward api compatibility
* @deprecated Constructing HBaseAdmin objects manually has been deprecated.
* Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
index 84363079184..745c7709db0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java
@@ -126,7 +126,7 @@ public interface HTableInterface extends Table {
* Executes all the buffered {@link Put} operations.
*
* This method gets called once automatically for every {@link Put} or batch
- * of {@link Put}s (when put(List<Put>) is used) when
+ * of {@link Put}s (when put(List&lt;Put&gt;) is used) when
* {@link #isAutoFlush} is {@code true}.
* @throws IOException if a remote or network exception occurs.
* @deprecated as of 1.0.0. Replaced by {@link BufferedMutator#flush()}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 10308da4193..b1f5b9f1c2e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -121,7 +121,6 @@ public class HTableMultiplexer {
* @param tableName
* @param put
* @return true if the request can be accepted by its corresponding buffer queue.
- * @throws IOException
*/
public boolean put(TableName tableName, final Put put) {
return put(tableName, put, this.retryNum);
@@ -133,7 +132,6 @@ public class HTableMultiplexer {
* @param tableName
* @param puts
* @return the list of puts which could not be queued
- * @throws IOException
*/
public List<Put> put(TableName tableName, final List<Put> puts) {
if (puts == null)
@@ -169,7 +167,6 @@ public class HTableMultiplexer {
* retried before dropping the request.
* Return false if the queue is already full.
* @return true if the request can be accepted by its corresponding buffer queue.
- * @throws IOException
*/
public boolean put(final TableName tableName, final Put put, int retry) {
if (retry <= 0) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
index c895eb43764..717ea3fcb6e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java
@@ -328,7 +328,7 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
/**
* A convenience method to determine if this object's familyMap contains
- * a value assigned to the given family & qualifier.
+ * a value assigned to the given family &amp; qualifier.
* Both given arguments must match the KeyValue object to return true.
*
* @param family column family
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index 666069cded1..702983bd321 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -204,7 +204,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Return the array of Cells backing this Result instance.
*
- * The array is sorted from smallest -> largest using the
+ * The array is sorted from smallest -&gt; largest using the
* {@link CellComparator#COMPARATOR}.
*
* The array only contains what your Get or Scan specifies and no more.
@@ -601,7 +601,7 @@ public class Result implements CellScannable, CellScanner {
* Map of families to all versions of its qualifiers and values.
*
* Returns a three level Map of the form:
- * Map&family,Map<qualifier,Map<timestamp,value>>>
+ * Map&amp;family,Map&lt;qualifier,Map&lt;timestamp,value&gt;&gt;&gt;
*
* Note: All other map returning methods make use of this map internally.
* @return map from families to qualifiers to versions
@@ -643,7 +643,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Map of families to their most recent qualifiers and values.
*
- * Returns a two level Map of the form: Map&family,Map<qualifier,value>>
+ * Returns a two level Map of the form: Map&amp;family,Map&lt;qualifier,value&gt;&gt;
*
* The most recent version of each qualifier will be used.
* @return map from families to qualifiers and value
@@ -675,7 +675,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Map of qualifiers to values.
*
- * Returns a Map of the form: Map<qualifier,value>
+ * Returns a Map of the form: Map&lt;qualifier,value&gt;
* @param family column family to get
* @return map of qualifiers to values
*/
@@ -945,4 +945,4 @@ public class Result implements CellScannable, CellScanner {
throw new UnsupportedOperationException("Attempting to modify readonly EMPTY_RESULT!");
}
}
-}
\ No newline at end of file
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
index 6b7f1ddab36..d3efbdadbfe 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java
@@ -42,7 +42,7 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
/**
* @param nbRows number of rows to return
- * @return Between zero and nbRows Results
+ * @return Between zero and nbRows results
* @throws IOException e
*/
Result [] next(int nbRows) throws IOException;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java
index e468d3cf781..ea65fcf10e7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java
@@ -24,7 +24,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
- * A Callable that will be retried. If {@link #call(int)} invocation throws exceptions,
+ * A Callable&lt;T&gt; that will be retried. If {@link #call(int)} invocation throws exceptions,
* we will call {@link #throwable(Throwable, boolean)} with whatever the exception was.
* @param <T>
*/
@@ -42,7 +42,7 @@ public interface RetryingCallable<T> {
* make it so we succeed on next call (clear caches, do relookup of locations, etc.).
* @param t
* @param retrying True if we are in retrying mode (we are not in retrying mode when max
- * retries == 1; we ARE in retrying mode if retries > 1 even when we are the last attempt)
+ * retries == 1; we ARE in retrying mode if retries &gt; 1 even when we are the last attempt)
*/
void throwable(final Throwable t, boolean retrying);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
index 8f288818852..d610d8c5948 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
@@ -174,6 +174,7 @@ public class RpcRetryingCallerWithReadReplicas {
}
/**
+ *
* Algo:
* - we put the query into the execution pool.
* - after x ms, if we don't have a result, we add the queries for the secondary replicas
@@ -186,7 +187,7 @@ public class RpcRetryingCallerWithReadReplicas {
* - a call is a thread. Let's not multiply the number of thread by the number of replicas.
* Server side, if we can cancel when it's still in the handler pool, it's much better, as a call
* can take some i/o.
- *
+ *
* Globally, the number of retries, timeout and so on still applies, but it's per replica,
* not global. We continue until all retries are done, or all timeouts are exceeded.
*/
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index a0193fb2ed9..14b721bf08f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -727,10 +727,10 @@ public class Scan extends Query {
* this can deliver huge perf gains when there's a cf with lots of data; however, it can
* also lead to some inconsistent results, as follows:
* - if someone does a concurrent update to both column families in question you may get a row
- * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } }
- * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan
- * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 },
- * { video => "my dog" } }.
+ * that never existed, e.g. for { rowKey = 5, { cat_videos =&gt; 1 }, { video =&gt; "my cat" } }
+ * someone puts rowKey 5 with { cat_videos =&gt; 0 }, { video =&gt; "my dog" }, concurrent scan
+ * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos =&gt; 1 },
+ * { video =&gt; "my dog" } }.
* - if there's a concurrent split and you have more than 2 column families, some rows may be
* missing some column families.
*/
@@ -982,7 +982,6 @@ public class Scan extends Query {
return ProtobufUtil.toScanMetrics(bytes);
}
-
public Boolean isAsyncPrefetch() {
return asyncPrefetch;
}
@@ -991,6 +990,4 @@ public class Scan extends Query {
this.asyncPrefetch = asyncPrefetch;
return this;
}
-
-
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index 5421e57a43f..594a459b169 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -64,16 +64,17 @@ import com.google.protobuf.Message;
*
* This will serve as the client side handler for invoking the aggregate
* functions.
- *
* For all aggregate functions,
- * <li>start row < end row is an essential condition (if they are not
+ * <ul>
+ * <li>start row &lt; end row is an essential condition (if they are not
* {@link HConstants#EMPTY_BYTE_ARRAY})
* <li>Column family can't be null. In case where multiple families are
* provided, an IOException will be thrown. An optional column qualifier can
- * also be defined.
+ * also be defined.
* <li>For methods to find maximum, minimum, sum, rowcount, it returns the
* parameter type. For average and std, it returns a double value. For row
- * count, it returns a long value.
+ * count, it returns a long value.
+ * </ul>
* Call {@link #close()} when done.
*/
@InterfaceAudience.Private
@@ -109,10 +110,10 @@ public class AggregationClient implements Closeable {
* @param tableName
* @param ci
* @param scan
- * @return max val
+ * @return max val &lt;R&gt;
* @throws Throwable
* The caller is supposed to handle the exception as they are thrown
- * & propagated to it.
+ * &amp; propagated to it.
*/
public <R, S, P extends Message, Q extends Message, T extends Message> R max(
final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
@@ -129,10 +130,10 @@ public class AggregationClient implements Closeable {
* @param table
* @param ci
* @param scan
- * @return max val
+ * @return max val &lt;R&gt;
* @throws Throwable
* The caller is supposed to handle the exception as they are thrown
- * & propagated to it.
+ * &amp; propagated to it.
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
R max(final Table table, final ColumnInterpreter<R, S, P, Q, T> ci,
@@ -199,7 +200,7 @@ public class AggregationClient implements Closeable {
* @param tableName
* @param ci
* @param scan
- * @return min val
+ * @return min val &lt;R&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> R min(
@@ -217,7 +218,7 @@ public class AggregationClient implements Closeable {
* @param table
* @param ci
* @param scan
- * @return min val
+ * @return min val &lt;R&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
@@ -269,11 +270,11 @@ public class AggregationClient implements Closeable {
* optimised the operation. In case qualifier is provided, I can't use the
* filter as it may set the flag to skip to next row, but the value read is
* not of the given filter: in this case, this particular row will not be
- * counted ==> an error.
+ * counted ==&gt; an error.
* @param tableName
* @param ci
* @param scan
- * @return
+ * @return &lt;R, S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(
@@ -290,11 +291,11 @@ public class AggregationClient implements Closeable {
* optimised the operation. In case qualifier is provided, I can't use the
* filter as it may set the flag to skip to next row, but the value read is
* not of the given filter: in this case, this particular row will not be
- * counted ==> an error.
+ * counted ==&gt; an error.
* @param table
* @param ci
* @param scan
- * @return
+ * @return &lt;R, S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
@@ -341,7 +342,7 @@ public class AggregationClient implements Closeable {
* @param tableName
* @param ci
* @param scan
- * @return sum
+ * @return sum &lt;S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> S sum(
@@ -358,7 +359,7 @@ public class AggregationClient implements Closeable {
* @param table
* @param ci
* @param scan
- * @return sum
+ * @return sum &lt;S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
@@ -485,7 +486,7 @@ public class AggregationClient implements Closeable {
* @param tableName
* @param ci
* @param scan
- * @return
+ * @return &lt;R, S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
@@ -504,7 +505,7 @@ public class AggregationClient implements Closeable {
* @param table
* @param ci
* @param scan
- * @return
+ * @return &lt;R, S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> double avg(
@@ -593,7 +594,7 @@ public class AggregationClient implements Closeable {
* @param tableName
* @param ci
* @param scan
- * @return
+ * @return &lt;R, S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message>
@@ -613,7 +614,7 @@ public class AggregationClient implements Closeable {
* @param table
* @param ci
* @param scan
- * @return
+ * @return &lt;R, S&gt;
* @throws Throwable
*/
public <R, S, P extends Message, Q extends Message, T extends Message> double std(
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
index ecf45953068..cf28c91ba3a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java
@@ -203,14 +203,9 @@ public class MyLittleHBaseClient {
- HBase Home Page
- Hadoop Home Page
-
-
-
* See also the section in the HBase Reference Guide where it discusses
* HBase Client. It
* has section on how to access HBase from inside your multithreaded environment
* how to control resources consumed client-side, etc.
-