From 682b8ab8a542a903e5807053282693e3a96bad2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A1bor=20Lipt=C3=A1k?= Date: Sun, 26 Apr 2015 21:07:45 -0400 Subject: [PATCH] HBASE-13569 Correct Javadoc (for Java8) Signed-off-by: Sean Busbey --- .../hadoop/hbase/HColumnDescriptor.java | 6 +-- .../org/apache/hadoop/hbase/HRegionInfo.java | 7 ++- .../apache/hadoop/hbase/HTableDescriptor.java | 2 +- .../hadoop/hbase/MetaTableAccessor.java | 4 +- .../hbase/client/AbstractClientScanner.java | 4 +- .../org/apache/hadoop/hbase/client/Admin.java | 12 ++--- .../client/ClientSmallReversedScanner.java | 3 +- .../hadoop/hbase/client/Durability.java | 4 +- .../hadoop/hbase/client/HBaseAdmin.java | 3 +- .../hadoop/hbase/client/HTableInterface.java | 2 +- .../hbase/client/HTableMultiplexer.java | 3 -- .../org/apache/hadoop/hbase/client/Put.java | 2 +- .../apache/hadoop/hbase/client/Result.java | 10 ++-- .../hadoop/hbase/client/ResultScanner.java | 2 +- .../hadoop/hbase/client/RetryingCallable.java | 4 +- .../RpcRetryingCallerWithReadReplicas.java | 3 +- .../org/apache/hadoop/hbase/client/Scan.java | 11 ++--- .../client/coprocessor/AggregationClient.java | 41 ++++++++-------- .../hadoop/hbase/client/package-info.java | 5 -- .../hbase/coprocessor/ColumnInterpreter.java | 31 ++++++------ .../hadoop/hbase/filter/FamilyFilter.java | 7 +-- .../apache/hadoop/hbase/filter/Filter.java | 1 - .../hadoop/hbase/filter/FilterBase.java | 18 +++---- .../hadoop/hbase/filter/FilterList.java | 8 ++-- .../hbase/filter/MultiRowRangeFilter.java | 2 +- .../hadoop/hbase/filter/PageFilter.java | 4 +- .../hadoop/hbase/filter/SkipFilter.java | 4 +- .../hadoop/hbase/ipc/AsyncRpcClient.java | 1 - .../apache/hadoop/hbase/ipc/ConnectionId.java | 2 +- .../hadoop/hbase/ipc/ServerRpcController.java | 3 +- .../hadoop/hbase/protobuf/ProtobufUtil.java | 6 +-- .../hadoop/hbase/quotas/QuotaTableUtil.java | 10 ++-- .../hadoop/hbase/regionserver/BloomType.java | 2 +- .../hbase/replication/ReplicationPeers.java | 2 +- .../security/visibility/CellVisibility.java | 7 +-- .../apache/hadoop/hbase/zookeeper/ZKUtil.java | 3 -- .../hbase/zookeeper/ZooKeeperWatcher.java | 1 - .../java/org/apache/hadoop/hbase/Cell.java | 47 +++++++++++-------- .../apache/hadoop/hbase/CellComparator.java | 4 +- .../apache/hadoop/hbase/CellScannable.java | 2 +- .../org/apache/hadoop/hbase/CellScanner.java | 2 +- .../org/apache/hadoop/hbase/HConstants.java | 3 +- .../org/apache/hadoop/hbase/KeyValue.java | 27 ++++++----- .../org/apache/hadoop/hbase/KeyValueUtil.java | 2 +- .../org/apache/hadoop/hbase/ServerName.java | 11 +++-- .../org/apache/hadoop/hbase/TableName.java | 4 +- .../hadoop/hbase/io/CellOutputStream.java | 2 +- .../org/apache/hadoop/hbase/io/TimeRange.java | 2 +- .../hbase/io/crypto/KeyStoreKeyProvider.java | 2 +- .../hadoopbackport/ThrottledInputStream.java | 8 ++-- .../hadoop/hbase/io/util/Dictionary.java | 4 +- .../apache/hadoop/hbase/security/User.java | 1 - .../org/apache/hadoop/hbase/types/Struct.java | 6 +-- .../hadoop/hbase/util/AbstractByteRange.java | 4 +- .../apache/hadoop/hbase/util/Addressing.java | 8 ++-- .../org/apache/hadoop/hbase/util/Base64.java | 2 +- .../apache/hadoop/hbase/util/ByteRange.java | 4 +- .../org/apache/hadoop/hbase/util/Bytes.java | 8 ++-- .../apache/hadoop/hbase/util/ClassSize.java | 4 +- .../hbase/util/DefaultEnvironmentEdge.java | 3 +- .../util/IncrementingEnvironmentEdge.java | 3 +- .../apache/hadoop/hbase/util/JenkinsHash.java | 4 +- .../apache/hadoop/hbase/util/KeyLocker.java | 5 +- .../hadoop/hbase/util/OrderedBytes.java | 9 
++-- .../org/apache/hadoop/hbase/util/Sleeper.java | 4 +- .../hadoop/hbase/metrics/BaseSource.java | 2 +- .../codec/prefixtree/PrefixTreeCodec.java | 3 +- .../codec/prefixtree/PrefixTreeSeeker.java | 8 ++-- .../prefixtree/decode/ArraySearcherPool.java | 3 +- .../decode/PrefixTreeArraySearcher.java | 11 +++-- .../prefixtree/encode/PrefixTreeEncoder.java | 15 +++--- .../encode/column/ColumnNodeWriter.java | 7 ++- .../encode/column/ColumnSectionWriter.java | 6 ++- .../prefixtree/encode/row/RowNodeWriter.java | 3 +- .../prefixtree/encode/tokenize/Tokenizer.java | 2 + .../encode/tokenize/TokenizerNode.java | 32 +++++++------ .../prefixtree/scanner/CellSearcher.java | 34 +++++++++----- .../scanner/ReversibleCellScanner.java | 4 +- .../hadoop/hbase/util/vint/UFIntTool.java | 8 ++-- .../hadoop/hbase/util/vint/UVIntTool.java | 4 +- .../hadoop/hbase/util/vint/UVLongTool.java | 4 +- .../hadoop/hbase/procedure2/Procedure.java | 6 +-- .../hbase/procedure2/SequentialProcedure.java | 4 +- .../protobuf/HBaseZeroCopyByteString.java | 6 +++ .../apache/hadoop/hbase/InterProcessLock.java | 2 +- .../example/HFileArchiveTableMonitor.java | 2 +- .../hbase/constraint/ConstraintException.java | 2 +- .../hadoop/hbase/constraint/package-info.java | 6 +-- .../SplitLogManagerCoordination.java | 2 +- .../ZKSplitLogManagerCoordination.java | 5 +- .../coprocessor/AggregateImplementation.java | 11 ++--- .../coprocessor/MultiRowMutationEndpoint.java | 10 ++-- .../hbase/coprocessor/RegionObserver.java | 8 ++-- .../hbase/coprocessor/package-info.java | 36 +++++++------- .../hbase/errorhandling/TimeoutException.java | 3 +- .../apache/hadoop/hbase/http/HttpServer.java | 13 ++--- .../hadoop/hbase/http/package-info.java | 3 +- .../org/apache/hadoop/hbase/io/FileLink.java | 2 +- .../org/apache/hadoop/hbase/io/HFileLink.java | 1 - .../org/apache/hadoop/hbase/io/WALLink.java | 4 +- .../apache/hadoop/hbase/io/hfile/HFile.java | 5 +- .../hadoop/hbase/io/hfile/HFileBlock.java | 12 ++--- .../hbase/io/hfile/HFileReaderImpl.java | 2 +- .../hadoop/hbase/io/hfile/HFileScanner.java | 18 +++---- .../hadoop/hbase/io/hfile/LruBlockCache.java | 2 +- .../hbase/io/hfile/LruCachedBlockQueue.java | 2 +- .../hadoop/hbase/io/hfile/package-info.java | 2 +- .../hadoop/hbase/ipc/RpcCallContext.java | 2 +- .../mapreduce/MultiTableInputFormat.java | 2 +- .../hbase/mapreduce/TableMapReduceUtil.java | 9 ++-- .../mapreduce/TableRecordReaderImpl.java | 3 +- .../apache/hadoop/hbase/master/HMaster.java | 2 +- .../hbase/master/MasterRpcServices.java | 2 +- .../hadoop/hbase/master/RegionStateStore.java | 2 +- .../master/balancer/SimpleLoadBalancer.java | 4 +- .../balancer/StochasticLoadBalancer.java | 4 +- .../hbase/regionserver/ColumnTracker.java | 14 +++--- .../regionserver/CompactionRequestor.java | 4 +- .../hbase/regionserver/DefaultMemStore.java | 2 +- .../hbase/regionserver/DeleteTracker.java | 7 +-- .../regionserver/ExplicitColumnTracker.java | 7 +-- .../hbase/regionserver/HeapMemoryManager.java | 2 +- ...creasingToUpperBoundRegionSplitPolicy.java | 4 +- .../hadoop/hbase/regionserver/LruHashMap.java | 6 +-- .../hbase/regionserver/MemStoreChunkPool.java | 2 +- .../regionserver/MetricsRegionServer.java | 3 +- .../MiniBatchOperationInProgress.java | 2 +- .../NonReversedNonLazyKeyValueScanner.java | 2 +- .../hbase/regionserver/RSRpcServices.java | 8 ++-- .../hadoop/hbase/regionserver/Region.java | 3 +- .../hbase/regionserver/RegionScanner.java | 4 +- .../hbase/regionserver/ScanDeleteTracker.java | 7 +-- .../hadoop/hbase/regionserver/Store.java | 7 +-- 
.../hbase/regionserver/StoreScanner.java | 2 +- .../hbase/regionserver/wal/ReplayHLogKey.java | 1 - .../hbase/regionserver/wal/WALEdit.java | 14 +++--- .../regionserver/wal/WALEditsReplaySink.java | 4 +- .../replication/HBaseReplicationEndpoint.java | 1 - .../HBaseInterClusterReplicationEndpoint.java | 3 +- .../regionserver/ReplicationSink.java | 7 +-- .../regionserver/ReplicationSource.java | 4 +- .../ReplicationSourceManager.java | 2 + .../regionserver/ReplicationThrottler.java | 2 +- .../security/access/AccessControlLists.java | 1 + .../security/access/AccessController.java | 2 + .../security/visibility/VisibilityUtils.java | 2 +- .../snapshot/SnapshotDescriptionUtils.java | 14 +++--- .../hadoop/hbase/util/BloomFilterWriter.java | 2 +- .../org/apache/hadoop/hbase/util/FSUtils.java | 9 ++-- .../apache/hadoop/hbase/util/HBaseFsck.java | 2 +- .../hadoop/hbase/util/HFileV1Detector.java | 6 ++- .../hadoop/hbase/util/MultiHConnection.java | 1 - .../hadoop/hbase/util/RegionSplitter.java | 14 +++--- .../hbase/util/ServerRegionReplicaUtil.java | 2 +- .../org/apache/hadoop/hbase/wal/WALKey.java | 4 +- .../hadoop/hbase/zookeeper/ZKSplitLog.java | 8 ++-- .../hbase/thrift/ThriftServerRunner.java | 3 +- .../hadoop/hbase/thrift/generated/Hbase.java | 4 -- 158 files changed, 510 insertions(+), 458 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 20cfbeff898..3da018a0e39 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -85,8 +85,8 @@ public class HColumnDescriptor implements Comparable { /** * Key for cache data into L1 if cache is set up with more than one tier. * To set in the shell, do something like this: - * hbase(main):003:0> create 't', - * {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}} + * hbase(main):003:0> create 't', + * {NAME => 't', CONFIGURATION => {CACHE_DATA_IN_L1 => 'true'}} */ public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1"; @@ -115,7 +115,7 @@ public class HColumnDescriptor implements Comparable { /** * Retain all cells across flushes and compactions even if they fall behind * a delete tombstone. To see all retained cells, do a 'raw' scan; see - * Scan#setRaw or pass RAW => true attribute in the shell. + * Scan#setRaw or pass RAW => true attribute in the shell. */ public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; public static final String COMPRESS_TAGS = "COMPRESS_TAGS"; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index adca3d7f1da..c134063c9a0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -54,17 +54,21 @@ import com.google.protobuf.InvalidProtocolBufferException; * about the region. * * The region has a unique name which consists of the following fields: + *
    *
 * <li>tableName : The name of the table</li>
 * <li>startKey : The startKey for the region.</li>
 * <li>regionId : A timestamp when the region is created.</li>
 * <li>replicaId : An id starting from 0 to differentiate replicas of the same region range
 * but hosted in separated servers. The same region range can be hosted in multiple locations.</li>
 * <li>encodedName : An MD5 encoded string for the region name.</li>
+ * </ul>
* *
Other than the fields in the region name, region info contains: + *
    *
 * <li>endKey : the endKey for the region (exclusive)</li>
 * <li>split : Whether the region is split</li>
 * <li>offline : Whether the region is offline</li>
+ * </ul>
* * In 0.98 or before, a list of table's regions would fully cover the total keyspace, and at any * point in time, a row key always belongs to a single region, which is hosted in a single server. @@ -647,7 +651,7 @@ public class HRegionInfo implements Comparable { * by this region. For example, if the region is foo,a,g and this is * passed ["b","c"] or ["a","c"] it will return true, but if this is passed * ["b","z"] it will return false. - * @throws IllegalArgumentException if the range passed is invalid (ie end < start) + * @throws IllegalArgumentException if the range passed is invalid (ie. end < start) */ public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { @@ -1098,7 +1102,6 @@ public class HRegionInfo implements Comparable { * @param r Result to pull from * @return A pair of the {@link HRegionInfo} and the {@link ServerName} * (or null for server address if no address set in hbase:meta). - * @throws IOException * @deprecated use MetaTableAccessor methods for interacting with meta layouts */ @Deprecated diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index f2f00773e68..58067ead0fc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -179,7 +179,7 @@ public class HTableDescriptor implements Comparable { /** * INTERNAL flag to indicate whether or not the memstore should be replicated - * for read-replicas (CONSISTENCY => TIMELINE). + * for read-replicas (CONSISTENCY => TIMELINE). */ public static final String REGION_MEMSTORE_REPLICATION = "REGION_MEMSTORE_REPLICATION"; private static final Bytes REGION_MEMSTORE_REPLICATION_KEY = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index c53f99800f5..86e8d464994 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -180,7 +180,7 @@ public class MetaTableAccessor { } } - /** The delimiter for meta columns for replicaIds > 0 */ + /** The delimiter for meta columns for replicaIds > 0 */ protected static final char META_REPLICA_ID_DELIMITER = '_'; /** A regex for parsing server columns from meta. See above javadoc for meta layout */ @@ -1080,7 +1080,7 @@ public class MetaTableAccessor { /** * Fetch table states from META table * @param conn connection to use - * @return map {tableName -> state} + * @return map {tableName -> state} * @throws IOException */ public static Map getTableStates(Connection conn) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java index dc325a3cfe1..7658faff8b6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -54,11 +54,11 @@ public abstract class AbstractClientScanner implements ResultScanner { } /** - * Get nbRows rows. + * Get nbRows rows. * How many RPCs are made is determined by the {@link Scan#setCaching(int)} * setting (or hbase.client.scanner.caching in hbase-site.xml). 
* @param nbRows number of rows to return - * @return Between zero and nbRows RowResults. Scan is done + * @return Between zero and nbRows rowResults. Scan is done * if returned array is of zero-length (We never return null). * @throws IOException */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index f00d6c918b3..fcc0cae4e3d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -199,7 +199,7 @@ public interface Admin extends Abortable, Closeable { * * @param desc table descriptor for table * @throws IllegalArgumentException if the table name is reserved - * @throws MasterNotRunningException if master is not running + * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent * threads, the table may have been created between test-for-existence and attempt-at-creation). * @throws IOException if a remote or network exception occurs @@ -218,7 +218,7 @@ public interface Admin extends Abortable, Closeable { * @param endKey end of key range * @param numRegions the total number of regions to create * @throws IllegalArgumentException if the table name is reserved - * @throws MasterNotRunningException if master is not running + * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent * threads, the table may have been created between test-for-existence and attempt-at-creation). * @throws IOException @@ -235,7 +235,7 @@ public interface Admin extends Abortable, Closeable { * @param splitKeys array of split keys for the initial regions of the table * @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated * and if the split key has empty byte array. - * @throws MasterNotRunningException if master is not running + * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent * threads, the table may have been created between test-for-existence and attempt-at-creation). * @throws IOException @@ -248,11 +248,11 @@ public interface Admin extends Abortable, Closeable { * It may throw ExecutionException if there was an error while executing the operation * or TimeoutException in case the wait timeout was not long enough to allow the * operation to complete. + * Throws IllegalArgumentException Bad table name, if the split keys + * are repeated and if the split key has empty byte array. * * @param desc table descriptor for table * @param splitKeys keys to check if the table has been created with all split keys - * @throws IllegalArgumentException Bad table name, if the split keys - * are repeated and if the split key has empty byte array. * @throws IOException if a remote or network exception occurs * @return the result of the async creation. You can use Future.get(long, TimeUnit) * to wait on the operation to complete. @@ -727,7 +727,7 @@ public interface Admin extends Abortable, Closeable { * @param destServerName The servername of the destination regionserver. If passed the empty byte * array we'll assign to a random server. A server name is made of host, port and startcode. 
* Here is an example: host187.example.com,60020,1289493121758 - * @throws UnknownRegionException Thrown if we can't find a region named + * @throws IOException if we can't find a region named * encodedRegionName */ void move(final byte[] encodedRegionName, final byte[] destServerName) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java index 28502dc0244..5fac93a232b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallReversedScanner.java @@ -39,10 +39,11 @@ import org.apache.hadoop.hbase.util.Bytes; import com.google.common.annotations.VisibleForTesting; /** + *

 * Client scanner for small reversed scan. Generally, only one RPC is called to fetch the
 * scan results, unless the results cross multiple regions or the row count of
 * results exceed the caching.
- * <p/>
+ * <p>
* For small scan, it will get better performance than {@link ReversedClientScanner} */ @InterfaceAudience.Private diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java index bc9a4ed8894..9b35e04e261 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java @@ -47,13 +47,13 @@ public enum Durability { * Write the Mutation to the WAL synchronously. * The data is flushed to the filesystem implementation, but not necessarily to disk. * For HDFS this will flush the data to the designated number of DataNodes. - * See HADOOP-6313 + * See HADOOP-6313 */ SYNC_WAL, /** * Write the Mutation to the WAL synchronously and force the entries to disk. * (Note: this is currently not supported and will behave identical to {@link #SYNC_WAL}) - * See HADOOP-6313 + * See HADOOP-6313 */ FSYNC_WAL } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 7047714ba51..a06fb2cef1d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -231,7 +231,8 @@ public class HBaseAdmin implements Admin { * The connection to master will be created when required by admin functions. * * @param connection The Connection instance to use - * @throws MasterNotRunningException, ZooKeeperConnectionException are not + * @throws MasterNotRunningException + * @throws ZooKeeperConnectionException are not * thrown anymore but kept into the interface for backward api compatibility * @deprecated Constructing HBaseAdmin objects manually has been deprecated. * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java index 84363079184..745c7709db0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableInterface.java @@ -126,7 +126,7 @@ public interface HTableInterface extends Table { * Executes all the buffered {@link Put} operations. *

* This method gets called once automatically for every {@link Put} or batch - * of {@link Put}s (when put(List) is used) when + * of {@link Put}s (when put(List<Put>) is used) when * {@link #isAutoFlush} is {@code true}. * @throws IOException if a remote or network exception occurs. * @deprecated as of 1.0.0. Replaced by {@link BufferedMutator#flush()} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 10308da4193..b1f5b9f1c2e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -121,7 +121,6 @@ public class HTableMultiplexer { * @param tableName * @param put * @return true if the request can be accepted by its corresponding buffer queue. - * @throws IOException */ public boolean put(TableName tableName, final Put put) { return put(tableName, put, this.retryNum); @@ -133,7 +132,6 @@ public class HTableMultiplexer { * @param tableName * @param puts * @return the list of puts which could not be queued - * @throws IOException */ public List put(TableName tableName, final List puts) { if (puts == null) @@ -169,7 +167,6 @@ public class HTableMultiplexer { * retried before dropping the request. * Return false if the queue is already full. * @return true if the request can be accepted by its corresponding buffer queue. - * @throws IOException */ public boolean put(final TableName tableName, final Put put, int retry) { if (retry <= 0) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index c895eb43764..717ea3fcb6e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -328,7 +328,7 @@ public class Put extends Mutation implements HeapSize, Comparable { /** * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family & qualifier. + * a value assigned to the given family & qualifier. * Both given arguments must match the KeyValue object to return true. * * @param family column family diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index 666069cded1..702983bd321 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -204,7 +204,7 @@ public class Result implements CellScannable, CellScanner { /** * Return the array of Cells backing this Result instance. * - * The array is sorted from smallest -> largest using the + * The array is sorted from smallest -> largest using the * {@link CellComparator#COMPARATOR}. * * The array only contains what your Get or Scan specifies and no more. @@ -601,7 +601,7 @@ public class Result implements CellScannable, CellScanner { * Map of families to all versions of its qualifiers and values. *

 * Returns a three level Map of the form:
- * Map&family,Map<qualifier,Map<timestamp,value>>>
+ * Map&amp;family,Map&lt;qualifier,Map&lt;timestamp,value&gt;&gt;&gt;
 *

* Note: All other map returning methods make use of this map internally. * @return map from families to qualifiers to versions @@ -643,7 +643,7 @@ public class Result implements CellScannable, CellScanner { /** * Map of families to their most recent qualifiers and values. *

- * Returns a two level Map of the form: Map&family,Map<qualifier,value>>
+ * Returns a two level Map of the form: Map&amp;family,Map&lt;qualifier,value&gt;&gt;
 *

* The most recent version of each qualifier will be used. * @return map from families to qualifiers and value @@ -675,7 +675,7 @@ public class Result implements CellScannable, CellScanner { /** * Map of qualifiers to values. *

- * Returns a Map of the form: Map<qualifier,value> + * Returns a Map of the form: Map<qualifier,value> * @param family column family to get * @return map of qualifiers to values */ @@ -945,4 +945,4 @@ public class Result implements CellScannable, CellScanner { throw new UnsupportedOperationException("Attempting to modify readonly EMPTY_RESULT!"); } } -} \ No newline at end of file +} diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java index 6b7f1ddab36..d3efbdadbfe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultScanner.java @@ -42,7 +42,7 @@ public interface ResultScanner extends Closeable, Iterable { /** * @param nbRows number of rows to return - * @return Between zero and nbRows Results + * @return Between zero and nbRows results * @throws IOException e */ Result [] next(int nbRows) throws IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java index e468d3cf781..ea65fcf10e7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallable.java @@ -24,7 +24,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** - * A Callable that will be retried. If {@link #call(int)} invocation throws exceptions, + * A Callable<T> that will be retried. If {@link #call(int)} invocation throws exceptions, * we will call {@link #throwable(Throwable, boolean)} with whatever the exception was. * @param */ @@ -42,7 +42,7 @@ public interface RetryingCallable { * make it so we succeed on next call (clear caches, do relookup of locations, etc.). * @param t * @param retrying True if we are in retrying mode (we are not in retrying mode when max - * retries == 1; we ARE in retrying mode if retries > 1 even when we are the last attempt) + * retries == 1; we ARE in retrying mode if retries > 1 even when we are the last attempt) */ void throwable(final Throwable t, boolean retrying); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java index 8f288818852..d610d8c5948 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java @@ -174,6 +174,7 @@ public class RpcRetryingCallerWithReadReplicas { } /** + *

 * Algo:
 * - we put the query into the execution pool.
 * - after x ms, if we don't have a result, we add the queries for the secondary replicas
 * - we take the first answer
 * - when done, we cancel what's left. Cancelling means:
 * - removing from the pool if the actual call was not started
 * - interrupting the call if it has started
 * Client side, we need to take into account
 * - a call is not executed immediately after being put into the pool
 * - a call is a thread. Let's not multiply the number of thread by the number of replicas.
 * Server side, if we can cancel when it's still in the handler pool, it's much better, as a call
 * can take some i/o.
- * <p/>
+ * <p>
* Globally, the number of retries, timeout and so on still applies, but it's per replica, * not global. We continue until all retries are done, or all timeouts are exceeded. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java index a0193fb2ed9..14b721bf08f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java @@ -727,10 +727,10 @@ public class Scan extends Query { * this can deliver huge perf gains when there's a cf with lots of data; however, it can * also lead to some inconsistent results, as follows: * - if someone does a concurrent update to both column families in question you may get a row - * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } - * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan - * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, - * { video => "my dog" } }. + * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } + * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan + * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, + * { video => "my dog" } }. * - if there's a concurrent split and you have more than 2 column families, some rows may be * missing some column families. */ @@ -982,7 +982,6 @@ public class Scan extends Query { return ProtobufUtil.toScanMetrics(bytes); } - public Boolean isAsyncPrefetch() { return asyncPrefetch; } @@ -991,6 +990,4 @@ public class Scan extends Query { this.asyncPrefetch = asyncPrefetch; return this; } - - } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 5421e57a43f..594a459b169 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -64,16 +64,17 @@ import com.google.protobuf.Message; *

 * This will serve as the client side handler for invoking the aggregate
 * functions.
 * <p>
 * For all aggregate functions,
 * <ul>
 * <li>start row &lt; end row is an essential condition (if they are not
 * {@link HConstants#EMPTY_BYTE_ARRAY})</li>
 * <li>Column family can't be null. In case where multiple families are
 * provided, an IOException will be thrown. An optional column qualifier can
 * also be defined.</li>
 * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
 * parameter type. For average and std, it returns a double value. For row
 * count, it returns a long value.</li>
 * </ul>
 *
    Call {@link #close()} when done. */ @InterfaceAudience.Private @@ -109,10 +110,10 @@ public class AggregationClient implements Closeable { * @param tableName * @param ci * @param scan - * @return max val + * @return max val <R> * @throws Throwable * The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * & propagated to it. */ public R max( final TableName tableName, final ColumnInterpreter ci, final Scan scan) @@ -129,10 +130,10 @@ public class AggregationClient implements Closeable { * @param table * @param ci * @param scan - * @return max val + * @return max val <> * @throws Throwable * The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * & propagated to it. */ public R max(final Table table, final ColumnInterpreter ci, @@ -199,7 +200,7 @@ public class AggregationClient implements Closeable { * @param tableName * @param ci * @param scan - * @return min val + * @return min val <R> * @throws Throwable */ public R min( @@ -217,7 +218,7 @@ public class AggregationClient implements Closeable { * @param table * @param ci * @param scan - * @return min val + * @return min val <R> * @throws Throwable */ public @@ -269,11 +270,11 @@ public class AggregationClient implements Closeable { * optimised the operation. In case qualifier is provided, I can't use the * filter as it may set the flag to skip to next row, but the value read is * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * counted ==> an error. * @param tableName * @param ci * @param scan - * @return + * @return <R, S> * @throws Throwable */ public long rowCount( @@ -290,11 +291,11 @@ public class AggregationClient implements Closeable { * optimised the operation. In case qualifier is provided, I can't use the * filter as it may set the flag to skip to next row, but the value read is * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * counted ==> an error. 
* @param table * @param ci * @param scan - * @return + * @return <R, S> * @throws Throwable */ public @@ -341,7 +342,7 @@ public class AggregationClient implements Closeable { * @param tableName * @param ci * @param scan - * @return sum + * @return sum <S> * @throws Throwable */ public S sum( @@ -358,7 +359,7 @@ public class AggregationClient implements Closeable { * @param table * @param ci * @param scan - * @return sum + * @return sum <S> * @throws Throwable */ public @@ -485,7 +486,7 @@ public class AggregationClient implements Closeable { * @param tableName * @param ci * @param scan - * @return + * @return <R, S> * @throws Throwable */ public @@ -504,7 +505,7 @@ public class AggregationClient implements Closeable { * @param table * @param ci * @param scan - * @return + * @return <R, S> * @throws Throwable */ public double avg( @@ -593,7 +594,7 @@ public class AggregationClient implements Closeable { * @param tableName * @param ci * @param scan - * @return + * @return <R, S> * @throws Throwable */ public @@ -613,7 +614,7 @@ public class AggregationClient implements Closeable { * @param table * @param ci * @param scan - * @return + * @return <R, S> * @throws Throwable */ public double std( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java index ecf45953068..cf28c91ba3a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/package-info.java @@ -203,14 +203,9 @@ public class MyLittleHBaseClient {

  • HBase Home Page
  • Hadoop Home Page
- - -

See also the section in the HBase Reference Guide where it discusses HBase Client. It has section on how to access HBase from inside your multithreaded environment how to control resources consumed client-side, etc.

- - */ package org.apache.hadoop.hbase.client; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java index 43efb66d3db..e247c0867a1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java @@ -35,23 +35,23 @@ import com.google.protobuf.Message; * for an example. *

* Takes two generic parameters and three Message parameters. - * The cell value type of the interpreter is . + * The cell value type of the interpreter is <T>. * During some computations like sum, average, the return type can be different * than the cell value data type, for eg, sum of int cell values might overflow * in case of a int result, we should use Long for its result. Therefore, this * class mandates to use a different (promoted) data type for result of these - * computations . All computations are performed on the promoted data type - * . There is a conversion method - * {@link ColumnInterpreter#castToReturnType(Object)} which takes a type and - * returns a type. - * The AggregateImplementation uses PB messages to initialize the + * computations <S>. All computations are performed on the promoted data type + * <S>. There is a conversion method + * {@link ColumnInterpreter#castToReturnType(Object)} which takes a <T> type and + * returns a <S> type. + * The AggregateIm>lementation uses PB messages to initialize the * user's ColumnInterpreter implementation, and for sending the responses * back to AggregationClient. - * @param Cell value data type - * @param Promoted data type - * @param

PB message that is used to transport initializer specific bytes - * @param PB message that is used to transport Cell () instance - * @param PB message that is used to transport Promoted () instance + * @param T Cell value data type + * @param S Promoted data type + * @param P PB message that is used to transport initializer specific bytes + * @param Q PB message that is used to transport Cell (<T>) instance + * @param R PB message that is used to transport Promoted (<S>) instance */ @InterfaceAudience.Private public abstract class ColumnInterpreter { * This takes care if either of arguments are null. returns 0 if they are * equal or both are null; *

- * <li>>0 if l1 > l2 or l1 is not null and l2 is null.
- * <li>< 0 if l1 < l2 or l1 is null and l2 is not null.
+ * <li>&gt; 0 if l1 &gt; l2 or l1 is not null and l2 is null.</li>
+ * <li>&lt; 0 if l1 &lt; l2 or l1 is null and l2 is not null.</li>
+ * </ul>
*/ public abstract int compare(final T l1, final T l2); /** - * used for computing average of data values. Not providing the divide - * method that takes two values as it is not needed as of now. + * used for computing average of <S> data values. Not providing the divide + * method that takes two <S> values as it is not needed as of now. * @param o * @param l * @return Average diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java index e79a4d5476e..ecead8ca4de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java @@ -32,15 +32,16 @@ import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; import com.google.protobuf.InvalidProtocolBufferException; /** + *

 * This filter is used to filter based on the column family. It takes an
 * operator (equal, greater, not equal, etc) and a byte [] comparator for the
 * column family portion of a key.
- * <p/>
+ * <p>
 * This filter can be wrapped with {@link org.apache.hadoop.hbase.filter.WhileMatchFilter} and {@link org.apache.hadoop.hbase.filter.SkipFilter}
 * to add more control.
- * <p/>
+ * <p>
 * Multiple filters can be combined using {@link org.apache.hadoop.hbase.filter.FilterList}.
- * <p/>
+ * <p>
* If an already known column family is looked for, use {@link org.apache.hadoop.hbase.client.Get#addFamily(byte[])} * directly rather than a filter. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index a2f90152f7e..f7598d686b7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -261,7 +261,6 @@ public abstract class Filter { * @param pbBytes A pb serialized {@link Filter} instance * @return An instance of {@link Filter} made from bytes * @throws DeserializationException - * @throws IOException in case an I/O or an filter specific failure needs to be signaled. * @see #toByteArray */ public static Filter parseFrom(final byte [] pbBytes) throws DeserializationException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java index 1bcd00aa761..08a68210d50 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterBase.java @@ -41,7 +41,7 @@ public abstract class FilterBase extends Filter { * Filters that are purely stateless and do nothing in their reset() methods can inherit * this null/empty implementation. * - * @inheritDoc + * {@inheritDoc} */ @Override public void reset() throws IOException { @@ -51,7 +51,7 @@ public abstract class FilterBase extends Filter { * Filters that do not filter by row key can inherit this implementation that * never filters anything. (ie: returns false). * - * @inheritDoc + * {@inheritDoc} * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. * Instead use {@link #filterRowKey(Cell)} */ @@ -72,7 +72,7 @@ public abstract class FilterBase extends Filter { * Filters that never filter all remaining can inherit this implementation that * never stops the filter early. * - * @inheritDoc + * {@inheritDoc} */ @Override public boolean filterAllRemaining() throws IOException { @@ -82,7 +82,7 @@ public abstract class FilterBase extends Filter { /** * By default no transformation takes place * - * @inheritDoc + * {@inheritDoc} */ @Override public Cell transformCell(Cell v) throws IOException { @@ -93,7 +93,7 @@ public abstract class FilterBase extends Filter { * Filters that never filter by modifying the returned List of Cells can * inherit this implementation that does nothing. * - * @inheritDoc + * {@inheritDoc} */ @Override public void filterRowCells(List ignored) throws IOException { @@ -103,7 +103,7 @@ public abstract class FilterBase extends Filter { * Fitlers that never filter by modifying the returned List of Cells can * inherit this implementation that does nothing. * - * @inheritDoc + * {@inheritDoc} */ @Override public boolean hasFilterRow() { @@ -115,7 +115,7 @@ public abstract class FilterBase extends Filter { * {@link #filterKeyValue(Cell)} can inherit this implementation that * never filters a row. * - * @inheritDoc + * {@inheritDoc} */ @Override public boolean filterRow() throws IOException { @@ -126,7 +126,7 @@ public abstract class FilterBase extends Filter { * Filters that are not sure which key must be next seeked to, can inherit * this implementation that, by default, returns a null Cell. 
* - * @inheritDoc + * {@inheritDoc} */ public Cell getNextCellHint(Cell currentCell) throws IOException { return null; @@ -136,7 +136,7 @@ public abstract class FilterBase extends Filter { * By default, we require all scan's column families to be present. Our * subclasses may be more precise. * - * @inheritDoc + * {@inheritDoc} */ public boolean isFamilyEssential(byte[] name) throws IOException { return true; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java index 2f89251d897..8ba1ccb09de 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FilterList.java @@ -41,14 +41,14 @@ import com.google.protobuf.InvalidProtocolBufferException; * Since you can use Filter Lists as children of Filter Lists, you can create a * hierarchy of filters to be evaluated. * - *
+ * <br>
 * {@link Operator#MUST_PASS_ALL} evaluates lazily: evaluation stops as soon as one filter does
 * not include the KeyValue.
 *
- * <br/>
+ * <br>
 * {@link Operator#MUST_PASS_ONE} evaluates non-lazily: all filters are always evaluated.
 *
- * <br/>
+ * <br>
* Defaults to {@link Operator#MUST_PASS_ALL}. */ @InterfaceAudience.Public @@ -315,7 +315,7 @@ final public class FilterList extends Filter { * Filters that never filter by modifying the returned List of Cells can * inherit this implementation that does nothing. * - * @inheritDoc + * {@inheritDoc} */ @Override public void filterRowCells(List cells) throws IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java index e2f159b76c7..c0419143c11 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java @@ -44,7 +44,7 @@ import com.google.protobuf.InvalidProtocolBufferException; * phoenix etc. However, both solutions are inefficient. Both of them can't utilize the range info * to perform fast forwarding during scan which is quite time consuming. If the number of ranges * are quite big (e.g. millions), join is a proper solution though it is slow. However, there are - * cases that user wants to specify a small number of ranges to scan (e.g. <1000 ranges). Both + * cases that user wants to specify a small number of ranges to scan (e.g. <1000 ranges). Both * solutions can't provide satisfactory performance in such case. MultiRowRangeFilter is to support * such usec ase (scan multiple row key ranges), which can construct the row key ranges from user * specified list and perform fast-forwarding during scan. Thus, the scan will be quite efficient. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java index 7c68dd23704..adc9c54c5eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java @@ -31,11 +31,11 @@ import com.google.common.base.Preconditions; import com.google.protobuf.InvalidProtocolBufferException; /** * Implementation of Filter interface that limits results to a specific page - * size. It terminates scanning once the number of filter-passed rows is > + * size. It terminates scanning once the number of filter-passed rows is > * the given page size. *

* Note that this filter cannot guarantee that the number of results returned - * to a client are <= page size. This is because the filter is applied + * to a client are <= page size. This is because the filter is applied * separately on different region servers. It does however optimize the scan of * individual HRegions by making sure that the page size is never exceeded * locally. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java index 71ea3c3bf3c..3aced13c4db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java @@ -39,8 +39,9 @@ import com.google.protobuf.InvalidProtocolBufferException; * entire row if any of its weights are zero. In this case, we want to prevent * rows from being emitted if a single key is filtered. Combine this filter * with a {@link ValueFilter}: + *

*

- *

+ * 
  * scan.setFilter(new SkipFilter(new ValueFilter(CompareOp.NOT_EQUAL,
  *     new BinaryComparator(Bytes.toBytes(0))));
  * 
@@ -48,6 +49,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * (since ValueFilter will not pass that Cell).
  * Without this filter, the other non-zero valued columns in the row would still
  * be emitted.
+ * 

*/ @InterfaceAudience.Public @InterfaceStability.Stable diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java index 2e4d0a66f28..005f03cefb0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java @@ -422,7 +422,6 @@ public class AsyncRpcClient extends AbstractRpcClient { * @param rpcTimeout default rpc operation timeout * * @return A rpc channel that goes via this rpc client instance. - * @throws IOException when channel could not be created */ public RpcChannel createRpcChannel(final ServerName sn, final User user, int rpcTimeout) { return new RpcChannelImplementation(this, sn, user, rpcTimeout); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index bbd2fc73ab9..33fc880a8c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -24,7 +24,7 @@ import java.net.InetSocketAddress; /** * This class holds the address and the user ticket, etc. The client connections - * to servers are uniquely identified by + * to servers are uniquely identified by <remoteAddress, ticket, serviceName> */ @InterfaceAudience.Private public class ConnectionId { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java index 5511cb1b0e4..aa407f70b56 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java @@ -35,7 +35,8 @@ import com.google.protobuf.RpcController; * When implementing {@link com.google.protobuf.Service} defined methods, * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client: * - * public void myMethod(RpcController controller, MyRequest request, RpcCallback done) { + * public void myMethod(RpcController controller, MyRequest request, + * RpcCallback<MyResponse> done) { * MyResponse response = null; * try { * // do processing diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index e816f61e55f..a87fd47983e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -252,7 +252,7 @@ public final class ProtobufUtil { * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to * znodes, etc. * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new + * @return The passed bytes with magic prepended (Creates a new * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. 
*/ public static byte [] prependPBMagic(final byte [] bytes) { @@ -2120,7 +2120,7 @@ public final class ProtobufUtil { } /** - * Convert a ListMultimap where key is username + * Convert a ListMultimap<String, TablePermission> where key is username * to a protobuf UserPermission * * @param perm the list of user and table permissions @@ -2374,7 +2374,7 @@ public final class ProtobufUtil { /** * Convert a protobuf UserTablePermissions to a - * ListMultimap where key is username. + * ListMultimap<String, TablePermission> where key is username. * * @param proto the protobuf UserPermission * @return the converted UserPermission diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java index 949179521fe..a8fec87cfde 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java @@ -51,11 +51,11 @@ import org.apache.hadoop.hbase.util.Strings; * Helper class to interact with the quota table. *
  *     ROW-KEY      FAM/QUAL        DATA
- *   n.<namespace> q:s         <global-quotas>
- *   t.<table>     q:s         <global-quotas>
- *   u.<user>      q:s         <global-quotas>
- *   u.<user>      q:s.<table> <table-quotas>
- * u. q:s.: + * n.<namespace> q:s <global-quotas> + * t.<table> q:s <global-quotas> + * u.<user> q:s <global-quotas> + * u.<user> q:s.<table> <table-quotas> + * u.<user> q:s.<ns>: <namespace-quotas> * */ @InterfaceAudience.Private diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java index 75c967d3913..50b8b15d3bf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/BloomType.java @@ -34,7 +34,7 @@ public enum BloomType { */ ROW, /** - * Bloom enabled with Table row & column (family+qualifier) as Key + * Bloom enabled with Table row & column (family+qualifier) as Key */ ROWCOL } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index 359dbffecc1..8e80e06a6ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -125,7 +125,7 @@ public interface ReplicationPeers { * have to be connected. The state is read directly from the backing store. * @param peerId a short that identifies the cluster * @return true if replication is enabled, false otherwise. - * @throws IOException Throws if there's an error contacting the store + * @throws ReplicationException thrown if there's an error contacting the store */ boolean getStatusOfPeerFromBackingStore(String peerId) throws ReplicationException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java index 49685480477..765559f8d9c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java @@ -24,7 +24,8 @@ import org.apache.hadoop.hbase.util.Bytes; /** * This contains a visibility expression which can be associated with a cell. When it is set with a * Mutation, all the cells in that mutation will get associated with this expression. A visibility - * expression can contain visibility labels combined with logical operators AND(&), OR(|) and NOT(!) + * expression can contain visibility labels combined with logical + * operators AND(&), OR(|) and NOT(!) */ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -51,7 +52,7 @@ public class CellVisibility { /** * Helps in quoting authentication Strings. Use this if unicode characters to * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * '"','\','&','|','!' */ public static String quote(String auth) { return quote(Bytes.toBytes(auth)); @@ -60,7 +61,7 @@ public class CellVisibility { /** * Helps in quoting authentication Strings. Use this if unicode characters to * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * '"','\','&','|','!' 
*/ public static String quote(byte[] auth) { int escapeChars = 0; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index 5cbb06673ce..e71886a219e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -1206,8 +1206,6 @@ public class ZKUtil { * @param data data of node to create * @param cb * @param ctx - * @throws KeeperException if unexpected zookeeper exception - * @throws KeeperException.NodeExistsException if node already exists */ public static void asyncCreate(ZooKeeperWatcher zkw, String znode, byte [] data, final AsyncCallback.StringCallback cb, @@ -2027,7 +2025,6 @@ public class ZKUtil { /** * Recursively print the current state of ZK (non-transactional) * @param root name of the root directory in zk to print - * @throws KeeperException */ public static void logZKTree(ZooKeeperWatcher zkw, String root) { if (!LOG.isDebugEnabled()) return; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java index d26874a1729..475e3852f2b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java @@ -656,7 +656,6 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable { /** * Close the connection to ZooKeeper. * - * @throws InterruptedException */ @Override public void close() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java index 8f299cc7445..46ebeeb64a1 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java @@ -23,7 +23,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; /** - * The unit of storage in HBase consisting of the following fields:
+ * The unit of storage in HBase consisting of the following fields: + *
*
  * 1) row
  * 2) column family
@@ -33,30 +34,36 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * 6) MVCC version
  * 7) value
  * 
- *

+ *

* Uniqueness is determined by the combination of row, column family, column qualifier, * timestamp, and type. - *

+ *

+ *

* The natural comparator will perform a bitwise comparison on row, column family, and column * qualifier. Less intuitively, it will then treat the greater timestamp as the lesser value with * the goal of sorting newer cells first. - *

+ *

+ *

* This interface should not include methods that allocate new byte[]'s such as those used in client * or debugging code. These users should use the methods found in the {@link CellUtil} class. * Currently for to minimize the impact of existing applications moving between 0.94 and 0.96, we * include the costly helper methods marked as deprecated. - *

- * Cell implements Comparable which is only meaningful when comparing to other keys in the + *

+ *

+ * Cell implements Comparable<Cell> which is only meaningful when + * comparing to other keys in the * same table. It uses CellComparator which does not work on the -ROOT- and hbase:meta tables. - *

+ *

+ *

* In the future, we may consider adding a boolean isOnHeap() method and a getValueBuffer() method * that can be used to pass a value directly from an off-heap ByteBuffer to the network without * copying into an on-heap byte[]. - *

+ *

+ *

* Historic note: the original Cell implementation (KeyValue) requires that all fields be encoded as * consecutive bytes in the same byte[], whereas this interface allows fields to reside in separate * byte[]'s. - *

+ *

*/ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -77,7 +84,7 @@ public interface Cell { int getRowOffset(); /** - * @return Number of row bytes. Must be < rowArray.length - offset. + * @return Number of row bytes. Must be < rowArray.length - offset. */ short getRowLength(); @@ -97,7 +104,7 @@ public interface Cell { int getFamilyOffset(); /** - * @return Number of family bytes. Must be < familyArray.length - offset. + * @return Number of family bytes. Must be < familyArray.length - offset. */ byte getFamilyLength(); @@ -117,7 +124,7 @@ public interface Cell { int getQualifierOffset(); /** - * @return Number of qualifier bytes. Must be < qualifierArray.length - offset. + * @return Number of qualifier bytes. Must be < qualifierArray.length - offset. */ int getQualifierLength(); @@ -148,7 +155,7 @@ public interface Cell { * cells in the memstore but is not retained forever. It may survive several flushes, but * generally becomes irrelevant after the cell's row is no longer involved in any operations that * require strict consistency. - * @return mvccVersion (always >= 0 if exists), or 0 if it no longer exists + * @return mvccVersion (always >= 0 if exists), or 0 if it no longer exists */ @Deprecated long getMvccVersion(); @@ -158,7 +165,7 @@ public interface Cell { * exists for cells in the memstore but is not retained forever. It will be kept for * {@link HConstants#KEEP_SEQID_PERIOD} days, but generally becomes irrelevant after the cell's * row is no longer involved in any operations that require strict consistency. - * @return seqId (always > 0 if exists), or 0 if it no longer exists + * @return seqId (always > 0 if exists), or 0 if it no longer exists */ long getSequenceId(); @@ -177,7 +184,7 @@ public interface Cell { int getValueOffset(); /** - * @return Number of value bytes. Must be < valueArray.length - offset. + * @return Number of value bytes. Must be < valueArray.length - offset. */ int getValueLength(); @@ -199,7 +206,7 @@ public interface Cell { /** * WARNING do not use, expensive. This gets an arraycopy of the cell's value. * - * Added to ease transition from 0.94 -> 0.96. + * Added to ease transition from 0.94 -> 0.96. * * @deprecated as of 0.96, use {@link CellUtil#cloneValue(Cell)} */ @@ -209,7 +216,7 @@ public interface Cell { /** * WARNING do not use, expensive. This gets an arraycopy of the cell's family. * - * Added to ease transition from 0.94 -> 0.96. + * Added to ease transition from 0.94 -> 0.96. * * @deprecated as of 0.96, use {@link CellUtil#cloneFamily(Cell)} */ @@ -219,7 +226,7 @@ public interface Cell { /** * WARNING do not use, expensive. This gets an arraycopy of the cell's qualifier. * - * Added to ease transition from 0.94 -> 0.96. + * Added to ease transition from 0.94 -> 0.96. * * @deprecated as of 0.96, use {@link CellUtil#cloneQualifier(Cell)} */ @@ -229,10 +236,10 @@ public interface Cell { /** * WARNING do not use, expensive. this gets an arraycopy of the cell's row. * - * Added to ease transition from 0.94 -> 0.96. + * Added to ease transition from 0.94 -> 0.96. 
* * @deprecated as of 0.96, use {@link CellUtil#getRowByte(Cell, int)} */ @Deprecated byte[] getRow(); -} \ No newline at end of file +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java index e7ccbdec6c6..2d0c9406c7f 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java @@ -111,7 +111,7 @@ public class CellComparator implements Comparator, Serializable { * @param b * @param ignoreSequenceid True if we are to compare the key portion only and ignore * the sequenceid. Set to false to compare key and consider sequenceid. - * @return 0 if equal, -1 if a < b, and +1 if a > b. + * @return 0 if equal, -1 if a < b, and +1 if a > b. */ private final int compare(final Cell a, final Cell b, boolean ignoreSequenceid) { // row @@ -682,4 +682,4 @@ public class CellComparator implements Comparator, Serializable { return result; } } -} \ No newline at end of file +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java index 79e677ee23f..0adb7b5a115 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScannable.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; /** * Implementer can return a CellScanner over its Cell content. * Class name is ugly but mimicing java.util.Iterable only we are about the dumber - * CellScanner rather than say Iterator. See CellScanner class comment for why we go + * CellScanner rather than say Iterator<Cell>. See CellScanner class comment for why we go * dumber than java.util.Iterator. */ @InterfaceAudience.Private diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java index f337122b7aa..b0460b73e3e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellScanner.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; * may or may not point to a reusable cell implementation, so users of the CellScanner should not, * for example, accumulate a List of Cells. All of the references may point to the same object, * which would be the latest state of the underlying Cell. In short, the Cell is mutable. - *

+ *

* Typical usage: * *
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 674ef6e2eda..89a3f344651 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1133,7 +1133,8 @@ public final class HConstants {
 
   /**
    * When using bucket cache, this is a float that EITHER represents a percentage of total heap
-   * memory size to give to the cache (if < 1.0) OR, it is the capacity in megabytes of the cache.
+   * memory size to give to the cache (if < 1.0) OR, it is the capacity in
+   * megabytes of the cache.
    */
   public static final String BUCKET_CACHE_SIZE_KEY = "hbase.bucketcache.size";
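As a configuration sketch of the two interpretations described above (the values are illustrative only):

    Configuration conf = HBaseConfiguration.create();
    // a value below 1.0 is treated as a fraction of the maximum heap to give to the cache ...
    conf.setFloat("hbase.bucketcache.size", 0.4f);
    // ... while a value of 1.0 or more is treated as a capacity in megabytes, e.g. 8 GB:
    conf.setFloat("hbase.bucketcache.size", 8192f);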
 
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 315e9a37730..cd17bef489b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -64,18 +64,21 @@ import com.google.common.annotations.VisibleForTesting;
  * 

* KeyValue wraps a byte array and takes offsets and lengths into passed array at where to start * interpreting the content as KeyValue. The KeyValue format inside a byte array is: - * <keylength> <valuelength> <key> <value> Key is further decomposed as: - * <rowlength> <row> <columnfamilylength> <columnfamily> <columnqualifier> - * <timestamp> <keytype> + * <keylength> <valuelength> <key> <value> + * Key is further decomposed as: + * <rowlength> <row> <columnfamilylength> + * <columnfamily> <columnqualifier> + * <timestamp> <keytype> * The rowlength maximum is Short.MAX_SIZE, column family length maximum - * is Byte.MAX_SIZE, and column qualifier + key length must be < + * is Byte.MAX_SIZE, and column qualifier + key length must be < * Integer.MAX_SIZE. The column does not contain the family/qualifier delimiter, * {@link #COLUMN_FAMILY_DELIMITER}
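A rough sketch of how this layout maps onto the backing array (getBuffer() is deprecated and used here purely for illustration):

    KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("fam"),
        Bytes.toBytes("qual"), 1234L, Bytes.toBytes("value"));
    byte[] b = kv.getBuffer();                                            // backing array
    int keyLength = Bytes.toInt(b, kv.getOffset());                       // <keylength>
    int valueLength = Bytes.toInt(b, kv.getOffset() + Bytes.SIZEOF_INT);  // <valuelength>
    // the <key> bytes (rowlength, row, columnfamily, qualifier, timestamp, keytype) follow,
    // then the <value> bytes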
* KeyValue can optionally contain Tags. When it contains tags, it is added in the byte array after - * the value part. The format for this part is: <tagslength><tagsbytes>. + * the value part. The format for this part is: <tagslength><tagsbytes>. * tagslength maximum is Short.MAX_SIZE. The tagsbytes * contain one or more tags where as each tag is of the form - * <taglength><tagtype><tagbytes>. tagtype is one byte and + * <taglength><tagtype><tagbytes>. + * tagtype is one byte and * taglength maximum is Short.MAX_SIZE and it includes 1 byte type length * and actual tag bytes length. */ @@ -1163,7 +1166,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * as JSON. Values are left out due to their tendency to be large. If needed, * they can be added manually. * - * @return the Map containing data from this key + * @return the Map<String,?> containing data from this key */ public Map toStringMap() { Map stringMap = new HashMap(); @@ -1878,7 +1881,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * Compares the only the user specified portion of a Key. This is overridden by MetaComparator. * @param left * @param right - * @return 0 if equal, <0 if left smaller, >0 if right smaller + * @return 0 if equal, <0 if left smaller, >0 if right smaller */ protected int compareRowKey(final Cell left, final Cell right) { return CellComparator.COMPARATOR.compareRows(left, right); @@ -1893,7 +1896,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @param right * @param roffset * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller + * @return 0 if equal, <0 if left smaller, >0 if right smaller */ public int compareFlatKey(byte[] left, int loffset, int llength, byte[] right, int roffset, int rlength) { @@ -2005,7 +2008,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @param right * @param roffset * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller + * @return 0 if equal, <0 if left smaller, >0 if right smaller */ public int compareRows(byte [] left, int loffset, int llength, byte [] right, int roffset, int rlength) { @@ -2054,7 +2057,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * @param right * @param roffset * @param rlength - * @return 0 if equal, <0 if left smaller, >0 if right smaller + * @return 0 if equal, <0 if left smaller, >0 if right smaller */ @Override // SamePrefixComparator public int compareIgnoringPrefix(int commonPrefix, byte[] left, @@ -2292,7 +2295,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId, * This is a HFile block index key optimization. 
* @param leftKey * @param rightKey - * @return 0 if equal, <0 if left smaller, >0 if right smaller + * @return 0 if equal, <0 if left smaller, >0 if right smaller * @deprecated Since 0.99.2; */ @Deprecated diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java index 5035666abe1..407c017990b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java @@ -526,7 +526,7 @@ public class KeyValueUtil { /*************** misc **********************************/ /** * @param cell - * @return cell if it is an instance of {@link KeyValue} else we will return a + * @return cell if it is an instance of {@link KeyValue} else we will return a * new {@link KeyValue} instance made from cell * @deprecated without any replacement. */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java index f6f89b41865..ad14f679e25 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -41,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException; * servers on same hostname and port (startcode is usually timestamp of server startup). The * {@link #toString()} format of ServerName is safe to use in the filesystem and as znode name * up in ZooKeeper. Its format is: - * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port> '{@link #SERVERNAME_SEPARATOR}' <startcode>. + * <hostname> '{@link #SERVERNAME_SEPARATOR}' <port> + * '{@link #SERVERNAME_SEPARATOR}' <startcode>. * For example, if hostname is www.example.org, port is 1234, * and the startcode for the regionserver is 1212121212, then * the {@link #toString()} would be www.example.org,1234,1212121212. @@ -224,7 +225,7 @@ public class ServerName implements Comparable, Serializable { * @param port * @param startcode * @return Server name made of the concatenation of hostname, port and - * startcode formatted as <hostname> ',' <port> ',' <startcode> + * startcode formatted as <hostname> ',' <port> ',' <startcode> */ static String getServerName(String hostName, int port, long startcode) { final StringBuilder name = new StringBuilder(hostName.length() + 1 + 5 + 1 + 13); @@ -237,10 +238,10 @@ public class ServerName implements Comparable, Serializable { } /** - * @param hostAndPort String in form of <hostname> ':' <port> + * @param hostAndPort String in form of <hostname> ':' <port> * @param startcode * @return Server name made of the concatenation of hostname, port and - * startcode formatted as <hostname> ',' <port> ',' <startcode> + * startcode formatted as <hostname> ',' <port> ',' <startcode> */ public static String getServerName(final String hostAndPort, final long startcode) { @@ -339,7 +340,7 @@ public class ServerName implements Comparable, Serializable { /** * @param str Either an instance of {@link ServerName#toString()} or a - * "'' ':' ''". + * "'<hostname>' ':' '<port>'". * @return A ServerName instance. 
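A short sketch of the ServerName string format documented above (hostname, port and startcode values are illustrative):

    ServerName sn = ServerName.valueOf("www.example.org", 1234, 1212121212L);
    String s = sn.toString();                        // "www.example.org,1234,1212121212"
    ServerName same = ServerName.parseServerName(s); // accepts the full hostname,port,startcode form
    ServerName hostPortOnly = ServerName.parseServerName("www.example.org:1234"); // host:port form carries no startcode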
*/ public static ServerName parseServerName(final String str) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java index 0781e1c40aa..63066b3e82b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java @@ -126,8 +126,8 @@ public final class TableName implements Comparable { * The name may not start with '.' or '-'. * * Valid fully qualified table names: - * foo:bar, namespace=>foo, table=>bar - * org:foo.bar, namespace=org, table=>foo.bar + * foo:bar, namespace=>foo, table=>bar + * org:foo.bar, namespace=org, table=>foo.bar */ public static byte [] isLegalFullyQualifiedTableName(final byte[] tableName) { if (tableName == null || tableName.length <= 0) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java index 34f1bf73d69..c6406f2665c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Accepts a stream of Cells. This can be used to build a block of cells during compactions * and flushes, or to build a byte[] to send to the client. This could be backed by a - * List, but more efficient implementations will append results to a + * List<KeyValue>, but more efficient implementations will append results to a * byte[] to eliminate overhead, and possibly encode the cells further. *

To read Cells, use {@link org.apache.hadoop.hbase.CellScanner} * @see org.apache.hadoop.hbase.CellScanner diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index 8352e4eff8f..ad1c984a6b7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** * Represents an interval of version timestamps. *

- * Evaluated according to minStamp <= timestamp < maxStamp + * Evaluated according to minStamp <= timestamp < maxStamp * or [minStamp,maxStamp) in interval notation. *
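To make the half-open interval concrete, clients usually express this range through the Scan API; a minimal sketch:

    Scan scan = new Scan();
    scan.setTimeRange(10L, 20L); // may throw IOException for an invalid range
    // a cell with timestamp 10 is matched (inclusive lower bound),
    // a cell with timestamp 20 is not (exclusive upper bound)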

* Only used internally; should not be accessed directly by clients. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java index 62167d6f84e..2d58a18ca7b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/KeyStoreKeyProvider.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; * on the local filesystem. It is configured with a URI passed in as a String * to init(). The URI should have the form: *

- *

    scheme://path?option1=value1&option2=value2
+ *
    scheme://path?option1=value1&option2=value2
*

* scheme can be either "jks" or "jceks", specifying the file based * providers shipped with every JRE. The latter is the certificate store for diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java index e1da69553cf..1bef2214457 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java @@ -61,7 +61,7 @@ public class ThrottledInputStream extends InputStream { rawStream.close(); } - /** @inheritDoc */ + /** {@inheritDoc} */ @Override public int read() throws IOException { throttle(); @@ -72,7 +72,7 @@ public class ThrottledInputStream extends InputStream { return data; } - /** @inheritDoc */ + /** {@inheritDoc} */ @Override public int read(byte[] b) throws IOException { throttle(); @@ -83,7 +83,7 @@ public class ThrottledInputStream extends InputStream { return readLen; } - /** @inheritDoc */ + /** {@inheritDoc} */ @Override public int read(byte[] b, int off, int len) throws IOException { throttle(); @@ -159,7 +159,7 @@ public class ThrottledInputStream extends InputStream { return totalSleepTime; } - /** @inheritDoc */ + /** {@inheritDoc} */ @Override public String toString() { return "ThrottledInputStream{" + diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java index 9ca096462b8..4a3d42fec87 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/Dictionary.java @@ -45,7 +45,7 @@ public interface Dictionary { * * @param data the byte array that we're looking up * @param offset Offset into data to add to Dictionary. - * @param length Length beyond offset that comprises entry; must be > 0. + * @param length Length beyond offset that comprises entry; must be > 0. * @return the index of the entry, or {@link #NOT_IN_DICTIONARY} if not found */ short findEntry(byte[] data, int offset, int length); @@ -59,7 +59,7 @@ public interface Dictionary { * * @param data the entry to add * @param offset Offset into data to add to Dictionary. - * @param length Length beyond offset that comprises entry; must be > 0. + * @param length Length beyond offset that comprises entry; must be > 0. 
* @return the index of the entry */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java index db71e8c5f0e..0efb4029be5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/User.java @@ -194,7 +194,6 @@ public abstract class User { * @param action * @return the result of the action * @throws IOException - * @throws InterruptedException */ @SuppressWarnings({ "rawtypes", "unchecked" }) public static T runAsLoginUser(PrivilegedExceptionAction action) throws IOException { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java index 4ba15ec2868..550088a1261 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/Struct.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.util.PositionedByteRange; * scenario where the end of the buffer has been reached but there are still * nullable fields remaining in the {@code Struct} definition. When this * happens, it will produce null entries for the remaining values. For example: + *

*
  * StructBuilder builder = new StructBuilder()
  *     .add(OrderedNumeric.ASCENDING) // nullable
@@ -57,11 +58,10 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * Object[] val = new Object[] { BigDecimal.ONE, "foo" };
  * shorter.encode(buf1, val); // write short value with short Struct
  * buf1.setPosition(0); // reset position marker, prepare for read
- * longer.decode(buf1); // => { BigDecimal.ONE, "foo", null } ; long Struct reads implied null
+ * longer.decode(buf1); // => { BigDecimal.ONE, "foo", null } ; long Struct reads implied null
  * longer.encode(buf2, val); // write short value with long struct
- * Bytes.equals(buf1.getBytes(), buf2.getBytes()); // => true; long Struct skips writing null
+ * Bytes.equals(buf1.getBytes(), buf2.getBytes()); // => true; long Struct skips writing null
  * 
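A compact, self-contained round trip of the shorter Struct from the example above (assuming SimplePositionedMutableByteRange as the buffer implementation):

    Struct shorter = new StructBuilder()
        .add(OrderedNumeric.ASCENDING)
        .add(OrderedString.ASCENDING)
        .toStruct();
    PositionedByteRange buf = new SimplePositionedMutableByteRange(100);
    shorter.encode(buf, new Object[] { BigDecimal.ONE, "foo" });
    buf.setPosition(0);
    Object[] roundTripped = shorter.decode(buf); // { BigDecimal.ONE, "foo" }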
- *

*

Sort Order

*

* {@code Struct} instances sort according to the composite order of their diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java index cd416581395..b151b8985cf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractByteRange.java @@ -33,7 +33,7 @@ public abstract class AbstractByteRange implements ByteRange { // reuse objects of this class /** - * The array containing the bytes in this range. It will be >= length. + * The array containing the bytes in this range. It will be >= length. */ protected byte[] bytes; @@ -44,7 +44,7 @@ public abstract class AbstractByteRange implements ByteRange { protected int offset; /** - * The number of bytes in the range. Offset + length must be <= bytes.length + * The number of bytes in the range. Offset + length must be <= bytes.length */ protected int length; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java index fce0d405795..31fb1f557ea 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Addressing.java @@ -37,7 +37,7 @@ public class Addressing { public static final String HOSTNAME_PORT_SEPARATOR = ":"; /** - * @param hostAndPort Formatted as <hostname> ':' <port> + * @param hostAndPort Formatted as <hostname> ':' <port> * @return An InetSocketInstance */ public static InetSocketAddress createInetSocketAddressFromHostAndPortStr( @@ -50,7 +50,7 @@ public class Addressing { * @param port Server port * @return Returns a concatenation of hostname and * port in following - * form: <hostname> ':' <port>. For example, if hostname + * form: <hostname> ':' <port>. For example, if hostname * is example.org and port is 1234, this method will return * example.org:1234 */ @@ -59,7 +59,7 @@ public class Addressing { } /** - * @param hostAndPort Formatted as <hostname> ':' <port> + * @param hostAndPort Formatted as <hostname> ':' <port> * @return The hostname portion of hostAndPort */ public static String parseHostname(final String hostAndPort) { @@ -71,7 +71,7 @@ public class Addressing { } /** - * @param hostAndPort Formatted as <hostname> ':' <port> + * @param hostAndPort Formatted as <hostname> ':' <port> * @return The port portion of hostAndPort */ public static int parsePort(final String hostAndPort) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java index d1f4f208ec8..a22133d27d7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Base64.java @@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; *

*
    *
  • v2.2.1 - Fixed bug using URL_SAFE and ORDERED encodings. Fixed bug - * when using very small files (~< 40 bytes).
  • + * when using very small files (~< 40 bytes). *
  • v2.2 - Added some helper methods for encoding/decoding directly from * one file to the next. Also added a main() method to support command * line encoding/decoding from one file to the next. Also added these diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java index 88b728f52ec..d547db11784 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRange.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; *

    *

    * This interface differs from ByteBuffer: + *

    + *
      *
    • On-heap bytes only
    • *
    • Raw {@code byte} access only; does not encode other primitives.
    • *
    • Implements {@code equals(Object)}, {@code #hashCode()}, and @@ -46,7 +48,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; *
    • Can be reused in tight loops like a major compaction which can save * significant amounts of garbage. (Without reuse, we throw off garbage like * this thing.)
    • - *

      + *
    *

    * Mutable, and always evaluates {@code #equals(Object)}, {@code #hashCode()}, * and {@code #compareTo(ByteRange)} based on the current contents. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index 5d452608ecb..683b559e968 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -1377,7 +1377,7 @@ public class Bytes implements Comparable { * @param offset Offset into array at which vint begins. * @throws java.io.IOException e * @return deserialized long from buffer. - * @deprecated Use {@link #readAsVLong(byte[], int)} instead. + * @deprecated Use {@link #readAsVLong(byte[],int)} instead. */ @Deprecated public static long readVLong(final byte [] buffer, final int offset) @@ -1409,7 +1409,7 @@ public class Bytes implements Comparable { /** * @param left left operand * @param right right operand - * @return 0 if equal, < 0 if left is less than right, etc. + * @return 0 if equal, < 0 if left is less than right, etc. */ public static int compareTo(final byte [] left, final byte [] right) { return LexicographicalComparerHolder.BEST_COMPARER. @@ -1425,7 +1425,7 @@ public class Bytes implements Comparable { * @param offset2 Where to start comparing in the right buffer * @param length1 How much to compare from the left buffer * @param length2 How much to compare from the right buffer - * @return 0 if equal, < 0 if left is less than right, etc. + * @return 0 if equal, < 0 if left is less than right, etc. */ public static int compareTo(byte[] buffer1, int offset1, int length1, byte[] buffer2, int offset2, int length2) { @@ -2213,7 +2213,7 @@ public class Bytes implements Comparable { * Bytewise binary increment/deincrement of long contained in byte array * on given amount. * - * @param value - array of bytes containing long (length <= SIZEOF_LONG) + * @param value - array of bytes containing long (length <= SIZEOF_LONG) * @param amount value will be incremented on (deincremented if negative) * @return array of bytes containing incremented long (length == SIZEOF_LONG) */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java index 9f5a88b0628..77acf9be97c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java @@ -305,7 +305,7 @@ public class ClassSize { /** * Aligns a number to 8. * @param num number to align to 8 - * @return smallest number >= input that is a multiple of 8 + * @return smallest number >= input that is a multiple of 8 */ public static int align(int num) { return (int)(align((long)num)); @@ -314,7 +314,7 @@ public class ClassSize { /** * Aligns a number to 8. 
* @param num number to align to 8 - * @return smallest number >= input that is a multiple of 8 + * @return smallest number >= input that is a multiple of 8 */ public static long align(long num) { //The 7 comes from that the alignSize is 8 which is the number of bytes diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java index 17ed7b79db5..1096a17f377 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/DefaultEnvironmentEdge.java @@ -27,8 +27,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; public class DefaultEnvironmentEdge implements EnvironmentEdge { /** * {@inheritDoc} - *

    + *

    * This implementation returns {@link System#currentTimeMillis()} + *

    */ @Override public long currentTime() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java index 7b5ecd00b64..482c5f04744 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/IncrementingEnvironmentEdge.java @@ -45,9 +45,10 @@ public class IncrementingEnvironmentEdge implements EnvironmentEdge { /** * {@inheritDoc} - *

    + *

    * This method increments a known value for the current time each time this * method is called. The first value is 1. + *

    */ @Override public synchronized long currentTime() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java index 8ee214d398f..789bd8d0594 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/JenkinsHash.java @@ -66,11 +66,11 @@ public class JenkinsHash extends Hash { *

    The best hash table sizes are powers of 2. There is no need to do mod * a prime (mod is sooo slow!). If you need less than 32 bits, use a bitmask. * For example, if you need only 10 bits, do - * h = (h & hashmask(10)); + * h = (h & hashmask(10)); * In which case, the hash table should have hashsize(10) elements. * *

    If you are hashing n strings byte[][] k, do it like this: - * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h); + * for (int i = 0, h = 0; i < n; ++i) h = hash( k[i], h); * *

    By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this * code any way you wish, private, educational, or commercial. It's free. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java index 2e69291b707..5398582c393 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/KeyLocker.java @@ -36,8 +36,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; * A utility class to manage a set of locks. Each lock is identified by a String which serves * as a key. Typical usage is:

    * class Example{ - * private final static KeyLocker locker = new Locker(); - *

    + * private final static KeyLocker<String> locker = new Locker<String>(); + *

    + *

    * public void foo(String s){ * Lock lock = locker.acquireLock(s); * try { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java index 20282ff8fba..499e34c3dea 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java @@ -40,7 +40,8 @@ import com.google.common.annotations.VisibleForTesting; * Each value is encoded as one or more bytes. The first byte of the encoding, * its meaning, and a terse description of the bytes that follow is given by * the following table: - *

+ *

+ *
* * * @@ -63,7 +64,6 @@ import com.google.common.annotations.VisibleForTesting; * * *
Content Type          | Encoding
NULL                  | 0x05
negative infinity     | 0x07
variable length BLOB  | 0x35, B
byte-for-byte BLOB    | 0x36, X
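As a small sketch of producing and consuming one of these encodings (assuming SimplePositionedMutableByteRange as the buffer implementation):

    PositionedByteRange buf = new SimplePositionedMutableByteRange(16);
    OrderedBytes.encodeInt64(buf, 42L, Order.ASCENDING); // one marker byte followed by 8 value bytes
    buf.setPosition(0);
    long decoded = OrderedBytes.decodeInt64(buf);        // 42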
- *

* *

Null Encoding

*

@@ -258,8 +258,8 @@ import com.google.common.annotations.VisibleForTesting; * values are 5 bytes in length. *

*

- * {@code OrderedBytes} encodings are heavily influenced by the SQLite4 Key + * {@code OrderedBytes} encodings are heavily influenced by the + * SQLite4 Key * Encoding. Slight deviations are make in the interest of order * correctness and user extensibility. Fixed-width {@code Long} and * {@link Double} encodings are based on implementations from the now defunct @@ -1408,6 +1408,7 @@ public class OrderedBytes { * -Double.MIN_VALUE < -0.0 < +0.0; < Double.MIN_VALUE < ... * < Double.MAX_VALUE < Double.POSITIVE_INFINITY < Double.NaN *

+ *

* Floating point numbers are encoded as specified in IEEE 754. A 64-bit * double precision float consists of a sign bit, 11-bit unsigned exponent * encoded in offset-1023 notation, and a 52-bit significand. The format is diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java index 4ec08204c23..8e7751ddfac 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java @@ -68,9 +68,9 @@ public class Sleeper { } /** - * Sleep for period adjusted by passed startTime + * Sleep for period adjusted by passed startTime * @param startTime Time some task started previous to now. Time to sleep - * will be docked current time minus passed startTime. + * will be docked current time minus passed startTime. */ public void sleep(final long startTime) { if (this.stopper.isStopped()) { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java index 4c143358ee6..3ab783af394 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java @@ -102,7 +102,7 @@ public interface BaseSource { /** * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * This is in ObjectName format. With the default context being Hadoop -> HBase */ String getMetricsJmxContext(); diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java index f44a445f0b5..f703eef4ff1 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeCodec.java @@ -47,9 +47,10 @@ import org.apache.hadoop.hbase.util.ByteBufferUtils; import org.apache.hadoop.io.WritableUtils; /** + *

* This class is created via reflection in DataBlockEncoding enum. Update the enum if class name or * package changes. - *

+ *

* PrefixTreeDataBlockEncoder implementation of DataBlockEncoder. This is the primary entry point * for PrefixTree encoding and decoding. Encoding is delegated to instances of * {@link PrefixTreeEncoder}, and decoding is delegated to instances of diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java index 73e8ab42dba..a4b4c3538ee 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/PrefixTreeSeeker.java @@ -63,8 +63,9 @@ public class PrefixTreeSeeker implements EncodedSeeker { } /** + *

* Currently unused. - *

+ *

* TODO performance leak. should reuse the searchers. hbase does not currently have a hook where * this can be called */ @@ -110,12 +111,13 @@ public class PrefixTreeSeeker implements EncodedSeeker { } /** + *

* Currently unused. - *

+ *

* A nice, lightweight reference, though the underlying cell is transient. This method may return * the same reference to the backing PrefixTreeCell repeatedly, while other implementations may * return a different reference for each Cell. - *

+ *

* The goal will be to transition the upper layers of HBase, like Filters and KeyValueHeap, to * use this method instead of the getKeyValue() methods above. */ diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java index effad571da7..f0b249f531d 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/ArraySearcherPool.java @@ -25,10 +25,11 @@ import java.util.concurrent.LinkedBlockingQueue; import org.apache.hadoop.hbase.classification.InterfaceAudience; /** + *

* Pools PrefixTreeArraySearcher objects. Each Searcher can consist of hundreds or thousands of * objects and 1 is needed for each HFile during a Get operation. With tens of thousands of * Gets/second, reusing these searchers may save a lot of young gen collections. - *

+ *

* Alternative implementation would be a ByteBufferSearcherPool (not implemented yet). */ @InterfaceAudience.Private diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java index ec54c2aea6f..eb0e41f9d70 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java @@ -28,10 +28,11 @@ import org.apache.hadoop.hbase.codec.prefixtree.scanner.CellSearcher; import com.google.common.primitives.UnsignedBytes; /** + *

* Searcher extends the capabilities of the Scanner + ReversibleScanner to add the ability to * position itself on a requested Cell without scanning through cells before it. The PrefixTree is * set up to be a Trie of rows, so finding a particular row is extremely cheap. - *

+ *

* Once it finds the row, it does a binary search through the cells inside the row, which is not as * fast as the trie search, but faster than iterating through every cell like existing block * formats @@ -309,8 +310,8 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im /****************** complete seek when token mismatch ******************/ /** - * @param searcherIsAfterInputKey <0: input key is before the searcher's position
- * >0: input key is after the searcher's position + * @param searcherIsAfterInputKey <0: input key is before the searcher's position
+ * >0: input key is after the searcher's position */ protected CellScannerPosition fixRowTokenMissReverse(int searcherIsAfterInputKey) { if (searcherIsAfterInputKey < 0) {//searcher position is after the input key, so back up @@ -337,8 +338,8 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im } /** - * @param searcherIsAfterInputKey <0: input key is before the searcher's position
- * >0: input key is after the searcher's position + * @param searcherIsAfterInputKey <0: input key is before the searcher's position
+ * >0: input key is after the searcher's position */ protected CellScannerPosition fixRowTokenMissForward(int searcherIsAfterInputKey) { if (searcherIsAfterInputKey < 0) {//searcher position is after the input key diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java index 3e4b75c53e1..926cf30bc8a 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/PrefixTreeEncoder.java @@ -46,12 +46,12 @@ import org.apache.hadoop.io.WritableUtils; /** * This is the primary class for converting a CellOutputStream into an encoded byte[]. As Cells are * added they are completely copied into the various encoding structures. This is important because - * usually the cells being fed in during compactions will be transient.
- *
- * Usage:
- * 1) constructor
- * 4) append cells in sorted order: write(Cell cell)
- * 5) flush()
+ * usually the cells being fed in during compactions will be transient.
+ *
+ * Usage:
+ * 1) constructor
+ * 4) append cells in sorted order: write(Cell cell)
+ * 5) flush()
*/ @InterfaceAudience.Private public class PrefixTreeEncoder implements CellOutputStream { @@ -391,10 +391,11 @@ public class PrefixTreeEncoder implements CellOutputStream { } /** + *

* The following "compile" methods do any intermediate work necessary to transform the cell * fragments collected during the writing phase into structures that are ready to write to the * outputStream. - *

+ *

* The family and qualifier treatment is almost identical, as is timestamp and mvccVersion. */ diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java index c1eb03da4a4..467e7addb06 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnNodeWriter.java @@ -32,14 +32,17 @@ import org.apache.hadoop.hbase.util.vint.UFIntTool; import org.apache.hadoop.hbase.util.vint.UVIntTool; /** + *

* Column nodes can be either family nodes or qualifier nodes, as both sections encode similarly. * The family and qualifier sections of the data block are made of 1 or more of these nodes. - *

- * Each node is composed of 3 sections:
+ *

+ * Each node is composed of 3 sections:
+ *
    *
  • tokenLength: UVInt (normally 1 byte) indicating the number of token bytes *
  • token[]: the actual token bytes *
  • parentStartPosition: the offset of the next node from the start of the family or qualifier * section + *
*/ @InterfaceAudience.Private public class ColumnNodeWriter{ diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java index 3ceae633bfb..b30daf69175 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/column/ColumnSectionWriter.java @@ -34,9 +34,10 @@ import org.apache.hadoop.hbase.util.vint.UFIntTool; import com.google.common.collect.Lists; /** + *

* Takes the tokenized family or qualifier data and flattens it into a stream of bytes. The family * section is written after the row section, and qualifier section after family section. - *

+ *

* The family and qualifier tries, or "column tries", are structured differently than the row trie. * The trie cannot be reassembled without external data about the offsets of the leaf nodes, and * these external pointers are stored in the nubs and leaves of the row trie. For each cell in a @@ -45,12 +46,13 @@ import com.google.common.collect.Lists; * comprises the column name. To assemble the column name, the trie is traversed in reverse (right * to left), with the rightmost tokens pointing to the start of their "parent" node which is the * node to the left. - *

+ *

* This choice was made to reduce the size of the column trie by storing the minimum amount of * offset data. As a result, to find a specific qualifier within a row, you must do a binary search * of the column nodes, reassembling each one as you search. Future versions of the PrefixTree might * encode the columns in both a forward and reverse trie, which would convert binary searches into * more efficient trie searches which would be beneficial for wide rows. + *

*/ @InterfaceAudience.Private public class ColumnSectionWriter { diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java index 5c184bf46c5..35f264b6d27 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/row/RowNodeWriter.java @@ -188,8 +188,9 @@ public class RowNodeWriter{ * offsets into the timestamp/column data structures that are written in the middle of the block. * We use {@link UFIntTool} to encode these indexes/offsets to allow random access during a binary * search of a particular column/timestamp combination. - *

+ *

* Branch nodes will not have any data in these sections. + *

*/ protected void writeFamilyNodeOffsets(OutputStream os) throws IOException { diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java index 75a11addb3b..f44017b7b46 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/Tokenizer.java @@ -31,10 +31,12 @@ import com.google.common.collect.Lists; /** * Data structure used in the first stage of PrefixTree encoding: + *
    *
  • accepts a sorted stream of ByteRanges *
  • splits them into a set of tokens, each held by a {@link TokenizerNode} *
  • connects the TokenizerNodes via standard java references *
  • keeps a pool of TokenizerNodes and a reusable byte[] for holding all token content + *
*


* Mainly used for turning Cell rowKeys into a trie, but also used for family and qualifier * encoding. diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java index e51d5be2bf3..7da78a76d8c 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java @@ -35,12 +35,12 @@ import com.google.common.collect.Lists; * Individual node in a Trie structure. Each node is one of 3 types: *

  • Branch: an internal trie node that may have a token and must have multiple children, but does * not represent an actual input byte[], hence its numOccurrences is 0 - *
  • Leaf: a node with no children and where numOccurrences is >= 1. It's token represents the + *
  • Leaf: a node with no children and where numOccurrences is >= 1. It's token represents the * last bytes in the input byte[]s. *
  • Nub: a combination of a branch and leaf. Its token represents the last bytes of input - * byte[]s and has numOccurrences >= 1, but it also has child nodes which represent input byte[]s + * byte[]s and has numOccurrences >= 1, but it also has child nodes which represent input byte[]s * that add bytes to this nodes input byte[]. - *

    + *

    * Example inputs (numInputs=7): * 0: AAA * 1: AAA @@ -49,13 +49,13 @@ import com.google.common.collect.Lists; * 4: AAB * 5: AABQQ * 6: AABQQ - *

    + *

    * Resulting TokenizerNodes: - * AA <- branch, numOccurrences=0, tokenStartOffset=0, token.length=2 - * A <- leaf, numOccurrences=2, tokenStartOffset=2, token.length=1 - * B <- nub, numOccurrences=3, tokenStartOffset=2, token.length=1 - * QQ <- leaf, numOccurrences=2, tokenStartOffset=3, token.length=2 - *

    + * AA <- branch, numOccurrences=0, tokenStartOffset=0, token.length=2 + * A <- leaf, numOccurrences=2, tokenStartOffset=2, token.length=1 + * B <- nub, numOccurrences=3, tokenStartOffset=2, token.length=1 + * QQ <- leaf, numOccurrences=2, tokenStartOffset=3, token.length=2 + *

    * numInputs == 7 == sum(numOccurrences) == 0 + 2 + 3 + 2 */ @InterfaceAudience.Private @@ -236,13 +236,15 @@ public class TokenizerNode{ /** * Called when we need to convert a leaf node into a branch with 2 leaves. Comments inside the * method assume we have token BAA starting at tokenStartOffset=0 and are adding BOO. The output - * will be 3 nodes:
    - *
  • 1: B <- branch - *
  • 2: AA <- leaf - *
  • 3: OO <- leaf + * will be 3 nodes:
    + *
      + *
    • 1: B <- branch + *
    • 2: AA <- leaf + *
    • 3: OO <- leaf + *
    * - * @param numTokenBytesToRetain => 1 (the B) - * @param bytes => BOO + * @param numTokenBytesToRetain => 1 (the B) + * @param bytes => BOO */ protected void split(int numTokenBytesToRetain, final ByteRange bytes) { int childNodeDepth = nodeDepth; diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java index 7e83457e629..a3ae097f867 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java @@ -33,19 +33,22 @@ public interface CellSearcher extends ReversibleCellScanner { void resetToBeforeFirstEntry(); /** + *

    * Do everything within this scanner's power to find the key. Look forward and backwards. - *

    + *

    + *

    * Abort as soon as we know it can't be found, possibly leaving the Searcher in an invalid state. - *

    + *

    * @param key position the CellScanner exactly on this key * @return true if the cell existed and getCurrentCell() holds a valid cell */ boolean positionAt(Cell key); /** + *

    * Same as positionAt(..), but go to the extra effort of finding the previous key if there's no * exact match. - *

    + *

    * @param key position the CellScanner on this key or the closest cell before * @return AT if exact match
    * BEFORE if on last cell before key
    @@ -54,9 +57,10 @@ public interface CellSearcher extends ReversibleCellScanner { CellScannerPosition positionAtOrBefore(Cell key); /** + *

    * Same as positionAt(..), but go to the extra effort of finding the next key if there's no exact * match. - *

    + *

    * @param key position the CellScanner on this key or the closest cell after * @return AT if exact match
    * AFTER if on first cell after key
    @@ -65,43 +69,47 @@ public interface CellSearcher extends ReversibleCellScanner { CellScannerPosition positionAtOrAfter(Cell key); /** + *

    * Note: Added for backwards compatibility with * {@link org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek} - *

    + *

    * Look for the key, but only look after the current position. Probably not needed for an * efficient tree implementation, but is important for implementations without random access such * as unencoded KeyValue blocks. - *

    + *

    * @param key position the CellScanner exactly on this key * @return true if getCurrent() holds a valid cell */ boolean seekForwardTo(Cell key); /** + *

    * Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no * exact match. - *

    + *

    * @param key - * @return AT if exact match
    - * AFTER if on first cell after key
    + * @return AT if exact match
    + * AFTER if on first cell after key
    * AFTER_LAST if key was after the last cell in this scanner's scope */ CellScannerPosition seekForwardToOrBefore(Cell key); /** + *

    * Same as seekForwardTo(..), but go to the extra effort of finding the next key if there's no * exact match. - *

    + *

    * @param key - * @return AT if exact match
    - * AFTER if on first cell after key
    + * @return AT if exact match
    + * AFTER if on first cell after key
    * AFTER_LAST if key was after the last cell in this scanner's scope */ CellScannerPosition seekForwardToOrAfter(Cell key); /** + *

    * Note: This may not be appropriate to have in the interface. Need to investigate. - *

    + *

    * Position the scanner in an invalid state after the last cell: CellScannerPosition.AFTER_LAST. * This is used by tests and for handling certain edge cases. */ diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java index 3823e7c6ad9..c15429b595e 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/ReversibleCellScanner.java @@ -35,7 +35,7 @@ public interface ReversibleCellScanner extends CellScanner { /** * Try to position the scanner one Cell before the current position. * @return true if the operation was successful, meaning getCurrentCell() will return a valid - * Cell.
    + * Cell.
    * false if there were no previous cells, meaning getCurrentCell() will return null. * Scanner position will be * {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST} @@ -46,7 +46,7 @@ public interface ReversibleCellScanner extends CellScanner { * Try to position the scanner in the row before the current row. * @param endOfRow true for the last cell in the previous row; false for the first cell * @return true if the operation was successful, meaning getCurrentCell() will return a valid - * Cell.
    + * Cell.
    * false if there were no previous cells, meaning getCurrentCell() will return null. * Scanner position will be * {@link org.apache.hadoop.hbase.codec.prefixtree.scanner.CellScannerPosition#BEFORE_FIRST} diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java index fc7c107125b..a3da9f0129e 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UFIntTool.java @@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; * This class converts between positive ints and 1-4 bytes that represent the int. All input ints * must be positive. Max values stored in N bytes are: * - * N=1: 2^8 => 256 - * N=2: 2^16 => 65,536 - * N=3: 2^24 => 16,777,216 - * N=4: 2^31 => 2,147,483,648 (Integer.MAX_VALUE) + * N=1: 2^8 => 256 + * N=2: 2^16 => 65,536 + * N=3: 2^24 => 16,777,216 + * N=4: 2^31 => 2,147,483,648 (Integer.MAX_VALUE) * * This was created to get most of the memory savings of a variable length integer when encoding * an array of input integers, but to fix the number of bytes for each integer to the number needed diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java index dd4095baff3..aeebd2c7fc7 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVIntTool.java @@ -42,7 +42,7 @@ public class UVIntTool { public static final byte[] MAX_VALUE_BYTES = new byte[] { -1, -1, -1, -1, 7 }; - /********************* int -> bytes **************************/ + /********************* int -> bytes **************************/ public static int numBytes(int in) { if (in == 0) { @@ -79,7 +79,7 @@ public class UVIntTool { return numBytes; } - /******************** bytes -> int **************************/ + /******************** bytes -> int **************************/ public static int getInt(byte[] bytes) { return getInt(bytes, 0); diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java index b2437a8ad49..b55e0f6f73b 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/util/vint/UVLongTool.java @@ -43,7 +43,7 @@ public class UVLongTool{ MAX_VALUE_BYTES = new byte[] { -1, -1, -1, -1, -1, -1, -1, -1, 127 }; - /********************* long -> bytes **************************/ + /********************* long -> bytes **************************/ public static int numBytes(long in) {// do a check for illegal arguments if not protected if (in == 0) { @@ -77,7 +77,7 @@ public class UVLongTool{ return numBytes; } - /******************** bytes -> long **************************/ + /******************** bytes -> long **************************/ public static long getLong(byte[] bytes) { return getLong(bytes, 0); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index 00a12ebb73e..13de210c6b3 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ 
b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -50,9 +50,9 @@ import com.google.protobuf.ByteString; * the return is a set of sub-procedures or null in case the procedure doesn't * have sub-procedures. Once the sub-procedures are successfully completed * the execute() method is called again, you should think at it as a stack: - * -> step 1 - * ---> step 2 - * -> step 1 + * -> step 1 + * ---> step 2 + * -> step 1 * * rollback() is called when the procedure or one of the sub-procedures is failed. * the rollback step is supposed to cleanup the resources created during the diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java index bcb04249763..636a0377965 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProc /** * A SequentialProcedure describes one step in a procedure chain. - * -> Step 1 -> Step 2 -> Step 3 + * -> Step 1 -> Step 2 -> Step 3 * * The main difference from a base Procedure is that the execute() of a * SequentialProcedure will be called only once, there will be no second @@ -79,4 +79,4 @@ public abstract class SequentialProcedure extends Procedure - * Note: All previous tables will be removed in favor of these tables. + * Note: All previous tables will be removed in favor of these tables. * @param tables add each of the tables to be archived. */ public synchronized void setArchiveTables(List tables) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java index 31746b689d8..42da0eeacb0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; * {@link org.apache.hadoop.hbase.client.Put}. *

    Does NOT attempt the * {@link org.apache.hadoop.hbase.client.Put} multiple times, - * since the constraint should fail every time for + * since the constraint should fail every time for * the same {@link org.apache.hadoop.hbase.client.Put} (it should be * idempotent). */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java index 9bffc5ccef3..6729f7c0fe6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/package-info.java @@ -19,7 +19,6 @@ /** * Restrict the domain of a data attribute, often times to fulfill business rules/requirements. * -

    Table of Contents

    -

    Overview

    Constraints are used to enforce business rules in a database. @@ -127,9 +125,9 @@ public class IntegerConstraint extends BaseConstraint { public void check(Put p) throws ConstraintException { - Map<byte[], List<KeyValue>> familyMap = p.getFamilyMap(); + Map<byte[], List<KeyValue>> familyMap = p.getFamilyMap(); - for (List <KeyValue> kvs : familyMap.values()) { + for (List <KeyValue> kvs : familyMap.values()) { for (KeyValue kv : kvs) { // just make sure that we can actually pull out an int diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java index 917df5bc34d..67fe96a638f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogManagerCoordination.java @@ -137,7 +137,7 @@ public interface SplitLogManagerCoordination { * It removes recovering regions from Coordination * @param serverNames servers which are just recovered * @param isMetaRecovery whether current recovery is for the meta region on - * serverNames + * serverNames */ void removeRecoveringRegions(Set serverNames, Boolean isMetaRecovery) throws IOException; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java index 6619eaad2e3..7925cb0da15 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java @@ -290,7 +290,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements * region server hosting the region can allow reads to the recovered region * @param recoveredServerNameSet servers which are just recovered * @param isMetaRecovery whether current recovery is for the meta region on - * serverNames + * serverNames */ @Override public void removeRecoveringRegions(final Set recoveredServerNameSet, @@ -684,8 +684,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements /** * ZooKeeper implementation of - * {@link org.apache.hadoop.hbase.coordination. - * SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)} + * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)} */ @Override public void removeStaleRecoveringRegions(final Set knownFailedServers) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 81c933bbd01..cc78626bed6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -51,11 +51,11 @@ import com.google.protobuf.Service; * {@link ColumnInterpreter} is used to interpret column value. This class is * parameterized with the following (these are the types with which the {@link ColumnInterpreter} * is parameterized, and for more description on these, refer to {@link ColumnInterpreter}): - * @param Cell value data type - * @param Promoted data type - * @param

    PB message that is used to transport initializer specific bytes - * @param PB message that is used to transport Cell () instance - * @param PB message that is used to transport Promoted () instance + * @param T Cell value data type + * @param S Promoted data type + * @param P PB message that is used to transport initializer specific bytes + * @param Q PB message that is used to transport Cell (<T>) instance + * @param R PB message that is used to transport Promoted (<S>) instance */ @InterfaceAudience.Private public class AggregateImplementation @@ -229,7 +229,6 @@ extends AggregateService implements CoprocessorService, Coprocessor { /** * Gives the row count for the given column family and column qualifier, in * the given row range as defined in the Scan object. - * @throws IOException */ @Override public void getRowNum(RpcController controller, AggregateRequest request, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java index 3e5acc2de9b..e771a9256c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MultiRowMutationEndpoint.java @@ -52,14 +52,14 @@ import com.google.protobuf.Service; * * Defines a protocol to perform multi row transactions. * See {@link MultiRowMutationEndpoint} for the implementation. - *
    + *
    * See * {@link HRegion#mutateRowsWithLocks(java.util.Collection, java.util.Collection)} * for details and limitations. - *
    + *
    * Example: - *

    - * List mutations = ...;
    + * 
    + * List<Mutation> mutations = ...;
      * Put p1 = new Put(row1);
      * Put p2 = new Put(row2);
      * ...
    @@ -73,7 +73,7 @@ import com.google.protobuf.Service;
      *    MultiRowMutationService.newBlockingStub(channel);
      * MutateRowsRequest mrm = mrmBuilder.build();
      * service.mutateRows(null, mrm);
    - * 
    + * */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @InterfaceStability.Evolving diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index 507a1bb95a7..93eb5f19583 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -344,7 +344,7 @@ public interface RegionObserver extends Coprocessor { * (e.getRegion() returns the parent region) * @throws IOException if an error occurred on the coprocessor * @deprecated Use preSplit( - * final ObserverContext c, byte[] splitRow) + * final ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow) */ @Deprecated void preSplit(final ObserverContext c) throws IOException; @@ -1068,7 +1068,8 @@ public interface RegionObserver extends Coprocessor { *
  • * boolean filterRow() returning true
  • *
  • - * void filterRow(List kvs) removing all the kvs from the passed List
  • + * void filterRow(List<KeyValue> kvs) removing all the kvs + * from the passed List * * @param c the environment provided by the region server * @param s the scanner @@ -1095,7 +1096,8 @@ public interface RegionObserver extends Coprocessor { *
  • * boolean filterRow() returning true
  • *
  • - * void filterRow(List kvs) removing all the kvs from the passed List
  • + * void filterRow(List kvs) removing all the kvs from + * the passed List * * @param c the environment provided by the region server * @param s the scanner diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java index c4777e16033..d175aff410e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/package-info.java @@ -62,7 +62,7 @@ when the corresponding events happen. The master transitions regions through the following states:

    -unassigned -> pendingOpen -> open -> pendingClose -> closed. +unassigned -&gt; pendingOpen -&gt; open -&gt; pendingClose -&gt; closed.

    Coprocessors have opportunity to intercept and handle events in pendingOpen, open, and pendingClose states. @@ -75,7 +75,7 @@ can piggyback or fail this process.

    • preOpen, postOpen: Called before and after the region is reported as - online to the master.
    • + online to the master.

    Open

    @@ -85,9 +85,9 @@ split, etc.). Coprocessors can piggyback administrative actions via:

    • preFlush, postFlush: Called before and after the memstore is flushed - into a new store file.
    • -

    • preCompact, postCompact: Called before and after compaction.
    • -

    • preSplit, postSplit: Called after the region is split.
    • + into a new store file. +

    • preCompact, postCompact: Called before and after compaction.
    • +
    • preSplit, postSplit: Called after the region is split.

    PendingClose

    @@ -99,7 +99,7 @@ an indication to this effect will be passed as an argument.

    • preClose and postClose: Called before and after the region is - reported as closed to the master.
    • + reported as closed to the master.

    @@ -109,23 +109,23 @@ observe and mediate client actions on the region:

    • preGet, postGet: Called before and after a client makes a Get - request.
    • + request.

    • preExists, postExists: Called before and after the client tests - for existence using a Get.
    • + for existence using a Get.

    • prePut and postPut: Called before and after the client stores a value. -
    • +

    • preDelete and postDelete: Called before and after the client - deletes a value.
    • + deletes a value.

    • preScannerOpen postScannerOpen: Called before and after the client - opens a new scanner.
    • + opens a new scanner.

    • preScannerNext, postScannerNext: Called before and after the client - asks for the next row on a scanner.
    • + asks for the next row on a scanner.

    • preScannerClose, postScannerClose: Called before and after the client - closes a scanner.
    • + closes a scanner.

    • preCheckAndPut, postCheckAndPut: Called before and after the client - calls checkAndPut().
    • + calls checkAndPut().

    • preCheckAndDelete, postCheckAndDelete: Called before and after the client - calls checkAndDelete().
    • + calls checkAndDelete().

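To make the hook list above concrete, here is a minimal sketch of a region observer (not part of the patch). It assumes the 1.x-era coprocessor API, where BaseRegionObserver provides no-op defaults and the override point for the pre-Get hook is named preGetOp; treat the exact method name and signature as assumptions.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    public class ExampleGetObserver extends BaseRegionObserver {
      @Override
      public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> ctx,
          Get get, List<Cell> results) throws IOException {
        // Runs on the region server before the Get is serviced; a real observer
        // could rewrite the Get, pre-populate results, or bypass the core call here.
      }
    }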
    You can also extend abstract class BaseRegionObserverCoprocessor which @@ -245,7 +245,7 @@ recognize and load it.

    <path> must point to a jar, can be on any filesystem supported by the -Hadoop FileSystem object. +Hadoop FileSystem object.

    <class> is the coprocessor implementation class. A jar can contain more than one coprocessor implementation, but only one can be specified @@ -270,7 +270,7 @@ policy implementations, perhaps) ahead of observers. ":" + Coprocessor.Priority.USER); HBaseAdmin admin = new HBaseAdmin(this.conf); admin.createTable(htd); - +

    Chain of RegionObservers

    As described above, multiple coprocessors can be loaded at one region at the same time. In case of RegionObserver, you can have more than one @@ -278,8 +278,6 @@ RegionObservers register to one same hook point, i.e, preGet(), etc. When a region reach the hook point, the framework will invoke each registered RegionObserver by the order of assigned priority. - -
    */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java index 750f87c71d2..746c59b3ed1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/TimeoutException.java @@ -38,7 +38,8 @@ public class TimeoutException extends Exception { * Exception indicating that an operation attempt has timed out * @param start time the operation started (ms since epoch) * @param end time the timeout was triggered (ms since epoch) - * @param expected expected amount of time for the operation to complete (ms) (ideally, expected <= end-start) + * @param expected expected amount of time for the operation to complete (ms) + * (ideally, expected <= end-start) */ public TimeoutException(String sourceName, long start, long end, long expected) { super("Timeout elapsed! Source:" + sourceName + " Start:" + start + ", End:" + end diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index 84c3548cc24..4ce2d942cd7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -94,9 +94,9 @@ import com.sun.jersey.spi.container.servlet.ServletContainer; * Create a Jetty embedded server to answer http requests. The primary goal * is to serve up status information for the server. * There are three contexts: - * "/logs/" -> points to the log directory - * "/static/" -> points to common static files (src/webapps/static) - * "/" -> the jsp server code from (src/webapps/) + * "/logs/" -> points to the log directory + * "/static/" -> points to common static files (src/webapps/static) + * "/" -> the jsp server code from (src/webapps/) */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -425,7 +425,7 @@ public class HttpServer implements FilterContainer { /** * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/. + * The jsp scripts are taken from src/webapps/<name>. * @param name The name of the server * @param port The port to use on the server * @param findPort whether the server should start at the given port and @@ -1108,13 +1108,14 @@ public class HttpServer implements FilterContainer { /** * Checks the user has privileges to access to instrumentation servlets. - *

    + *

    * If hadoop.security.instrumentation.requires.admin is set to FALSE * (default value) it always returns TRUE. - *

    + *

    * If hadoop.security.instrumentation.requires.admin is set to TRUE * it will check that if the current user is in the admin ACLS. If the user is * in the admin ACLs it returns TRUE, otherwise it returns FALSE. + *

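A hedged sketch (not part of the patch) of the rule just described: access to the instrumentation servlets hinges on one boolean configuration key. Only the key name and the TRUE/FALSE behaviour come from the javadoc above; the method shape and the ACL helper are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AccessControlList;

    final class InstrumentationAccessSketch {
      static boolean mayViewInstrumentation(Configuration conf, AccessControlList adminAcl,
          UserGroupInformation caller) {
        // Default FALSE: instrumentation servlets are open to every caller.
        if (!conf.getBoolean("hadoop.security.instrumentation.requires.admin", false)) {
          return true;
        }
        // Otherwise the caller must be present in the admin ACL.
        return adminAcl != null && caller != null && adminAcl.isUserAllowed(caller);
      }
    }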
    * * @param servletContext the servlet context. * @param request the servlet request. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java index 7549a3efa43..e4a971a310f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/package-info.java @@ -16,9 +16,8 @@ * limitations under the License. */ /** - * *

    - * Copied from hadoop source code.
    + * Copied from hadoop source code.
    * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why. *

    */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java index 1c5a593b00a..3caf67f7f01 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FileLink.java @@ -91,7 +91,7 @@ import org.apache.hadoop.hbase.util.FSUtils; public class FileLink { private static final Log LOG = LogFactory.getLog(FileLink.class); - /** Define the Back-reference directory name prefix: .links-/ */ + /** Define the Back-reference directory name prefix: .links-<hfile>/ */ public static final String BACK_REFERENCES_DIRECTORY_PREFIX = ".links-"; /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java index ff339518788..c17720c9fc6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java @@ -401,7 +401,6 @@ public class HFileLink extends FileLink { * @param rootDir root hbase directory * @param linkRefPath Link Back Reference path * @return full path of the referenced hfile - * @throws IOException on unexpected error. */ public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) { Pair p = parseBackReferenceName(linkRefPath.getName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java index fc5bd5d0206..344d496b363 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/WALLink.java @@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.util.FSUtils; /** * WALLink describes a link to a WAL. * - * An wal can be in /hbase/.logs// - * or it can be in /hbase/.oldlogs/ + * An wal can be in /hbase/.logs/<server>/<wal> + * or it can be in /hbase/.oldlogs/<wal> * * The link checks first in the original path, * if it is not present it fallbacks to the archived path. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 35458a29b3e..d18dada7f1a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -119,11 +119,12 @@ import com.google.common.base.Preconditions; * File is made of data blocks followed by meta data blocks (if any), a fileinfo * block, data block index, meta data block index, and a fixed size trailer * which records the offsets at which file changes content type. - *
    <data blocks><meta blocks><fileinfo><data index><meta index><trailer>
    + *
    <data blocks><meta blocks><fileinfo><
    + * data index><meta index><trailer>
    * Each block has a bit of magic at its start. Block are comprised of * key/values. In data blocks, they are both byte arrays. Metadata blocks are * a String key and a byte array value. An empty file looks like this: - *
    <fileinfo><trailer>
    . That is, there are not data nor meta + *
    <fileinfo><trailer>
    . That is, there are not data nor meta * blocks present. *

    * TODO: Do scanners need to be able to take a start and end row? diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index a64bb948cdf..f60272f6533 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -67,16 +67,16 @@ import com.google.common.base.Preconditions; *

  • Uncompressed block size, excluding header, excluding checksum (4 bytes) *
  • The offset of the previous block of the same type (8 bytes). This is * used to be able to navigate to the previous block without going to the block - *
  • For minorVersions >=1, the ordinal describing checksum type (1 byte) - *
  • For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes) - *
  • For minorVersions >=1, the size of data on disk, including header, + *
  • For minorVersions >=1, the ordinal describing checksum type (1 byte) + *
  • For minorVersions >=1, the number of data bytes/checksum chunk (4 bytes) + *
  • For minorVersions >=1, the size of data on disk, including header, * excluding checksums (4 bytes) * *
  • *
  • Raw/Compressed/Encrypted/Encoded data. The compression algorithm is the * same for all the blocks in the {@link HFile}, similarly to what was done in * version 1. - *
  • For minorVersions >=1, a series of 4 byte checksums, one each for + *
  • For minorVersions >=1, a series of 4 byte checksums, one each for * the number of bytes specified by bytesPerChecksum. * * @@ -1239,8 +1239,8 @@ public class HFileBlock implements Cacheable { /** * Creates a block iterator over the given portion of the {@link HFile}. - * The iterator returns blocks starting with offset such that offset <= - * startOffset < endOffset. Returned blocks are always unpacked. + * The iterator returns blocks starting with offset such that offset <= + * startOffset < endOffset. Returned blocks are always unpacked. * * @param startOffset the offset of the block to start iteration with * @param endOffset the offset to end iteration at (exclusive) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index 642b6c769b5..c6655c11a34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -1002,7 +1002,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { /** * @param v - * @return True if v < 0 or v > current block buffer limit. + * @return True if v < 0 or v > current block buffer limit. */ protected final boolean checkLen(final int v) { return v < 0 || v > this.blockBuffer.limit(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java index 6b527f6ef3d..4d9990e9e59 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileScanner.java @@ -44,11 +44,11 @@ public interface HFileScanner { * Consider the cell stream of all the cells in the file, * c[0] .. c[n], where there are n cells in the file. * @param cell - * @return -1, if cell < c[0], no position; + * @return -1, if cell < c[0], no position; * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. + * 1, such that c[i] < cell, and scanner is left in position i. * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. + * c[i] < cell <= c[i+1]. * If there is no cell c[i+1] greater than or equal to the input cell, then the * scanner will position itself at the end of the file and next() will return * false when it is called. @@ -66,14 +66,14 @@ public interface HFileScanner { * c[0] .. c[n], where there are n cellc in the file after * current position of HFileScanner. * The scanner will position itself between c[i] and c[i+1] where - * c[i] < cell <= c[i+1]. + * c[i] < cell <= c[i+1]. * If there is no cell c[i+1] greater than or equal to the input cell, then the * scanner will position itself at the end of the file and next() will return * false when it is called. * @param cell Cell to find (should be non-null) - * @return -1, if cell < c[0], no position; + * @return -1, if cell < c[0], no position; * 0, such that c[i] = cell and scanner is left in position i; and - * 1, such that c[i] < cell, and scanner is left in position i. + * 1, such that c[i] < cell, and scanner is left in position i. * @throws IOException */ int reseekTo(Cell cell) throws IOException; @@ -82,9 +82,9 @@ public interface HFileScanner { * Consider the cell stream of all the cells in the file, * c[0] .. c[n], where there are n cells in the file. 
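For reference, a small sketch (not part of the patch) of how a caller might interpret the -1/0/1 contract documented in the seekTo/reseekTo hunks above; the helper class is hypothetical, and only the HFileScanner methods it calls are taken from the interface being patched.

    import java.io.IOException;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.io.hfile.HFileScanner;

    final class SeekToSketch {
      /** True if the scanner ends up positioned on a cell greater than or equal to 'cell'. */
      static boolean seekAtOrAfter(HFileScanner scanner, Cell cell) throws IOException {
        int result = scanner.seekTo(cell);
        if (result < 0) {
          // cell sorts before the first cell of the file: fall back to the first cell.
          return scanner.seekTo();
        }
        // 0 = exact match; 1 = positioned on the last cell smaller than 'cell',
        // so advance once to reach the first cell >= 'cell' (next() is false at EOF).
        return result == 0 || scanner.next();
      }
    }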
* @param cell Cell to find - * @return false if cell <= c[0] or true with scanner in position 'i' such - * that: c[i] < cell. Furthermore: there may be a c[i+1], such that - * c[i] < cell <= c[i+1] but there may also NOT be a c[i+1], and next() will + * @return false if cell <= c[0] or true with scanner in position 'i' such + * that: c[i] < cell. Furthermore: there may be a c[i+1], such that + * c[i] < cell <= c[i+1] but there may also NOT be a c[i+1], and next() will * return false (EOF). * @throws IOException */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 18dcbb0d974..806ddc97b58 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -480,7 +480,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { } /** - * Evict the block, and it will be cached by the victim handler if exists && + * Evict the block, and it will be cached by the victim handler if exists && * block may be read again later * @param block * @param evictedByEvictionProcess true if the given block is evicted by diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java index 1624082d5c5..0b28d72d420 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.io.HeapSize; /** * A memory-bound queue that will grow until an element brings - * total size >= maxSize. From then on, only entries that are sorted larger + * total size >= maxSize. From then on, only entries that are sorted larger * than the smallest current entry will be inserted/replaced. * *

    Use this when you want to find the largest elements (according to their diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java index f2986982726..d4a279cb996 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/package-info.java @@ -37,7 +37,7 @@ * (roughly because GC is less). See Nick Dimiduk's * BlockCache 101 for some numbers. * - *

    Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}

    + *

    Enabling {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache}

    * See the HBase Reference Guide Enable BucketCache. * */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java index 0da16a7212f..bb63e018b52 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCallContext.java @@ -27,7 +27,7 @@ public interface RpcCallContext extends Delayable { /** * Check if the caller who made this IPC call has disconnected. * If called from outside the context of IPC, this does nothing. - * @return < 0 if the caller is still connected. The time in ms + * @return < 0 if the caller is still connected. The time in ms * since the disconnection otherwise */ long disconnectSince(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java index 86fc5dfe6b4..48a982b2ad5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.client.Scan; *

    * *
    - * List scans = new ArrayList();
    + * List<Scan> scans = new ArrayList<Scan>();
      * 
      * Scan scan1 = new Scan();
      * scan1.setStartRow(firstRow1);
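The javadoc example above is cut short by the hunk boundary. For reference, a hedged completion (not part of the patch) showing the usual shape of a multi-table scan setup: each Scan is tagged with its target table and the list is handed to TableMapReduceUtil. The table name, the start row and the use of IdentityTableMapper are placeholders for illustration.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.IdentityTableMapper;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.mapreduce.Job;

    final class MultiTableScanSketch {
      static void configure(Job job, byte[] firstRow1) throws IOException {
        List<Scan> scans = new ArrayList<Scan>();
        Scan scan1 = new Scan();
        scan1.setStartRow(firstRow1);
        // Each Scan carries the name of the table it targets.
        scan1.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes("table1"));
        scans.add(scan1);
        TableMapReduceUtil.initTableMapperJob(scans, IdentityTableMapper.class,
            ImmutableBytesWritable.class, Result.class, job);
      }
    }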
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
    index 769c40bce32..cb9759df978 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
    @@ -513,7 +513,8 @@ public class TableMapReduceUtil {
        * and add it to the credentials for the given map reduce job.
        *
        * The quorumAddress is the key to the ZK ensemble, which contains:
    -   * hbase.zookeeper.quorum, hbase.zookeeper.client.port and zookeeper.znode.parent
    +   * hbase.zookeeper.quorum, hbase.zookeeper.client.port and
    +   * zookeeper.znode.parent
        *
        * @param job The job that requires the permission.
        * @param quorumAddress string that contains the 3 required configuratins
    @@ -619,7 +620,8 @@ public class TableMapReduceUtil {
        * default; e.g. copying tables between clusters, the source would be
        * designated by hbase-site.xml and this param would have the
        * ensemble address of the remote cluster.  The format to pass is particular.
    -   * Pass  <hbase.zookeeper.quorum>:<hbase.zookeeper.client.port>:<zookeeper.znode.parent>
    +   * Pass  <hbase.zookeeper.quorum>:<
    +   *             hbase.zookeeper.client.port>:<zookeeper.znode.parent>
        *  such as server,server2,server3:2181:/hbase.
        * @param serverClass redefined hbase.regionserver.class
        * @param serverImpl redefined hbase.regionserver.impl
    @@ -650,7 +652,8 @@ public class TableMapReduceUtil {
        * default; e.g. copying tables between clusters, the source would be
        * designated by hbase-site.xml and this param would have the
        * ensemble address of the remote cluster.  The format to pass is particular.
    -   * Pass  <hbase.zookeeper.quorum>:<hbase.zookeeper.client.port>:<zookeeper.znode.parent>
    +   * Pass  <hbase.zookeeper.quorum>:<
    +   *             hbase.zookeeper.client.port>:<zookeeper.znode.parent>
        *  such as server,server2,server3:2181:/hbase.
        * @param serverClass redefined hbase.regionserver.class
        * @param serverImpl redefined hbase.regionserver.impl
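For reference, a small sketch (not part of the patch) of the remote-cluster quorum address format these hunks describe: the three ZooKeeper settings joined with ':'. The table name and the use of IdentityTableReducer are placeholders; the initTableReducerJob overload is the one whose javadoc is edited above.

    import java.io.IOException;
    import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    final class RemoteClusterReducerSketch {
      static void configure(Job job) throws IOException {
        // hbase.zookeeper.quorum : hbase.zookeeper.client.port : zookeeper.znode.parent
        String remoteCluster = "server,server2,server3:2181:/hbase";
        TableMapReduceUtil.initTableReducerJob("targetTable", IdentityTableReducer.class,
            job, null, remoteCluster, null, null);
      }
    }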
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
    index f859780be07..5bd8d1536b9 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java
    @@ -139,7 +139,8 @@ public class TableRecordReaderImpl {
       /**
        * Build the scanner. Not done in constructor to allow for extension.
        *
    -   * @throws IOException, InterruptedException
    +   * @throws IOException
    +   * @throws InterruptedException
        */
       public void initialize(InputSplit inputsplit,
           TaskAttemptContext context) throws IOException,
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    index bcb652b0537..86bcdae8ea4 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    @@ -2086,7 +2086,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     
       /**
        * Report whether this master has started initialization and is about to do meta region assignment
    -   * @return true if master is in initialization & about to assign hbase:meta regions
    +   * @return true if master is in initialization & about to assign hbase:meta regions
        */
       public boolean isInitializationStartsMetaRegionAssignment() {
         return this.initializationBeforeMetaAssignment;
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
    index e82bd5467bd..50070eff67e 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
    @@ -765,7 +765,7 @@ public class MasterRpcServices extends RSRpcServices
        * @return Pair indicating the number of regions updated Pair.getFirst is the
        *         regions that are yet to be updated Pair.getSecond is the total number
        *         of regions of the table
    -   * @throws IOException
    +   * @throws ServiceException
        */
       @Override
       public GetSchemaAlterStatusResponse getSchemaAlterStatus(
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
    index 62b7333494e..c3634e2cfca 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
    @@ -53,7 +53,7 @@ import com.google.common.base.Preconditions;
     public class RegionStateStore {
       private static final Log LOG = LogFactory.getLog(RegionStateStore.class);
     
    -  /** The delimiter for meta columns for replicaIds > 0 */
    +  /** The delimiter for meta columns for replicaIds > 0 */
       protected static final char META_REPLICA_ID_DELIMITER = '_';
     
       private volatile Region metaRegion;
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
    index 9673acfe53e..fad84f512f8 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
    @@ -132,7 +132,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
        *     Order the regions to move from most recent to least.
        *
        * 
  • Iterate down the least loaded servers, assigning regions so each server - * has exactly MIN regions. Stop once you reach a server that + * has exactly MIN regions. Stop once you reach a server that * already has >= MIN regions. * * Regions being assigned to underloaded servers are those that were shed @@ -159,7 +159,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer { * *
  • If we still have more regions that need assignment, again iterate the * least loaded servers, this time giving each one (filling them to - * MAX) until we run out. + * MAX) until we run out. * *
  • All servers will now either host MIN or MAX regions. * diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index e58f855b1a4..4955cfaa56e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -49,8 +49,8 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; /** - *

    This is a best effort load balancer. Given a Cost function F(C) => x It will - * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the + *

    This is a best effort load balancer. Given a Cost function F(C) => x It will + * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the * new cluster state becomes the plan. It includes costs functions to compute the cost of:

    *
      *
    • Region Load
    • diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java index 662e4bd2e5a..d352561a291 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ColumnTracker.java @@ -31,15 +31,17 @@ import org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode; *

      * Currently there are two different types of Store/Family-level queries. *

      • {@link ExplicitColumnTracker} is used when the query specifies - * one or more column qualifiers to return in the family. - *
        • {@link ScanWildcardColumnTracker} is used when no columns are - * explicitly specified. + * one or more column qualifiers to return in the family.
        • + *
        • {@link ScanWildcardColumnTracker} is used when no columns are + * explicitly specified.
        • + *
        *

        * This class is utilized by {@link ScanQueryMatcher} mainly through two methods: *

        • {@link #checkColumn} is called when a Put satisfies all other - * conditions of the query. - *
          • {@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher - * believes that the current column should be skipped (by timestamp, filter etc.) + * conditions of the query.
          • + *
          • {@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher + * believes that the current column should be skipped (by timestamp, filter etc.)
          • + *
          *

          * These two methods returns a * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java index d40b21d74b9..930baf0f0dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java @@ -68,7 +68,7 @@ public interface CompactionRequestor { /** * @param r Region to compact * @param why Why compaction was requested -- used in debug messages - * @param pri Priority of this compaction. minHeap. <=0 is critical + * @param pri Priority of this compaction. minHeap. <=0 is critical * @param requests custom compaction requests. Each compaction must specify the store on which it * is acting. Can be null in which case a compaction will be attempted on all * stores for the region. @@ -84,7 +84,7 @@ public interface CompactionRequestor { * @param r Region to compact * @param s Store within region to compact * @param why Why compaction was requested -- used in debug messages - * @param pri Priority of this compaction. minHeap. <=0 is critical + * @param pri Priority of this compaction. minHeap. <=0 is critical * @param request custom compaction request to run. {@link Store} and {@link Region} for the * request must match the region and store specified here. * @return The created {@link CompactionRequest} or null if no compaction was started diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 9839124b853..c24d6dff78a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -222,7 +222,7 @@ public class DefaultMemStore implements MemStore { /** * Write an update * @param cell - * @return approximate size of the passed KV & newly added KV which maybe different than the + * @return approximate size of the passed KV & newly added KV which maybe different than the * passed-in KV */ @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java index 70254fe2dcb..8f466fc720f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DeleteTracker.java @@ -26,9 +26,10 @@ import org.apache.hadoop.hbase.Cell; * during the course of a Get or Scan operation. *

          * This class is utilized through three methods: - *

          • {@link #add} when encountering a Delete - *
          • {@link #isDeleted} when checking if a Put KeyValue has been deleted - *
          • {@link #update} when reaching the end of a StoreFile + *
            • {@link #add} when encountering a Delete
            • + *
            • {@link #isDeleted} when checking if a Put KeyValue has been deleted
            • + *
            • {@link #update} when reaching the end of a StoreFile
            • + *
            */ @InterfaceAudience.Private public interface DeleteTracker { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java index cbf7719afa9..9e80d8df363 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ExplicitColumnTracker.java @@ -41,9 +41,10 @@ import org.apache.hadoop.hbase.util.Bytes; *

            * This class is utilized by {@link ScanQueryMatcher} mainly through two methods: *

            • {@link #checkColumn} is called when a Put satisfies all other - * conditions of the query. - *
              • {@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher - * believes that the current column should be skipped (by timestamp, filter etc.) + * conditions of the query.
              • + *
              • {@link #getNextRowOrNextColumn} is called whenever ScanQueryMatcher + * believes that the current column should be skipped (by timestamp, filter etc.)
              • + *
              *

              * These two methods returns a * {@link org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index 54480250bb1..3deb2580c1c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -206,7 +206,7 @@ public class HeapMemoryManager { } /** - * @return heap occupancy percentage, 0 <= n <= 1 + * @return heap occupancy percentage, 0 <= n <= 1 */ public float getHeapOccupancyPercent() { return this.heapOccupancyPercent; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java index 476bcdb2cb4..1439388cad1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java @@ -93,8 +93,8 @@ extends ConstantSizeRegionSplitPolicy { } /** - * @return Region max size or count of regions squared * flushsize, which ever is - * smaller; guard against there being zero regions on this server. + * @return Region max size or count of regions squared * flushsize, + * which ever is smaller; guard against there being zero regions on this server. */ protected long getSizeToCheck(final int tableRegionsCount) { // safety check for 100 to avoid numerical overflow in extreme cases diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java index 18f519866e0..b68868ed2e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LruHashMap.java @@ -36,7 +36,7 @@ import java.util.Set; * The LruHashMap is a memory-aware HashMap with a configurable maximum * memory footprint. *

              - * It maintains an ordered list of all entries in the map ordered by + * It maintains an ordered list of all entries in the/ map ordered by * access time. When space needs to be freed becase the maximum has been * reached, or the application has asked to free memory, entries will be * evicted according to an LRU (least-recently-used) algorithm. That is, @@ -102,7 +102,7 @@ implements HeapSize, Map { * @throws IllegalArgumentException if the initial capacity is less than one * @throws IllegalArgumentException if the initial capacity is greater than * the maximum capacity - * @throws IllegalArgumentException if the load factor is <= 0 + * @throws IllegalArgumentException if the load factor is <= 0 * @throws IllegalArgumentException if the max memory usage is too small * to support the base overhead */ @@ -141,7 +141,7 @@ implements HeapSize, Map { * @throws IllegalArgumentException if the initial capacity is less than one * @throws IllegalArgumentException if the initial capacity is greater than * the maximum capacity - * @throws IllegalArgumentException if the load factor is <= 0 + * @throws IllegalArgumentException if the load factor is <= 0 */ public LruHashMap(int initialCapacity, float loadFactor) { this(initialCapacity, loadFactor, DEFAULT_MAX_MEM_USAGE); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java index 87710dfd54e..0566dcad870 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreChunkPool.java @@ -37,7 +37,7 @@ import org.apache.hadoop.util.StringUtils; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A pool of {@link HeapMemStoreLAB$Chunk} instances. + * A pool of {@link HeapMemStoreLAB.Chunk} instances. * * MemStoreChunkPool caches a number of retired chunks for reusing, it could * decrease allocating bytes when writing, thereby optimizing the garbage diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 9f98ba6b43e..b2cb772d3c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -22,9 +22,10 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; /** + *

              * This class is for maintaining the various regionserver statistics * and publishing them through the metrics interfaces. - *

              + *

 * This class has a number of metrics variables that are publicly accessible;
 * these variables (objects) have methods to update their values.
 */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index a2284dd5fec..2b12dec6bd3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
  * ObserverContext, MiniBatchOperationInProgress)
  * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
  * ObserverContext, MiniBatchOperationInProgress)
- * @param Pair pair of Mutations and associated rowlock ids .
+ * @param T Pair&lt;Mutation, Integer&gt; pair of Mutations and associated rowlock ids .
  */
 @InterfaceAudience.Private
 public class MiniBatchOperationInProgress<T> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
index c0ab1a07e3e..1eb05f0fbe3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/NonReversedNonLazyKeyValueScanner.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Cell;
 
 /**
- * A "non-reversed & non-lazy" scanner which does not support backward scanning
+ * A "non-reversed &amp; non-lazy" scanner which does not support backward scanning
  * and always does a real seek operation. Most scanners are inherited from this
  * class.
  */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index aedd3510ac8..cedaa7c9642 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1340,14 +1340,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
    * The opening is coordinated by ZooKeeper, and this method requires the znode to be created
    *  before being called. As a consequence, this method should be called only from the master.
    * <p>
-   * Different manages states for the region are:
-   * </ul>
+   * Different manages states for the region are:
+   * <ul>
    * <li>region not opened: the region opening will start asynchronously.</li>
    * <li>a close is already in progress: this is considered as an error.</li>
    * <li>an open is already in progress: this new open request will be ignored. This is important
    *  because the Master can do multiple requests if it crashes.</li>
-   * <li>the region is already opened:  this new open request will be ignored./li>
+   * <li>the region is already opened:  this new open request will be ignored.</li>
    * </ul>
    * <p>
    * Bulk assign: If there are more than 1 region to open, it will be considered as a bulk assign.
    * For a single region opening, errors are sent through a ServiceException. For bulk assign,
@@ -1780,7 +1780,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   /**
    * Atomically bulk load several HFiles into an open region
    * @return true if successful, false is failed but recoverably (no action)
-   * @throws IOException if failed unrecoverably
+   * @throws ServiceException if failed unrecoverably
    */
   @Override
   public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 566745835af..5c500b485c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -514,7 +514,7 @@ public interface Region extends ConfigurationObserver {
    * Attempts to atomically load a group of hfiles. This is critical for loading
    * rows with multiple column families atomically.
    *
-   * @param familyPaths List of Pair<byte[] column family, String hfilePath>
+   * @param familyPaths List of Pair&lt;byte[] column family, String hfilePath&gt;
    * @param bulkLoadListener Internal hooks enabling massaging/preparation of a
    * file about to be bulk loaded
    * @param assignSeqId
@@ -652,7 +652,6 @@ public interface Region extends ConfigurationObserver {
    * the region needs compacting
    *
    * @throws IOException general io exceptions
-   * @throws DroppedSnapshotException Thrown when abort is required
    * because a snapshot was not properly persisted.
    */
   FlushResult flush(boolean force) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
index 66e087bfd58..1bc6546eff3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionScanner.java
@@ -91,7 +91,7 @@ public interface RegionScanner extends InternalScanner {
    * Upon returning from this method, the {@link ScannerContext} will contain information about the
    * progress made towards the limits. This is a special internal method to be called from
    * coprocessor hooks to avoid expensive setup. Caller must set the thread's readpoint, start and
-   * close a region operation, an synchronize on the scanner object. Example:
+   * close a region operation, an synchronize on the scanner object. Example: <pre>
    * HRegion region = ...;
    * RegionScanner scanner = ...
    * MultiVersionConsistencyControl.setThreadReadPoint(scanner.getMvccReadPoint());
@@ -105,7 +105,7 @@ public interface RegionScanner extends InternalScanner {
    * } finally {
    *   region.closeRegionOperation();
    * }
-   *
+   * </pre>
    * @param result return output array
    * @param scannerContext The {@link ScannerContext} instance encapsulating all limits that should
    * be tracked during calls to this method. The progress towards these limits can be
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
index a5c17fb058a..adee9119306 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScanDeleteTracker.java
@@ -36,9 +36,10 @@ import org.apache.hadoop.hbase.util.Bytes;
  *
  * This class is utilized through three methods:
- * <ul>
- * <li>{@link #add} when encountering a Delete or DeleteColumn
- * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted
- * <li>{@link #update} when reaching the end of a StoreFile or row for scans
+ * <ul>
+ * <li>{@link #add} when encountering a Delete or DeleteColumn</li>
+ * <li>{@link #isDeleted} when checking if a Put KeyValue has been deleted</li>
+ * <li>{@link #update} when reaching the end of a StoreFile or row for scans</li>
+ * </ul>
  * <p>
  * This class is NOT thread-safe as queries are never multi-threaded
  */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 3b169ad6f18..46fce673cda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -126,7 +126,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   /**
    * Adds a value to the memstore
    * @param cell
-   * @return memstore size delta & newly added KV which maybe different than the passed in KV
+   * @return memstore size delta &amp; newly added KV which maybe different than the passed in KV
    */
   Pair<Long, Cell> add(Cell cell);
 
@@ -136,8 +136,9 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   long timeOfOldestEdit();
 
   /**
-   * Removes a Cell from the memstore. The Cell is removed only if its key & memstoreTS match the
-   * key & memstoreTS value of the cell parameter.
+   * Removes a Cell from the memstore. The Cell is removed only if its key
+   * &amp; memstoreTS match the key &amp; memstoreTS value of the cell
+   * parameter.
    * @param cell
    */
   void rollback(final Cell cell);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 4be5c7b6a73..d63ccca88bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -52,7 +52,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Scanner scans both the memstore and the Store. Coalesce KeyValue stream
- * into List<KeyValue> for a single row.
+ * into List&lt;KeyValue&gt; for a single row.
  */
 @InterfaceAudience.Private
 public class StoreScanner extends NonReversedNonLazyKeyValueScanner
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
index 55c057b470b..cb8934691c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReplayHLogKey.java
@@ -44,7 +44,6 @@ public class ReplayHLogKey extends HLogKey {
   /**
    * Returns the original sequence id
    * @return long the new assigned sequence number
-   * @throws InterruptedException
    */
   @Override
   public long getSequenceId() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index 11c4ee173b2..f6619e8e0f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -57,9 +57,9 @@ import com.google.common.annotations.VisibleForTesting;
  * Previously, if a transaction contains 3 edits to c1, c2, c3 for a row R,
  * the WAL would have three log entries as follows:
  *
- * <logseq1-for-edit1>:<KeyValue-for-edit-c1>
- * <logseq2-for-edit2>:<KeyValue-for-edit-c2>
- * <logseq3-for-edit3>:<KeyValue-for-edit-c3>
+ * &lt;logseq1-for-edit1&gt;:&lt;KeyValue-for-edit-c1&gt;
+ * &lt;logseq2-for-edit2&gt;:&lt;KeyValue-for-edit-c2&gt;
+ * &lt;logseq3-for-edit3&gt;:&lt;KeyValue-for-edit-c3&gt;
  *
  * This presents problems because row level atomicity of transactions
  * was not guaranteed.  If we crash after few of the above appends make
@@ -68,15 +68,15 @@ import com.google.common.annotations.VisibleForTesting;
  * In the new world, all the edits for a given transaction are written
  * out as a single record, for example:
  *
- * <logseq#-for-entire-txn>:<WALEdit-for-entire-txn>
+ * &lt;logseq#-for-entire-txn&gt;:&lt;WALEdit-for-entire-txn&gt;
  *
  * where, the WALEdit is serialized as:
- *   <-1, # of edits, <KeyValue>, <KeyValue>, ... >
+ *   &lt;-1, # of edits, &lt;KeyValue&gt;, &lt;KeyValue&gt;, ... &gt;
  * For example:
- *   <-1, 3, <KeyValue-for-edit-c1>, <KeyValue-for-edit-c2>, <KeyValue-for-edit-c3>>
+ *   &lt;-1, 3, &lt;KeyValue-for-edit-c1&gt;, &lt;KeyValue-for-edit-c2&gt;, &lt;KeyValue-for-edit-c3&gt;&gt;
  *
  * The -1 marker is just a special way of being backward compatible with
- * an old WAL which would have contained a single <KeyValue>.
+ * an old WAL which would have contained a single &lt;KeyValue&gt;.
  *
  * The deserializer for WALEdit backward compatibly detects if the record
  * is an old style KeyValue or the new style WALEdit.
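[Editor's note] The WALEdit javadoc in the hunk above describes the grouped record layout: a -1 marker, the edit count, then the individual edits, all written as one WAL record per transaction. The following standalone sketch is illustrative only -- it is not HBase's actual WALEdit serializer -- and simply shows that framing under the assumption that each edit has already been serialized to a byte array:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;

public class WalEditLayoutSketch {
  // Encodes one transaction as a single record: <-1, # of edits, edit, edit, ...>
  static byte[] encodeTxn(List<byte[]> serializedEdits) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(-1);                       // backward-compatibility marker
    out.writeInt(serializedEdits.size());   // number of edits in this transaction
    for (byte[] edit : serializedEdits) {
      out.writeInt(edit.length);            // hypothetical length-prefixed framing
      out.write(edit);
    }
    out.flush();
    return bytes.toByteArray();             // payload of one <logseq>:<WALEdit> record
  }
}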

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
index 59a1b4395cf..1314a4dccc5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
@@ -53,9 +53,9 @@ import com.google.protobuf.ServiceException;
 
 /**
  * This class is responsible for replaying the edits coming from a failed region server.
- * <p/>
+ * <p>
  * This class uses the native HBase client in order to replay WAL entries.
- * <p/>
+ * <p>
  */
 @InterfaceAudience.Private
 public class WALEditsReplaySink {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
index de82b7ea0f5..27f019a9f82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
@@ -160,7 +160,6 @@ public abstract class HBaseReplicationEndpoint extends BaseReplicationEndpoint
    * Get a list of all the addresses of all the region servers
    * for this peer cluster
    * @return list of addresses
-   * @throws KeeperException
    */
   // Synchronize peer cluster connection attempts to avoid races and rate
   // limit connections when multiple replication sources try to connect to
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index 884bce1a38b..bf31a7dd669 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -46,9 +46,10 @@ import org.apache.hadoop.ipc.RemoteException;
  * For the slave cluster it selects a random number of peers
  * using a replication ratio. For example, if replication ration = 0.1
  * and slave cluster has 100 region servers, 10 will be selected.
- * <p/>
+ * <p>
  * A stream is considered down when we cannot contact a region server on the
  * peer cluster for more than 55 seconds by default.
+ * <p>
  */
 @InterfaceAudience.Private
 public class HBaseInterClusterReplicationEndpoint extends HBaseReplicationEndpoint {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 32764180aa4..7d476774ba0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -53,16 +53,17 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 
 /**
+ * <p>
  * This class is responsible for replicating the edits coming
  * from another cluster.
- * <p/>
+ * <p>
  * This replication process is currently waiting for the edits to be applied
  * before the method can return. This means that the replication of edits
  * is synchronized (after reading from WALs in ReplicationSource) and that a
  * single region server cannot receive edits from two sources at the same time
- * <p/>
+ * <p>
  * This class uses the native HBase client in order to replicate entries.
- * <p/>
+ * <p>
  *
  * TODO make this class more like ReplicationSource wrt log handling
  */
 @InterfaceAudience.Private
 public class ReplicationSink {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f7230abc081..3f23837e838 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -66,10 +66,10 @@ import com.google.common.util.concurrent.Service;
  * For each slave cluster it selects a random number of peers
  * using a replication ratio. For example, if replication ration = 0.1
  * and slave cluster has 100 region servers, 10 will be selected.
- * <p/>
+ * <p>
  * A stream is considered down when we cannot contact a region server on the
  * peer cluster for more than 55 seconds by default.
- * <p/>
+ * <p>
  *
  */
 @InterfaceAudience.Private
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 4d9725702f9..0c8f6f9f40f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -62,9 +62,11 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 /**
  * This class is responsible to manage all the replication
  * sources. There are two classes of sources:
+ * <ul>
  * <li>Normal sources are persistent and one per peer cluster
  * <li>Old sources are recovered from a failed region server and our
  * only goal is to finish replicating the WAL queue it had up in ZK
+ * </ul>
  *
  * When a region server dies, this class uses a watcher to get notified and it
  * tries to grab a lock in order to transfer all the queues in a local
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
index 742fbff545f..c756576529a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationThrottler.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Per-peer per-node throttling controller for replication: enabled if
- * bandwidth > 0, a cycle = 100ms, by throttling we guarantee data pushed
+ * bandwidth &gt; 0, a cycle = 100ms, by throttling we guarantee data pushed
  * to peer within each cycle won't exceed 'bandwidth' bytes
  */
 @InterfaceAudience.Private
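[Editor's note] The ReplicationThrottler javadoc above describes cycle-based throttling: a per-cycle byte budget derived from 'bandwidth', with 100ms cycles. The sketch below is a minimal illustration of that idea only, not the actual ReplicationThrottler implementation; the class name and method names are hypothetical:

public class CycleThrottleSketch {
  private final double bytesPerCycle;                 // bandwidth (bytes/sec) spread over 100ms cycles
  private long cycleStartMs = System.currentTimeMillis();
  private long pushedInCycle = 0;

  public CycleThrottleSketch(double bandwidthBytesPerSec) {
    this.bytesPerCycle = bandwidthBytesPerSec / 10.0; // ten 100ms cycles per second
  }

  // How long the caller should sleep before pushing 'size' more bytes.
  public long sleepMillisBeforePush(long size) {
    long now = System.currentTimeMillis();
    if (now - cycleStartMs >= 100) {                  // new cycle: reset the budget
      cycleStartMs = now;
      pushedInCycle = 0;
    }
    if (pushedInCycle + size <= bytesPerCycle) {
      return 0;                                       // still within this cycle's budget
    }
    return 100 - (now - cycleStartMs);                // wait for the current cycle to end
  }

  // Record bytes actually pushed in the current cycle.
  public void addPushSize(long size) {
    pushedInCycle += size;
  }
}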
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 19252bb9a86..131ff14ddcc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -93,6 +93,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * user,family,qualifier column qualifier level permissions for a user
  * group,family,qualifier column qualifier level permissions for a group
  * </li>
+ * </ul>
  * All values are encoded as byte arrays containing the codes from the
  * org.apache.hadoop.hbase.security.access.TablePermission.Action enum.
  * <p>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 93ad41d849b..7e9299a9399 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -127,6 +127,7 @@ import com.google.protobuf.Service;
  * <p>
  * {@code AccessController} performs authorization checks for HBase operations
  * based on:
+ * <p>
  * <ul>
  * <li>the identity of the user performing the operation</li>
  * <li>the scope over which the operation is performed, in increasing
@@ -134,6 +135,7 @@ import com.google.protobuf.Service;
  * <li>the type of action being performed (as mapped to
  *   {@link Permission.Action} values)</li>
  * </ul>
+ * <p>
  * If the authorization check fails, an {@link AccessDeniedException}
  * will be thrown for the operation.
  * <p>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
index 92f9d93f2a7..774930d53df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityUtils.java
@@ -120,7 +120,7 @@ public class VisibilityUtils {
   /**
    * Reads back from the zookeeper. The data read here is of the form written by
-   * writeToZooKeeper(Map<byte[], Integer> entries).
+   * writeToZooKeeper(Map&lt;byte[], Integer&gt; entries).
    *
    * @param data
    * @return Labels and their ordinal details
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index cd04b828d23..2fc5d83ff61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
  *
  * <pre>
      * /hbase/.snapshots
    - *          /.tmp                <---- working directory
    - *          /[snapshot name]     <----- completed snapshot
+ *          /.tmp                &lt;---- working directory
+ *          /[snapshot name]     &lt;----- completed snapshot
  * </pre>
  *
  * A completed snapshot named 'completed' then looks like (multiple regions, servers, files, etc.
@@ -51,16 +51,16 @@ import org.apache.hadoop.hbase.util.FSUtils;
  *
  * <pre>
      * /hbase/.snapshots/completed
    - *                   .snapshotinfo          <--- Description of the snapshot
    - *                   .tableinfo             <--- Copy of the tableinfo
+ *                   .snapshotinfo          &lt;--- Description of the snapshot
+ *                   .tableinfo             &lt;--- Copy of the tableinfo
      *                    /.logs
      *                        /[server_name]
      *                            /... [log files]
      *                         ...
    - *                   /[region name]           <---- All the region's information
    - *                   .regioninfo              <---- Copy of the HRegionInfo
+ *                   /[region name]           &lt;---- All the region's information
+ *                   .regioninfo              &lt;---- Copy of the HRegionInfo
      *                      /[column family name]
    - *                          /[hfile name]     <--- name of the hfile in the real region
+ *                          /[hfile name]     &lt;--- name of the hfile in the real region
      *                          ...
      *                      ...
      *                    ...
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
    index 2aba737b575..6869d69e123 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterWriter.java
    @@ -29,7 +29,7 @@ import org.apache.hadoop.io.Writable;
     @InterfaceAudience.Private
     public interface BloomFilterWriter extends BloomFilterBase {
     
    -  /** Compact the Bloom filter before writing metadata & data to disk. */
+  /** Compact the Bloom filter before writing metadata &amp; data to disk. */
       void compactBloom();
       /**
        * Get a writable interface into bloom filter meta data.
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    index a591cf054d9..6d103518a32 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
    @@ -180,8 +180,9 @@ public abstract class FSUtils {
       }
     
       /**
    -   * Compare of path component. Does not consider schema; i.e. if schemas different but path
    -   *  starts with rootPath, then the function returns true
    +   * Compare of path component. Does not consider schema; i.e. if schemas
    +   * different but path starts with rootPath,
    +   * then the function returns true
        * @param rootPath
        * @param path
        * @return True if path starts with rootPath
    @@ -1435,7 +1436,7 @@ public abstract class FSUtils {
        * Given a particular table dir, return all the regiondirs inside it, excluding files such as
        * .tableinfo
        * @param fs A file system for the Path
-   * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
+   * @param tableDir Path to a specific table directory &lt;hbase.rootdir&gt;/&lt;tabledir&gt;
        * @return List of paths to valid region directories in table dir.
        * @throws IOException
        */
    @@ -1452,7 +1453,7 @@ public abstract class FSUtils {
     
       /**
        * Filter for all dirs that are legal column family names.  This is generally used for colfam
-   * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>.
+   * dirs &lt;hbase.rootdir&gt;/&lt;tabledir&gt;/&lt;regiondir&gt;/&lt;colfamdir&gt;.
        */
       public static class FamilyDirFilter implements PathFilter {
         final FileSystem fs;
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
    index 3e164bab402..cc87f64920d 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
    @@ -611,7 +611,7 @@ public class HBaseFsck extends Configured implements Closeable {
        * region servers and the masters.  It makes each region's state in HDFS, in
        * hbase:meta, and deployments consistent.
        *
    -   * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable
+   * @return If &gt; 0 , number of errors detected, if &lt; 0 there was an unrecoverable
        * error.  If 0, we have a clean hbase.
        */
       public int onlineConsistencyRepair() throws IOException, KeeperException,
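[Editor's note] The @return contract documented above (positive = number of errors detected, negative = unrecoverable error, zero = clean) reads naturally as a three-way check on the caller's side. A hedged usage sketch follows; the helper method is hypothetical, and 'result' stands for the value returned by onlineConsistencyRepair():

  // Hypothetical helper interpreting the documented return value of onlineConsistencyRepair().
  static void reportRepairResult(int result) {
    if (result == 0) {
      System.out.println("HBase is clean");
    } else if (result > 0) {
      System.out.println(result + " inconsistencies detected");
    } else {
      System.out.println("unrecoverable error during the repair");
    }
  }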
    diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
    index faced067e13..7f74d551c2c 100644
    --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
    +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileV1Detector.java
    @@ -60,10 +60,12 @@ import org.apache.hadoop.util.ToolRunner;
  * have such files.
  * <p>
  * To print the help section of the tool:
+ * <p>
  * <ul>
- * <li>./bin/hbase org.apache.hadoop.hbase.util.HFileV1Detector --h or,
- * <li>java -cp `hbase classpath` org.apache.hadoop.hbase.util.HFileV1Detector --h
+ * <li>./bin/hbase org.apache.hadoop.hbase.util.HFileV1Detector --h or,</li>
+ * <li>java -cp `hbase classpath` org.apache.hadoop.hbase.util.HFileV1Detector --h</li>
  * </ul>
+ * <p>
  * It also supports -h, --help, -help options.
  * </p>
  */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
index a55c8765658..ed72ea2dc04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MultiHConnection.java
@@ -111,7 +111,6 @@ public class MultiHConnection {
    * @param results the results array
    * @param callback
    * @throws IOException
-   * @throws InterruptedException
    */
   @SuppressWarnings("deprecation")
   public void processBatchCallback(List<? extends Row> actions, TableName tableName,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
index 3a08750ffde..ea704f82404 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java
@@ -85,7 +85,7 @@ import com.google.common.collect.Sets;
  * Answer: Automatic splitting is determined by the configuration value
  * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this
  * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting
- * is 100GB, which would result in > 1hr major compactions if reached.
+ * is 100GB, which would result in &gt; 1hr major compactions if reached.
  * <p>
  * Question: Why did the original authors decide to manually split?
  * Answer: Specific workload characteristics of our use case allowed us
@@ -227,7 +227,7 @@ public class RegionSplitter {
     /**
      * @param row
      *          byte array representing a row in HBase
-     * @return String to use for debug & file printing
+     * @return String to use for debug &amp; file printing
      */
     String rowToStr(byte[] row);
 
@@ -254,12 +254,12 @@ public class RegionSplitter {
   * <p>
   * <ul>
   * <li>create a table named 'myTable' with 60 pre-split regions containing 2
-   * column families 'test' & 'rs', assuming the keys are hex-encoded ASCII:
+   * column families 'test' &amp; 'rs', assuming the keys are hex-encoded ASCII:
   * <ul>
   * <li>bin/hbase org.apache.hadoop.hbase.util.RegionSplitter -c 60 -f test:rs
   * myTable HexStringSplit
   * </ul></li>
-   * <li>perform a rolling split of 'myTable' (i.e. 60 => 120 regions), # 2
+   * <li>perform a rolling split of 'myTable' (i.e. 60 =&gt; 120 regions), # 2
   * outstanding splits at a time, assuming keys are uniformly distributed
   * bytes:
   * <ul>
@@ -878,10 +878,10 @@ public class RegionSplitter {
    * boundaries. The format of a HexStringSplit region boundary is the ASCII
    * representation of an MD5 checksum, or any other uniformly distributed
    * hexadecimal value. Row are hex-encoded long values in the range
-   * "00000000" => "FFFFFFFF" and are left-padded with zeros to keep the
+   * "00000000" =&gt; "FFFFFFFF" and are left-padded with zeros to keep the
    * same order lexicographically as if they were binary.
    *
-   * Since this split algorithm uses hex strings as keys, it is easy to read &
+   * Since this split algorithm uses hex strings as keys, it is easy to read &amp;
    * write in the shell but takes up more space and may be non-intuitive.
    */
   public static class HexStringSplit implements SplitAlgorithm {
@@ -1032,7 +1032,7 @@ public class RegionSplitter {
   /**
    * A SplitAlgorithm that divides the space of possible keys evenly. Useful
    * when the keys are approximately uniform random bytes (e.g. hashes). Rows
-   * are raw byte values in the range 00 => FF and are right-padded with
+   * are raw byte values in the range 00 =&gt; FF and are right-padded with
    * zeros to keep the same memcmp() order. This is the natural algorithm to use
    * for a byte[] environment and saves space, but is not necessarily the
    * easiest for readability.
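[Editor's note] The HexStringSplit javadoc above describes keys as zero-padded hex strings in the range "00000000" to "FFFFFFFF", split evenly so that lexicographic order matches numeric order. The sketch below illustrates that even-split idea only; it is not RegionSplitter's actual implementation, and the class and method names are hypothetical:

public class HexSplitSketch {
  // Divide the 32-bit hex keyspace into numRegions pieces and return the numRegions-1 boundaries.
  static String[] split(int numRegions) {
    long lowest = 0x00000000L;
    long highest = 0xFFFFFFFFL;
    long range = highest - lowest;
    String[] splits = new String[numRegions - 1];
    for (int i = 1; i < numRegions; i++) {
      long boundary = lowest + range * i / numRegions; // evenly spaced boundary
      splits[i - 1] = String.format("%08x", boundary); // 8-char, zero-padded hex key
    }
    return splits;
  }

  public static void main(String[] args) {
    for (String s : split(4)) {
      System.out.println(s); // prints 3fffffff, 7fffffff, bfffffff
    }
  }
}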
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
index 67f8e84f24c..5c61afb7464 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
@@ -44,7 +44,7 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil {
    * Whether asynchronous WAL replication to the secondary region replicas is enabled or not.
    * If this is enabled, a replication peer named "region_replica_replication" will be created
    * which will tail the logs and replicate the mutatations to region replicas for tables that
-   * have region replication > 1. If this is enabled once, disabling this replication also
+   * have region replication &gt; 1. If this is enabled once, disabling this replication also
    * requires disabling the replication peer using shell or ReplicationAdmin java class.
    * Replication to secondary region replicas works over standard inter-cluster replication.
    * So replication, if disabled explicitly, also has to be enabled by setting "hbase.replication"
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index 621c200b647..5ac8c11e6f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -295,9 +295,9 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
   }
 
   /**
-   * Wait for sequence number is assigned & return the assigned value
+   * Wait for sequence number is assigned &amp; return the assigned value
    * @return long the new assigned sequence number
-   * @throws InterruptedException
+   * @throws IOException
    */
   @Override
   public long getSequenceId() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
index fb769c02270..c6bc69094d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
@@ -142,11 +142,11 @@ public class ZKSplitLog {
    */
 
   /**
-   * check if /hbase/recovering-regions/<current region encoded name> exists. Returns true if exists
-   * and set watcher as well.
+   * check if /hbase/recovering-regions/&lt;current region encoded name&gt;
+   * exists. Returns true if exists and set watcher as well.
    * @param zkw
    * @param regionEncodedName region encode name
-   * @return true when /hbase/recovering-regions/<current region encoded name> exists
+   * @return true when /hbase/recovering-regions/&lt;current region encoded name&gt; exists
    * @throws KeeperException
    */
   public static boolean
@@ -199,7 +199,7 @@ public class ZKSplitLog {
    * @param zkw
    * @param serverName
    * @param encodedRegionName
-   * @return the last flushed sequence ids recorded in ZK of the region for serverName 
+   * @return the last flushed sequence ids recorded in ZK of the region for serverName
    * @throws IOException
    */
 
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index daf320ce326..7b0ca04f497 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -676,7 +676,6 @@ public class ThriftServerRunner implements Runnable {
      * @param tableName
      *          name of table
      * @return Table object
      * @throws IOException
-     * @throws IOError
      */
     public Table getTable(final byte[] tableName) throws IOException {
@@ -718,7 +717,7 @@ public class ThriftServerRunner implements Runnable {
 
     /**
      * Removes the scanner associated with the specified ID from the internal
-     * id->scanner hash-map.
+     * id-&gt;scanner hash-map.
      *
      * @param id
      * @return a Scanner, or null if ID was invalid.
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
index bb9e58cbd1b..db48a622c9a 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/generated/Hbase.java
@@ -564,8 +564,6 @@ public class Hbase {
      * 
      * @throws IllegalArgument if ScannerID is invalid
      * 
-     * @throws NotFound when the scanner reaches the end
-     * 
      * @param id id of a scanner returned by scannerOpen
      */
     public List<TRowResult> scannerGet(int id) throws IOError, IllegalArgument, org.apache.thrift.TException;
@@ -580,8 +578,6 @@ public class Hbase {
      * 
      * @throws IllegalArgument if ScannerID is invalid
      * 
-     * @throws NotFound when the scanner reaches the end
-     * 
      * @param id id of a scanner returned by scannerOpen
      * 
      * @param nbRows number of results to return