HBASE-27401 Clean up current broken 'n's in our javadoc (#4812)

Signed-off-by: Andrew Purtell <apurtell@apache.org>
(cherry picked from commit 63cdd026f0)

Conflicts:
	hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManager.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
	hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
	hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestReplication.java
	hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
	hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManager.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/HBaseServerBase.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/replication/HBaseReplicationEndpoint.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsSource.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtil.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestBulkLoadHFilesSplitRecovery.java
	hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
Duo Zhang 2022-10-06 18:17:34 +08:00
parent 9d7614bbdc
commit 8c2dd12adb
612 changed files with 2991 additions and 2886 deletions


@ -367,7 +367,7 @@ public final class FanOutOneBlockAsyncDFSOutputSaslHelper {
* Create a ByteString from byte array without copying (wrap), and then set it as the payload
* for the builder.
* @param builder builder for HDFS DataTransferEncryptorMessage.
* @param payload byte array of payload. n
* @param payload byte array of payload.
*/
static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder,
byte[] payload) throws IOException {
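
A minimal sketch of the wrap-without-copy idea described in this javadoc, written against plain protobuf-java (HBase itself uses a relocated/shaded protobuf, so package names differ there; the helper name is invented):

import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;

final class WrapPayloadSketch {
  // Wrap a byte[] as a ByteString without the defensive copy that
  // ByteString.copyFrom(byte[]) would make. The caller must not mutate the
  // array afterwards, since the ByteString now aliases it.
  static ByteString wrapPayload(byte[] payload) {
    return UnsafeByteOperations.unsafeWrap(payload);
  }
}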


@ -102,8 +102,8 @@ public class AsyncMetaTableAccessor {
}
/**
* Returns the HRegionLocation from meta for the given region n * @param regionName region we're
* looking for
* Returns the HRegionLocation from meta for the given region
* @param regionName region we're looking for
* @return HRegionLocation for the given region
*/
public static CompletableFuture<Optional<HRegionLocation>>
@ -128,8 +128,8 @@ public class AsyncMetaTableAccessor {
}
/**
* Returns the HRegionLocation from meta for the given encoded region name n * @param
* encodedRegionName region we're looking for
* Returns the HRegionLocation from meta for the given encoded region name
* @param encodedRegionName region we're looking for
* @return HRegionLocation for the given region
*/
public static CompletableFuture<Optional<HRegionLocation>>
@ -176,8 +176,8 @@ public class AsyncMetaTableAccessor {
}
/**
* Used to get all region locations for the specific table. n * @param tableName table we're
* looking for, can be null for getting all regions
* Used to get all region locations for the specific table.
* @param tableName table we're looking for, can be null for getting all regions
* @return the list of region locations. The return value will be wrapped by a
* {@link CompletableFuture}.
*/
@ -200,8 +200,8 @@ public class AsyncMetaTableAccessor {
}
/**
* Used to get table regions' info and server. n * @param tableName table we're looking for, can
* be null for getting all regions
* Used to get table regions' info and server.
* @param tableName table we're looking for, can be null for getting all regions
* @param excludeOfflinedSplitParents don't return split parents
* @return the list of regioninfos and server. The return value will be wrapped by a
* {@link CompletableFuture}.
@ -259,9 +259,10 @@ public class AsyncMetaTableAccessor {
}
/**
* Performs a scan of META table for given table. n * @param tableName table withing we scan
* @param type scanned part of meta
* @param visitor Visitor invoked against each row
* Performs a scan of META table for given table.
* @param tableName table withing we scan
* @param type scanned part of meta
* @param visitor Visitor invoked against each row
*/
private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
TableName tableName, QueryType type, final Visitor visitor) {
@ -270,11 +271,12 @@ public class AsyncMetaTableAccessor {
}
/**
* Performs a scan of META table for given table. n * @param startRow Where to start the scan
* @param stopRow Where to stop the scan
* @param type scanned part of meta
* @param maxRows maximum rows to return
* @param visitor Visitor invoked against each row
* Performs a scan of META table for given table.
* @param startRow Where to start the scan
* @param stopRow Where to stop the scan
* @param type scanned part of meta
* @param maxRows maximum rows to return
* @param visitor Visitor invoked against each row
*/
private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) {


@ -52,7 +52,8 @@ public class ClusterId {
/**
* @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
* @return An instance of {@link ClusterId} made from <code>bytes</code> n * @see #toByteArray()
* @return An instance of {@link ClusterId} made from <code>bytes</code>
* @see #toByteArray()
*/
public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException {
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
@ -78,9 +79,7 @@ public class ClusterId {
return builder.setClusterId(this.id).build();
}
/**
* n * @return A {@link ClusterId} made from the passed in <code>cid</code>
*/
/** Returns A {@link ClusterId} made from the passed in <code>cid</code> */
public static ClusterId convert(final ClusterIdProtos.ClusterId cid) {
return new ClusterId(cid.getClusterId());
}
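
A hedged round-trip sketch of the serialization documented here: toByteArray() emits a pb-magic-prefixed form that parseFrom() reads back (the no-arg constructor is assumed to generate a fresh id):

import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.exceptions.DeserializationException;

final class ClusterIdRoundTripSketch {
  static ClusterId roundTrip() throws DeserializationException {
    ClusterId original = new ClusterId();      // new random cluster id (assumed constructor behavior)
    byte[] bytes = original.toByteArray();     // pb-serialized with the magic prefix
    return ClusterId.parseFrom(bytes);         // throws DeserializationException on bad input
  }
}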


@ -269,7 +269,7 @@ public class ClusterStatus implements ClusterMetrics {
}
/**
* n * @return Server's load or null if not found.
* @return Server's load or null if not found.
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link #getLiveServerMetrics} instead.
*/


@ -358,8 +358,8 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
/**
* Set whether the tags should be compressed along with DataBlockEncoding. When no
* DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
* invocation)
* DataBlockEncoding is been used, this is having no effect.
* @return this (for chained invocation)
*/
public HColumnDescriptor setCompressTags(boolean value) {
getDelegateeForModification().setCompressTags(value);
@ -668,8 +668,8 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
/**
* @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix
* @return An instance of {@link HColumnDescriptor} made from <code>bytes</code> n * @see
* #toByteArray()
* @return An instance of {@link HColumnDescriptor} made from <code>bytes</code>
* @see #toByteArray()
*/
public static HColumnDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
ColumnFamilyDescriptor desc = ColumnFamilyDescriptorBuilder.parseFrom(bytes);
@ -713,7 +713,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
}
/**
* Set the encryption algorithm for use with this family n
* Set the encryption algorithm for use with this family
*/
public HColumnDescriptor setEncryptionType(String value) {
getDelegateeForModification().setEncryptionType(value);


@ -93,7 +93,7 @@ public class HRegionInfo implements RegionInfo {
private static final int MAX_REPLICA_ID = 0xFFFF;
/**
* n * @return the encodedName
* @return the encodedName
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link org.apache.hadoop.hbase.client.RegionInfo#encodeRegionName(byte[])}.
*/
@ -211,7 +211,7 @@ public class HRegionInfo implements RegionInfo {
* Construct HRegionInfo with explicit parameters
* @param tableName the table name
* @param startKey first key in region
* @param endKey end of key range n
* @param endKey end of key range
*/
public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey)
throws IllegalArgumentException {
@ -224,7 +224,7 @@ public class HRegionInfo implements RegionInfo {
* @param startKey first key in region
* @param endKey end of key range
* @param split true if this region has split and we have daughter regions regions that may or
* may not hold references to this region. n
* may not hold references to this region.
*/
public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
final boolean split) throws IllegalArgumentException {
@ -238,7 +238,7 @@ public class HRegionInfo implements RegionInfo {
* @param endKey end of key range
* @param split true if this region has split and we have daughter regions regions that may or
* may not hold references to this region.
* @param regionid Region id to use. n
* @param regionid Region id to use.
*/
public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
final boolean split, final long regionid) throws IllegalArgumentException {
@ -253,7 +253,7 @@ public class HRegionInfo implements RegionInfo {
* @param split true if this region has split and we have daughter regions regions that may or
* may not hold references to this region.
* @param regionid Region id to use.
* @param replicaId the replicaId to use n
* @param replicaId the replicaId to use
*/
public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey,
final boolean split, final long regionid, final int replicaId) throws IllegalArgumentException {
@ -279,7 +279,7 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Costruct a copy of another HRegionInfo n
* Costruct a copy of another HRegionInfo
*/
public HRegionInfo(RegionInfo other) {
super();
@ -302,7 +302,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Make a region name of passed parameters. n * @param startKey Can be null
* Make a region name of passed parameters.
* @param startKey Can be null
* @param regionid Region id (Usually timestamp from when region was created).
* @param newFormat should we create the region name in the new format (such that it contains its
* encoded name?).
@ -318,7 +319,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Make a region name of passed parameters. n * @param startKey Can be null
* Make a region name of passed parameters.
* @param startKey Can be null
* @param id Region id (Usually timestamp from when region was created).
* @param newFormat should we create the region name in the new format (such that it contains its
* encoded name?).
@ -334,10 +336,11 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Make a region name of passed parameters. n * @param startKey Can be null
* @param regionid Region id (Usually timestamp from when region was created). n * @param
* newFormat should we create the region name in the new format (such that it
* contains its encoded name?).
* Make a region name of passed parameters.
* @param startKey Can be null
* @param regionid Region id (Usually timestamp from when region was created).
* @param newFormat should we create the region name in the new format (such that it contains its
* encoded name?).
* @return Region name made of passed tableName, startKey, id and replicaId
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}.
@ -351,7 +354,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Make a region name of passed parameters. n * @param startKey Can be null
* Make a region name of passed parameters.
* @param startKey Can be null
* @param id Region id (Usually timestamp from when region was created).
* @param newFormat should we create the region name in the new format (such that it contains its
* encoded name?).
@ -367,9 +371,10 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Make a region name of passed parameters. n * @param startKey Can be null
* @param id Region id (Usually timestamp from when region was created). n * @param newFormat
* should we create the region name in the new format
* Make a region name of passed parameters.
* @param startKey Can be null
* @param id Region id (Usually timestamp from when region was created).
* @param newFormat should we create the region name in the new format
* @return Region name made of passed tableName, startKey, id and replicaId
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}.
@ -394,7 +399,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Gets the start key from the specified region name. n * @return Start key.
* Gets the start key from the specified region name.
* @return Start key.
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link org.apache.hadoop.hbase.client.RegionInfo#getStartKey(byte[])}.
*/
@ -404,9 +410,10 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Separate elements of a regionName. n * @return Array of byte[] containing tableName, startKey
* and id n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#parseRegionName(byte[])}.
* Separate elements of a regionName.
* @return Array of byte[] containing tableName, startKey and id
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#parseRegionName(byte[])}.
*/
@Deprecated
@InterfaceAudience.Private
@ -415,9 +422,9 @@ public class HRegionInfo implements RegionInfo {
}
/**
* n * @return if region name is encoded. n * @deprecated As of release 2.0.0, this will be
* removed in HBase 3.0.0 Use
* {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}.
* @return if region name is encoded.
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}.
*/
@Deprecated
public static boolean isEncodedRegionName(byte[] regionName) throws IOException {
@ -483,7 +490,7 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Get current table name of the region n
* Get current table name of the region
*/
@Override
public TableName getTable() {
@ -725,7 +732,8 @@ public class HRegionInfo implements RegionInfo {
/**
* @param bytes A pb RegionInfo serialized with a pb magic prefix.
* @return A deserialized {@link HRegionInfo} n * @see #toByteArray()
* @return A deserialized {@link HRegionInfo}
* @see #toByteArray()
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[])}.
*/
@ -738,7 +746,8 @@ public class HRegionInfo implements RegionInfo {
* @param bytes A pb RegionInfo serialized with a pb magic prefix.
* @param offset starting point in the byte array
* @param len length to read on the byte array
* @return A deserialized {@link HRegionInfo} n * @see #toByteArray()
* @return A deserialized {@link HRegionInfo}
* @see #toByteArray()
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[], int, int)}.
*/
@ -763,8 +772,8 @@ public class HRegionInfo implements RegionInfo {
/**
* Use this instead of {@link #toByteArray()} when writing to a stream and you want to use the pb
* mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want).
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see
* #toByteArray()
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
* @see #toByteArray()
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#toDelimitedByteArray(RegionInfo)}.
*/
@ -774,8 +783,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally nn
* * @return descriptive string
* Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally
* @return descriptive string
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* RegionInfoDisplay#getDescriptiveNameFromRegionStateForDisplay(RegionState,
* Configuration) over in hbase-server module.
@ -788,7 +797,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Get the end key for display. Optionally hide the real end key. nn * @return the endkey
* Get the end key for display. Optionally hide the real end key.
* @return the endkey
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* RegionInfoDisplay#getEndKeyForDisplay(RegionInfo, Configuration) over in
* hbase-server module.
@ -800,7 +810,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Get the start key for display. Optionally hide the real start key. nn * @return the startkey
* Get the start key for display. Optionally hide the real start key.
* @return the startkey
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, Configuration) over in
* hbase-server module.
@ -812,8 +823,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Get the region name for display. Optionally hide the start key. nn * @return region name as
* String
* Get the region name for display. Optionally hide the start key.
* @return region name as String
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, Configuration) over
* in hbase-server module.
@ -825,7 +836,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Get the region name for display. Optionally hide the start key. nn * @return region name bytes
* Get the region name for display. Optionally hide the start key.
* @return region name bytes
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, Configuration) over in
* hbase-server module.
@ -838,9 +850,10 @@ public class HRegionInfo implements RegionInfo {
/**
* Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was
* serialized to the stream with {@link #toDelimitedByteArray()} n * @return An instance of
* HRegionInfo. n * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#parseFrom(DataInputStream)}.
* serialized to the stream with {@link #toDelimitedByteArray()}
* @return An instance of HRegionInfo.
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#parseFrom(DataInputStream)}.
*/
@Deprecated
@InterfaceAudience.Private
@ -868,8 +881,8 @@ public class HRegionInfo implements RegionInfo {
* to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can be
* used to read back the instances.
* @param infos HRegionInfo objects to serialize
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix. n * @see
* #toByteArray()
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
* @see #toByteArray()
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link RegionInfo#toDelimitedByteArray(RegionInfo...)}.
*/
@ -910,7 +923,8 @@ public class HRegionInfo implements RegionInfo {
}
/**
* Check whether two regions are adjacent nn * @return true if two regions are adjacent
* Check whether two regions are adjacent
* @return true if two regions are adjacent
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link org.apache.hadoop.hbase.client.RegionInfo#areAdjacent(RegionInfo, RegionInfo)}.
*/
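
The deprecation notes in this file point callers at the RegionInfo API; a hedged sketch of the non-deprecated builder and the createRegionName overload referenced above (table and keys invented for illustration):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class RegionInfoSketch {
  static RegionInfo demoRegion() {
    return RegionInfoBuilder.newBuilder(TableName.valueOf("demo"))
      .setStartKey(Bytes.toBytes("aaa"))        // first key in the region
      .setEndKey(Bytes.toBytes("zzz"))          // exclusive end of the key range
      .setSplit(false)
      .setRegionId(System.currentTimeMillis())  // usually the creation timestamp
      .build();
  }

  static byte[] demoRegionName() {
    // Same signature as the replacement named in the @deprecated tags above; replicaId 0.
    return RegionInfo.createRegionName(
      TableName.valueOf("demo"), Bytes.toBytes("aaa"), System.currentTimeMillis(), 0, true);
  }
}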


@ -82,7 +82,7 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
}
/**
* @return Immutable HRegionInfo
* Returns immutable HRegionInfo
* @deprecated Since 2.0.0. Will remove in 3.0.0. Use {@link #getRegion()}} instead.
*/
@Deprecated
@ -90,9 +90,7 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
return regionInfo == null ? null : new ImmutableHRegionInfo(regionInfo);
}
/**
* n
*/
/** Returns regionInfo */
public RegionInfo getRegion() {
return regionInfo;
}


@ -353,7 +353,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
}
/**
* Get the name of the table n
* Get the name of the table
*/
@Override
public TableName getTableName() {
@ -715,7 +715,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
* org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. It won't check if the class can be
* loaded or not. Whether a coprocessor is loadable or not will be determined when a region is
* opened.
* @param className Full class name. n
* @param className Full class name.
*/
public HTableDescriptor addCoprocessor(String className) throws IOException {
getDelegateeForModification().setCoprocessor(className);
@ -731,7 +731,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
* classloader.
* @param className Full class name.
* @param priority Priority
* @param kvs Arbitrary key-value parameter pairs passed into the coprocessor. n
* @param kvs Arbitrary key-value parameter pairs passed into the coprocessor.
*/
public HTableDescriptor addCoprocessor(String className, Path jarFilePath, int priority,
final Map<String, String> kvs) throws IOException {
@ -747,7 +747,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
* loaded or not. Whether a coprocessor is loadable or not will be determined when a region is
* opened.
* @param specStr The Coprocessor specification all in in one String formatted so matches
* {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN} n
* {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN}
*/
public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException {
getDelegateeForModification().setCoprocessorWithSpec(specStr);
@ -828,8 +828,8 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
/**
* @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix
* @return An instance of {@link HTableDescriptor} made from <code>bytes</code> nn * @see
* #toByteArray()
* @return An instance of {@link HTableDescriptor} made from <code>bytes</code>
* @see #toByteArray()
*/
public static HTableDescriptor parseFrom(final byte[] bytes)
throws DeserializationException, IOException {
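
A hedged sketch of the non-deprecated counterpart to the addCoprocessor() calls above: the builder only records the class name, and loadability is checked when a region opens, as the javadoc notes (the coprocessor class name is hypothetical):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

final class TableDescriptorSketch {
  static TableDescriptor withCoprocessor() throws java.io.IOException {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setCoprocessor("org.example.DemoRegionObserver")  // hypothetical coprocessor class
      .build();
  }
}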


@ -643,7 +643,7 @@ public class MetaTableAccessor {
/**
* @param connection connection we're using
* @param serverName server whose regions we're interested in
* @return List of user regions installed on this server (does not include catalog regions). n
* @return List of user regions installed on this server (does not include catalog regions).
*/
public static NavigableMap<RegionInfo, Result> getServerUserRegions(Connection connection,
final ServerName serverName) throws IOException {


@ -34,8 +34,7 @@ public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException {
}
/**
* n
*/
* */
public NotAllMetaRegionsOnlineException(String message) {
super(message);
}


@ -392,7 +392,7 @@ public class ServerLoad implements ServerMetrics {
}
/**
* Call directly from client such as hbase shell n
* Call directly from client such as hbase shell
*/
@Override
public ReplicationLoadSink getReplicationLoadSink() {


@ -70,7 +70,7 @@ public interface ServerMetrics {
Map<String, List<ReplicationLoadSource>> getReplicationLoadSourceMap();
/**
* Call directly from client such as hbase shell n
* Call directly from client such as hbase shell
*/
@Nullable
ReplicationLoadSink getReplicationLoadSink();


@ -532,9 +532,10 @@ public interface Admin extends Abortable, Closeable {
* Disable table and wait on completion. May timeout eventually. Use
* {@link #disableTableAsync(org.apache.hadoop.hbase.TableName)} and
* {@link #isTableDisabled(org.apache.hadoop.hbase.TableName)} instead. The table has to be in
* enabled state for it to be disabled. n * @throws IOException There could be couple types of
* IOException TableNotFoundException means the table doesn't exist. TableNotEnabledException
* means the table isn't in enabled state.
* enabled state for it to be disabled.
* @throws IOException There could be couple types of IOException TableNotFoundException means the
* table doesn't exist. TableNotEnabledException means the table isn't in
* enabled state.
*/
default void disableTable(TableName tableName) throws IOException {
get(disableTableAsync(tableName), getSyncWaitTimeout(), TimeUnit.MILLISECONDS);
@ -905,7 +906,7 @@ public interface Admin extends Abortable, Closeable {
* then it returns. It does not wait on the completion of Compaction (it can take a while).
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException if a remote or network exception occurs n
* @throws IOException if a remote or network exception occurs
*/
void compact(TableName tableName, CompactType compactType)
throws IOException, InterruptedException;
@ -917,7 +918,7 @@ public interface Admin extends Abortable, Closeable {
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException if not a mob column family or if a remote or network exception occurs n
* @throws IOException if not a mob column family or if a remote or network exception occurs
*/
void compact(TableName tableName, byte[] columnFamily, CompactType compactType)
throws IOException, InterruptedException;
@ -966,7 +967,7 @@ public interface Admin extends Abortable, Closeable {
* while).
* @param tableName table to compact
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException if a remote or network exception occurs n
* @throws IOException if a remote or network exception occurs
*/
void majorCompact(TableName tableName, CompactType compactType)
throws IOException, InterruptedException;
@ -978,7 +979,7 @@ public interface Admin extends Abortable, Closeable {
* @param tableName table to compact
* @param columnFamily column family within a table
* @param compactType {@link org.apache.hadoop.hbase.client.CompactType}
* @throws IOException if not a mob column family or if a remote or network exception occurs n
* @throws IOException if not a mob column family or if a remote or network exception occurs
*/
void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType)
throws IOException, InterruptedException;
@ -989,10 +990,10 @@ public interface Admin extends Abortable, Closeable {
* can take a while).
* @param sn the region server name
* @param major if it's major compaction
* @throws IOException if a remote or network exception occurs n * @deprecated As of release
* 2.0.0, this will be removed in HBase 3.0.0. Use
* {@link #compactRegionServer(ServerName)} or
* {@link #majorCompactRegionServer(ServerName)}.
* @throws IOException if a remote or network exception occurs
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
* {@link #compactRegionServer(ServerName)} or
* {@link #majorCompactRegionServer(ServerName)}.
*/
@Deprecated
default void compactRegionServer(ServerName sn, boolean major)
@ -2562,7 +2563,7 @@ public interface Admin extends Abortable, Closeable {
/**
* Return the set of supported security capabilities.
* @throws IOException if a remote or network exception occurs n
* @throws IOException if a remote or network exception occurs
*/
List<SecurityCapability> getSecurityCapabilities() throws IOException;
@ -2906,7 +2907,7 @@ public interface Admin extends Abortable, Closeable {
* Clear compacting queues on a regionserver.
* @param serverName the region server name
* @param queues the set of queue name
* @throws IOException if a remote or network exception occurs n
* @throws IOException if a remote or network exception occurs
*/
void clearCompactionQueues(ServerName serverName, Set<String> queues)
throws IOException, InterruptedException;
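
A hedged usage sketch of the Admin calls documented in this hunk, assuming an already open Connection and an existing table named "demo":

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.Connection;

final class AdminSketch {
  static void compactDemo(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("demo");
    try (Admin admin = conn.getAdmin()) {
      admin.disableTable(tn);                  // blocks until disabled, may time out
      admin.enableTable(tn);
      admin.compact(tn, CompactType.NORMAL);   // queues the compaction request and returns
      admin.majorCompact(tn, CompactType.MOB); // major-compact the MOB data for the table
    }
  }
}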


@ -61,7 +61,7 @@ public class Append extends Mutation {
* <p>
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive n
* @param maxStamp maximum timestamp value, exclusive
*/
public Append setTimeRange(long minStamp, long maxStamp) {
tr = new TimeRange(minStamp, maxStamp);
@ -69,7 +69,7 @@ public class Append extends Mutation {
}
/**
* Gets the TimeRange used for this append. n
* Gets the TimeRange used for this append.
*/
public TimeRange getTimeRange() {
return this.tr;
@ -81,7 +81,7 @@ public class Append extends Mutation {
}
/**
* n * True (default) if the append operation should return the results. A client that is not
* True (default) if the append operation should return the results. A client that is not
* interested in the result can save network bandwidth setting this to false.
*/
@Override
@ -120,7 +120,7 @@ public class Append extends Mutation {
* Create a Append operation for the specified row.
* <p>
* At least one column must be appended to.
* @param rowArray Makes a copy out of this buffer. nn
* @param rowArray Makes a copy out of this buffer.
*/
public Append(final byte[] rowArray, final int rowOffset, final int rowLength) {
checkRow(rowArray, rowOffset, rowLength);
@ -142,9 +142,9 @@ public class Append extends Mutation {
* Add the specified column and value to this Append operation.
* @param family family name
* @param qualifier column qualifier
* @param value value to append to specified column n * @deprecated As of release 2.0.0, this
* will be removed in HBase 3.0.0. Use {@link #addColumn(byte[], byte[], byte[])}
* instead
* @param value value to append to specified column
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
* {@link #addColumn(byte[], byte[], byte[])} instead
*/
@Deprecated
public Append add(byte[] family, byte[] qualifier, byte[] value) {
@ -155,7 +155,7 @@ public class Append extends Mutation {
* Add the specified column and value to this Append operation.
* @param family family name
* @param qualifier column qualifier
* @param value value to append to specified column n
* @param value value to append to specified column
*/
public Append addColumn(byte[] family, byte[] qualifier, byte[] value) {
KeyValue kv = new KeyValue(this.row, family, qualifier, this.ts, KeyValue.Type.Put, value);
@ -163,7 +163,8 @@ public class Append extends Mutation {
}
/**
* Add column and value to this Append operation. n * @return This instance
* Add column and value to this Append operation.
* @return This instance
*/
@SuppressWarnings("unchecked")
public Append add(final Cell cell) {
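
A hedged usage sketch of the Append API documented above; the table, row, family and qualifier names are invented:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class AppendSketch {
  static Result appendSuffix(Connection conn) throws Exception {
    Append append = new Append(Bytes.toBytes("row-1"));
    append.setTimeRange(0L, Long.MAX_VALUE);   // [minStamp, maxStamp)
    append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("-suffix"));
    append.setReturnResults(true);             // default; false saves network bandwidth
    try (Table table = conn.getTable(TableName.valueOf("demo"))) {
      return table.append(append);
    }
  }
}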


@ -198,7 +198,7 @@ public interface AsyncAdmin {
CompletableFuture<Void> enableTable(TableName tableName);
/**
* Disable a table. The table has to be in enabled state for it to be disabled. n
* Disable a table. The table has to be in enabled state for it to be disabled.
*/
CompletableFuture<Void> disableTable(TableName tableName);
@ -1098,7 +1098,7 @@ public interface AsyncAdmin {
CompletableFuture<Void> stopMaster();
/**
* Stop the designated regionserver. n
* Stop the designated regionserver.
*/
CompletableFuture<Void> stopRegionServer(ServerName serverName);
@ -1126,19 +1126,20 @@ public interface AsyncAdmin {
CompletableFuture<Void> rollWALWriter(ServerName serverName);
/**
* Clear compacting queues on a region server. n * @param queues the set of queue name
* Clear compacting queues on a region server.
* @param queues the set of queue name
*/
CompletableFuture<Void> clearCompactionQueues(ServerName serverName, Set<String> queues);
/**
* Get a list of {@link RegionMetrics} of all regions hosted on a region seerver. n * @return a
* list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
* Get a list of {@link RegionMetrics} of all regions hosted on a region seerver.
* @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
*/
CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName serverName);
/**
* Get a list of {@link RegionMetrics} of all regions hosted on a region seerver for a table. nn
* * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
* Get a list of {@link RegionMetrics} of all regions hosted on a region seerver for a table.
* @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture}
*/
CompletableFuture<List<RegionMetrics>> getRegionMetrics(ServerName serverName,
TableName tableName);
@ -1285,8 +1286,8 @@ public interface AsyncAdmin {
CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp);
/**
* Turn the cleaner chore on/off. n * @return Previous cleaner state wrapped by a
* {@link CompletableFuture}
* Turn the cleaner chore on/off.
* @return Previous cleaner state wrapped by a {@link CompletableFuture}
*/
CompletableFuture<Boolean> cleanerChoreSwitch(boolean on);
@ -1305,8 +1306,8 @@ public interface AsyncAdmin {
CompletableFuture<Boolean> runCleanerChore();
/**
* Turn the catalog janitor on/off. n * @return the previous state wrapped by a
* {@link CompletableFuture}
* Turn the catalog janitor on/off.
* @return the previous state wrapped by a {@link CompletableFuture}
*/
CompletableFuture<Boolean> catalogJanitorSwitch(boolean on);
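
A hedged sketch of the asynchronous admin calls above; every call returns a CompletableFuture instead of blocking (table name invented):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;

final class AsyncAdminSketch {
  static CompletableFuture<Void> disableAndToggleCleaner(AsyncConnection conn) {
    AsyncAdmin admin = conn.getAdmin();
    return admin.disableTable(TableName.valueOf("demo"))
      .thenCompose(ignored -> admin.cleanerChoreSwitch(false))  // returns the previous state
      .thenAccept(prev -> System.out.println("cleaner chore was " + (prev ? "on" : "off")));
  }
}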


@ -180,7 +180,7 @@ class AsyncConnectionImpl implements AsyncConnection {
}
/**
* If choreService has not been created yet, create the ChoreService. n
* If choreService has not been created yet, create the ChoreService.
*/
synchronized ChoreService getChoreService() {
if (isClosed()) {


@ -203,7 +203,7 @@ class AsyncProcess {
* The submitted task may be not accomplished at all if there are too many running tasks or other
* limits.
* @param <CResult> The class to cast the result
* @param task The setting and data n
* @param task The setting and data
*/
public <CResult> AsyncRequestFuture submit(AsyncProcessTask<CResult> task)
throws InterruptedIOException {


@ -91,7 +91,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
* @param conf The {@link Configuration} to use.
* @param scan {@link Scan} to use in this scanner
* @param tableName The table that we wish to scan
* @param connection Connection identifying the cluster n
* @param connection Connection identifying the cluster
*/
public ClientScanner(final Configuration conf, final Scan scan, final TableName tableName,
ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,


@ -55,8 +55,8 @@ public interface ClusterConnection extends Connection {
/**
* Use this api to check if the table has been created with the specified number of splitkeys
* which was used while creating the given table. Note : If this api is used after a table's
* region gets splitted, the api may return false. n * tableName n * splitKeys used while creating
* table n * if a remote or network exception occurs
* region gets splitted, the api may return false. tableName splitKeys used while creating table
* if a remote or network exception occurs
*/
boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException;
@ -255,7 +255,7 @@ public interface ClusterConnection extends Connection {
* Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. This
* RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be
* intercepted with the configured {@link RetryingCallerInterceptor}
* @param conf configuration n
* @param conf configuration
*/
RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf);


@ -126,7 +126,7 @@ public interface ColumnFamilyDescriptor {
int getMinVersions();
/**
* Get the mob compact partition policy for this family n
* Get the mob compact partition policy for this family
*/
MobCompactPartitionPolicy getMobCompactPartitionPolicy();


@ -816,8 +816,8 @@ public class ColumnFamilyDescriptorBuilder {
/**
* Set whether the tags should be compressed along with DataBlockEncoding. When no
* DataBlockEncoding is been used, this is having no effect. n * @return this (for chained
* invocation)
* DataBlockEncoding is been used, this is having no effect.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) {
return setValue(COMPRESS_TAGS_BYTES, String.valueOf(compressTags));
@ -1195,7 +1195,7 @@ public class ColumnFamilyDescriptorBuilder {
* @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic
* prefix
* @return An instance of {@link ModifyableColumnFamilyDescriptor} made from <code>bytes</code>
* n * @see #toByteArray()
* @see #toByteArray()
*/
private static ColumnFamilyDescriptor parseFrom(final byte[] bytes)
throws DeserializationException {
@ -1241,8 +1241,8 @@ public class ColumnFamilyDescriptorBuilder {
}
/**
* Remove a configuration setting represented by the key from the {@link #configuration} map. n
* * @return this (for chained invocation)
* Remove a configuration setting represented by the key from the {@link #configuration} map.
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor removeConfiguration(final String key) {
return setConfiguration(key, null);
@ -1254,8 +1254,8 @@ public class ColumnFamilyDescriptorBuilder {
}
/**
* Set the encryption algorithm for use with this family n * @return this (for chained
* invocation)
* Set the encryption algorithm for use with this family
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setEncryptionType(String algorithm) {
return setValue(ENCRYPTION_BYTES, algorithm);
@ -1267,7 +1267,8 @@ public class ColumnFamilyDescriptorBuilder {
}
/**
* Set the raw crypto key attribute for the family n * @return this (for chained invocation)
* Set the raw crypto key attribute for the family
* @return this (for chained invocation)
*/
public ModifyableColumnFamilyDescriptor setEncryptionKey(byte[] keyBytes) {
return setValue(ENCRYPTION_KEY_BYTES, new Bytes(keyBytes));
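
A hedged sketch of the builder-style setters documented above (family name and algorithm are illustrative; setEncryptionKey() would additionally need a properly wrapped key):

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

final class ColumnFamilySketch {
  static ColumnFamilyDescriptor demoFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
      .setCompressTags(true)      // compress tags along with the DataBlockEncoding
      .setEncryptionType("AES")   // encryption algorithm for this family
      .build();
  }
}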


@ -599,7 +599,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
}
/**
* If choreService has not been created yet, create the ChoreService. n
* If choreService has not been created yet, create the ChoreService.
*/
synchronized ChoreService getChoreService() {
if (choreService == null) {


@ -95,7 +95,7 @@ public class Delete extends Mutation {
* <p>
* This timestamp is ONLY used for a delete row operation. If specifying families or columns, you
* must specify each timestamp individually.
* @param row We make a local copy of this passed in row. nn
* @param row We make a local copy of this passed in row.
*/
public Delete(final byte[] row, final int rowOffset, final int rowLength) {
this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@ -109,8 +109,8 @@ public class Delete extends Mutation {
* <p>
* This timestamp is ONLY used for a delete row operation. If specifying families or columns, you
* must specify each timestamp individually.
* @param row We make a local copy of this passed in row. nn * @param timestamp maximum version
* timestamp (only for delete row)
* @param row We make a local copy of this passed in row.
* @param timestamp maximum version timestamp (only for delete row)
*/
public Delete(final byte[] row, final int rowOffset, final int rowLength, long timestamp) {
checkRow(row, rowOffset, rowLength);
@ -140,8 +140,9 @@ public class Delete extends Mutation {
/**
* Advanced use only. Add an existing delete marker to this Delete object.
* @param kv An existing KeyValue of type "delete".
* @return this for invocation chaining n * @deprecated As of release 2.0.0, this will be removed
* in HBase 3.0.0. Use {@link #add(Cell)} instead
* @return this for invocation chaining
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)}
* instead
*/
@SuppressWarnings("unchecked")
@Deprecated
@ -152,7 +153,7 @@ public class Delete extends Mutation {
/**
* Add an existing delete marker to this Delete object.
* @param cell An existing cell of type "delete".
* @return this for invocation chaining n
* @return this for invocation chaining
*/
public Delete add(Cell cell) throws IOException {
super.add(cell);
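
A hedged sketch of the Delete constructors documented above; the offset/length form copies only the selected slice of the passed-in row buffer (all names invented):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class DeleteSketch {
  static void deleteDemo(Connection conn) throws Exception {
    byte[] buffer = Bytes.toBytes("prefix-row-1");
    // Row is the 5-byte slice "row-1"; the timestamp caps versions for a delete-row operation.
    Delete d = new Delete(buffer, 7, 5, System.currentTimeMillis());
    d.addFamily(Bytes.toBytes("cf"));   // narrow the delete to one family
    try (Table table = conn.getTable(TableName.valueOf("demo"))) {
      table.delete(d);
    }
  }
}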


@ -87,7 +87,7 @@ public class Get extends Query implements Row {
}
/**
* Copy-constructor n
* Copy-constructor
*/
public Get(Get get) {
this(get.getRow());
@ -126,7 +126,7 @@ public class Get extends Query implements Row {
}
/**
* Create a Get operation for the specified row. nnn
* Create a Get operation for the specified row.
*/
public Get(byte[] row, int rowOffset, int rowLength) {
Mutation.checkRow(row, rowOffset, rowLength);
@ -134,7 +134,7 @@ public class Get extends Query implements Row {
}
/**
* Create a Get operation for the specified row. n
* Create a Get operation for the specified row.
*/
public Get(ByteBuffer row) {
Mutation.checkRow(row);
@ -208,7 +208,8 @@ public class Get extends Query implements Row {
/**
* Get versions of columns only within the specified timestamp range, [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive n * @return this for invocation chaining
* @param maxStamp maximum timestamp value, exclusive
* @return this for invocation chaining
*/
public Get setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = new TimeRange(minStamp, maxStamp);
@ -351,7 +352,7 @@ public class Get extends Query implements Row {
}
/**
* Method for retrieving the get's row n
* Method for retrieving the get's row
*/
@Override
public byte[] getRow() {
@ -383,7 +384,7 @@ public class Get extends Query implements Row {
}
/**
* Method for retrieving the get's TimeRange n
* Method for retrieving the get's TimeRange
*/
public TimeRange getTimeRange() {
return this.tr;
@ -414,7 +415,7 @@ public class Get extends Query implements Row {
}
/**
* Method for retrieving the get's familyMap n
* Method for retrieving the get's familyMap
*/
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
@ -422,7 +423,7 @@ public class Get extends Query implements Row {
/**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
* and aggregation by debugging, logging, and administration tools. n
* and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
@ -439,7 +440,7 @@ public class Get extends Query implements Row {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
* @param maxCols a limit on the number of columns output prior to truncation n
* @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
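
A hedged usage sketch of the Get API documented above (names invented; toMap() output is the fingerprint plus details, as the javadoc describes):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class GetSketch {
  static Result getDemo(Connection conn) throws Exception {
    Get get = new Get(Bytes.toBytes("row-1"));
    get.setTimeRange(0L, System.currentTimeMillis());   // [minStamp, maxStamp)
    get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    System.out.println(get.toMap(5));                   // truncate output after 5 columns
    try (Table table = conn.getTable(TableName.valueOf("demo"))) {
      return table.get(get);
    }
  }
}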


@ -1124,7 +1124,7 @@ public class HBaseAdmin implements Admin {
}
/**
* n * @return List of {@link HRegionInfo}.
* @return List of {@link HRegionInfo}.
* @throws IOException if a remote or network exception occurs
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link #getRegions(ServerName)}.
@ -1348,7 +1348,7 @@ public class HBaseAdmin implements Admin {
* @param regionName region to compact
* @param columnFamily column family within a table or region
* @param major True if we are to do a major compaction.
* @throws IOException if a remote or network exception occurs n
* @throws IOException if a remote or network exception occurs
*/
private void compactRegion(final byte[] regionName, final byte[] columnFamily,
final boolean major) throws IOException {
@ -2309,7 +2309,7 @@ public class HBaseAdmin implements Admin {
}
/**
* n * @return List of {@link HRegionInfo}.
* @return List of {@link HRegionInfo}.
* @throws IOException if a remote or network exception occurs
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use
* {@link #getRegions(TableName)}.


@ -134,8 +134,8 @@ public class HTableMultiplexer {
/**
* The put request will be buffered by its corresponding buffer queue. Return false if the queue
* is already full. nn * @return true if the request can be accepted by its corresponding buffer
* queue.
* is already full.
* @return true if the request can be accepted by its corresponding buffer queue.
*/
public boolean put(TableName tableName, final Put put) {
return put(tableName, put, this.maxAttempts);
@ -143,7 +143,8 @@ public class HTableMultiplexer {
/**
* The puts request will be buffered by their corresponding buffer queue. Return the list of puts
* which could not be queued. nn * @return the list of puts which could not be queued
* which could not be queued.
* @return the list of puts which could not be queued
*/
public List<Put> put(TableName tableName, final List<Put> puts) {
if (puts == null) return null;
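
A hedged sketch of the buffered-put semantics documented above, assuming an already constructed (and since-deprecated) HTableMultiplexer instance; puts that cannot be queued come back to the caller:

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableMultiplexer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

final class MultiplexerSketch {
  static void bufferedPuts(HTableMultiplexer multiplexer) {
    TableName tn = TableName.valueOf("demo");
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    boolean accepted = multiplexer.put(tn, put);                              // false if the queue is full
    List<Put> rejected = multiplexer.put(tn, Collections.singletonList(put)); // puts that could not be queued
    System.out.println("accepted=" + accepted + ", rejected=" + rejected);
  }
}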


@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public class ImmutableHColumnDescriptor extends HColumnDescriptor {
/*
* Create an unmodifyable copy of an HColumnDescriptor n
* Create an unmodifyable copy of an HColumnDescriptor
*/
ImmutableHColumnDescriptor(final HColumnDescriptor desc) {
super(desc, false);


@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience;
public class ImmutableHRegionInfo extends HRegionInfo {
/*
* Creates an immutable copy of an HRegionInfo. n
* Creates an immutable copy of an HRegionInfo.
*/
public ImmutableHRegionInfo(RegionInfo other) {
super(other);


@ -41,7 +41,7 @@ public class ImmutableHTableDescriptor extends HTableDescriptor {
}
/*
* Create an unmodifyable copy of an HTableDescriptor n
* Create an unmodifyable copy of an HTableDescriptor
*/
public ImmutableHTableDescriptor(final HTableDescriptor desc) {
super(desc, false);


@ -92,7 +92,8 @@ public class Increment extends Mutation {
/**
* Add the specified KeyValue to this operation.
* @param cell individual Cell n * @throws java.io.IOException e
* @param cell individual Cell
* @throws java.io.IOException e
*/
public Increment add(Cell cell) throws IOException {
super.add(cell);
@ -120,7 +121,7 @@ public class Increment extends Mutation {
}
/**
* Gets the TimeRange used for this increment. n
* Gets the TimeRange used for this increment.
*/
public TimeRange getTimeRange() {
return this.tr;
@ -138,7 +139,7 @@ public class Increment extends Mutation {
* This range is used as [minStamp, maxStamp).
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @throws IOException if invalid time range n
* @throws IOException if invalid time range
*/
public Increment setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = new TimeRange(minStamp, maxStamp);
@ -208,8 +209,7 @@ public class Increment extends Mutation {
}
/**
* n
*/
* */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
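
A hedged usage sketch of the Increment API documented above (names invented); the time range is applied as [minStamp, maxStamp) per the javadoc:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class IncrementSketch {
  static Result incrementDemo(Connection conn) throws Exception {
    Increment inc = new Increment(Bytes.toBytes("row-1"));
    inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L); // add 1 to the counter cell
    inc.setTimeRange(0L, Long.MAX_VALUE);                             // [minStamp, maxStamp)
    try (Table table = conn.getTable(TableName.valueOf("demo"))) {
      return table.increment(inc);
    }
  }
}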


@ -107,7 +107,7 @@ abstract class MasterCallable<V> implements RetryingCallable<V>, Closeable {
* configured to make this rpc call, use getRpcController(). We are trying to contain
* rpcController references so we don't pollute codebase with protobuf references; keep the
* protobuf references contained and only present in a few classes rather than all about the code
* base. n
* base.
*/
protected abstract V rpcCall() throws Exception;


@ -186,9 +186,7 @@ public class MetaCache {
}
}
/**
* n * @return Map of cached locations for passed <code>tableName</code>
*/
/** Returns Map of cached locations for passed <code>tableName</code> */
private ConcurrentNavigableMap<byte[], RegionLocations>
getTableLocations(final TableName tableName) {
// find the map of cached locations for this table
@ -287,7 +285,7 @@ public class MetaCache {
/**
* Delete a cached location, no matter what it is. Called when we were told to not use cache.
* @param tableName tableName n
* @param tableName tableName
*/
public void clearCache(final TableName tableName, final byte[] row) {
ConcurrentMap<byte[], RegionLocations> tableLocations = getTableLocations(tableName);


@ -60,7 +60,7 @@ public final class MultiAction {
/**
* Add an Action to this container based on it's regionName. If the regionName is wrong, the
* initial execution will fail, but will be automatically retried after looking up the correct
* region. nn
* region.
*/
public void add(byte[] regionName, Action a) {
add(regionName, Collections.singletonList(a));
@ -69,7 +69,8 @@ public final class MultiAction {
/**
* Add an Action to this container based on it's regionName. If the regionName is wrong, the
* initial execution will fail, but will be automatically retried after looking up the correct
* region. n * @param actionList list of actions to add for the region
* region.
* @param actionList list of actions to add for the region
*/
public void add(byte[] regionName, List<Action> actionList) {
List<Action> rsActions = actions.get(regionName);


@ -54,9 +54,9 @@ public class MultiResponse extends AbstractResponse {
}
/**
* Add the pair to the container, grouped by the regionName n * @param originalIndex the original
* index of the Action (request).
* @param resOrEx the result or error; will be empty for successful Put and Delete actions.
* Add the pair to the container, grouped by the regionName
* @param originalIndex the original index of the Action (request).
* @param resOrEx the result or error; will be empty for successful Put and Delete actions.
*/
public void add(byte[] regionName, int originalIndex, Object resOrEx) {
getResult(regionName).addResult(originalIndex, resOrEx);


@ -179,7 +179,7 @@ class MutableRegionInfo implements RegionInfo {
}
/**
* Get current table name of the region n
* Get current table name of the region
*/
@Override
public TableName getTable() {
@ -230,7 +230,8 @@ class MutableRegionInfo implements RegionInfo {
}
/**
* @param split set split status n
* Change the split status flag.
* @param split set split status
*/
public MutableRegionInfo setSplit(boolean split) {
this.split = split;
@ -251,7 +252,7 @@ class MutableRegionInfo implements RegionInfo {
/**
* The parent of a region split is offline while split daughters hold references to the parent.
* Offlined regions are closed.
* @param offLine Set online/offline status. n
* @param offLine Set online/offline status.
*/
public MutableRegionInfo setOffline(boolean offLine) {
this.offLine = offLine;


@ -161,8 +161,8 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Create a KeyValue with this objects row key and the Put identifier. nnnn * @param tags -
* Specify the Tags as an Array
* Create a KeyValue with this objects row key and the Put identifier.
* @param tags - Specify the Tags as an Array
* @return a KeyValue with this objects row key and the Put identifier.
*/
KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value, Tag[] tags) {
@ -183,7 +183,7 @@ public abstract class Mutation extends OperationWithAttributes
/**
* Compile the column family (i.e. schema) information into a Map. Useful for parsing and
* aggregation by debugging, logging, and administration tools. n
* aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
@ -202,7 +202,7 @@ public abstract class Mutation extends OperationWithAttributes
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
* @param maxCols a limit on the number of columns output prior to truncation n
* @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
@ -265,7 +265,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Set the durability for this mutation n
* Set the durability for this mutation
*/
public Mutation setDurability(Durability d) {
this.durability = d;
@ -278,7 +278,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Method for retrieving the put's familyMap n
* Method for retrieving the put's familyMap
*/
public NavigableMap<byte[], List<Cell>> getFamilyCellMap() {
return this.familyMap;
@ -306,7 +306,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Method for retrieving the delete's row n
* Method for retrieving the delete's row
*/
@Override
public byte[] getRow() {
@ -324,8 +324,9 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Method for retrieving the timestamp n * @deprecated As of release 2.0.0, this will be removed
* in HBase 3.0.0. Use {@link #getTimestamp()} instead
* Method for retrieving the timestamp
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
* {@link #getTimestamp()} instead
*/
@Deprecated
public long getTimeStamp() {
@ -333,7 +334,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Method for retrieving the timestamp. n
* Method for retrieving the timestamp.
*/
public long getTimestamp() {
return this.ts;
@ -369,7 +370,7 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Sets the visibility expression associated with cells in this Mutation. n
* Sets the visibility expression associated with cells in this Mutation.
*/
public Mutation setCellVisibility(CellVisibility expression) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@ -385,8 +386,8 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Create a protocol buffer CellVisibility based on a client CellVisibility. n * @return a
* protocol buffer CellVisibility
* Create a protocol buffer CellVisibility based on a client CellVisibility.
* @return a protocol buffer CellVisibility
*/
static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
@ -395,8 +396,8 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Convert a protocol buffer CellVisibility to a client CellVisibility n * @return the converted
* client CellVisibility
* Convert a protocol buffer CellVisibility to a client CellVisibility
* @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
if (proto == null) return null;
@ -404,8 +405,8 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* Convert a protocol buffer CellVisibility bytes to a client CellVisibility n * @return the
* converted client CellVisibility n
* Convert a protocol buffer CellVisibility bytes to a client CellVisibility
* @return the converted client CellVisibility
*/
private static CellVisibility toCellVisibility(byte[] protoBytes)
throws DeserializationException {
@ -510,7 +511,7 @@ public abstract class Mutation extends OperationWithAttributes
/**
* Set the TTL desired for the result of the mutation, in milliseconds.
* @param ttl the TTL desired for the result of the mutation, in milliseconds n
* @param ttl the TTL desired for the result of the mutation, in milliseconds
*/
public Mutation setTTL(long ttl) {
setAttribute(OP_ATTRIBUTE_TTL, Bytes.toBytes(ttl));
@ -626,8 +627,8 @@ public abstract class Mutation extends OperationWithAttributes
/*
* Private method to determine if this object's familyMap contains the given value assigned to the
* given family, qualifier and timestamp respecting the 2 boolean arguments nnnnnn * @return
* returns true if the given family, qualifier timestamp and value already has an existing
* given family, qualifier and timestamp respecting the 2 boolean arguments
* @return returns true if the given family, qualifier timestamp and value already has an existing
* KeyValue object in the family map.
*/
protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS,
@ -689,8 +690,9 @@ public abstract class Mutation extends OperationWithAttributes
}
/**
* @param row Row to check nn * @throws IllegalArgumentException Thrown if <code>row</code> is
* empty or null or &gt; {@link HConstants#MAX_ROW_LENGTH}
* @param row Row to check
* @throws IllegalArgumentException Thrown if <code>row</code> is empty or null or &gt;
* {@link HConstants#MAX_ROW_LENGTH}
* @return <code>row</code>
*/
static byte[] checkRow(final byte[] row, final int offset, final int length) {
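As an illustrative aside to the Mutation setters touched above (setDurability, setCellVisibility, setTTL), a minimal usage sketch; the row key, column family "cf" and visibility labels are hypothetical, and the usual org.apache.hadoop.hbase.client imports are assumed:

Put put = new Put(Bytes.toBytes("row-1"));
put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
put.setDurability(Durability.SYNC_WAL);                      // force a WAL sync for this mutation
put.setCellVisibility(new CellVisibility("secret|public"));  // visibility expression applied to the cells
put.setTTL(60 * 60 * 1000L);                                 // result of the mutation expires after one hour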

View File

@ -94,7 +94,7 @@ public abstract class Operation {
/**
* Produces a string representation of this Operation. It defaults to a JSON representation, but
* falls back to a string representation of the fingerprint and details in the case of a JSON
* encoding failure. n
* encoding failure.
*/
@Override
public String toString() {

View File

@ -106,7 +106,7 @@ public abstract class OperationWithAttributes extends Operation implements Attri
* This method allows you to set an identifier on an operation. The original motivation for this
* was to allow the identifier to be used in slow query logging, but this could obviously be
* useful in other places. One use of this could be to put a class.method identifier in here to
* see where the slow query is coming from. n * id to set for the scan
* see where the slow query is coming from. id to set for the scan
*/
public OperationWithAttributes setId(String id) {
setAttribute(ID_ATRIBUTE, Bytes.toBytes(id));

View File

@ -131,7 +131,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
/**
* Handles failures encountered when communicating with a server. Updates the FailureInfo in
* repeatedFailuresMap to reflect the failure. Throws RepeatedConnectException if the client is in
* Fast fail mode. nn * - the throwable to be handled. n
* Fast fail mode. - the throwable to be handled.
*/
protected void handleFailureToServer(ServerName serverName, Throwable t) {
if (serverName == null || t == null) {
@ -200,7 +200,8 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
/**
* Checks to see if we are in the Fast fail mode for requests to the server. If a client is unable
* to contact a server for more than fastFailThresholdMilliSec the client will get into fast fail
* mode. n * @return true if the client is in fast fail mode for the server.
* mode.
* @return true if the client is in fast fail mode for the server.
*/
private boolean inFastFailMode(ServerName server) {
FailureInfo fInfo = repeatedFailuresMap.get(server);
@ -224,7 +225,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
* Check to see if the client should try to connect to the server, in spite of knowing that it is
* in the fast fail mode. The idea here is that we want just one client thread to be actively
* trying to reconnect, while all the other threads trying to reach the server will short circuit.
* n * @return true if the client should try to connect to the server.
* @return true if the client should try to connect to the server.
*/
protected boolean shouldRetryInspiteOfFastFail(FailureInfo fInfo) {
// We believe that the server is down, But, we want to have just one
@ -245,7 +246,7 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor {
}
/**
* This function updates the Failure info for a particular server after the attempt to nnnn
* This function updates the Failure info for a particular server after the attempt to
*/
private void updateFailureInfoForServer(ServerName server, FailureInfo fInfo, boolean didTry,
boolean couldNotCommunicate, boolean retryDespiteFastFailMode) {

View File

@ -60,7 +60,7 @@ public class Put extends Mutation implements HeapSize {
}
/**
* We make a copy of the passed in row key to keep local. nnn
* We make a copy of the passed in row key to keep local.
*/
public Put(byte[] rowArray, int rowOffset, int rowLength) {
this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
@ -88,7 +88,7 @@ public class Put extends Mutation implements HeapSize {
}
/**
* We make a copy of the passed in row key to keep local. nnnn
* We make a copy of the passed in row key to keep local.
*/
public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) {
checkRow(rowArray, rowOffset, rowLength);
@ -155,7 +155,7 @@ public class Put extends Mutation implements HeapSize {
* Add the specified column and value to this Put operation.
* @param family family name
* @param qualifier column qualifier
* @param value column value n
* @param value column value
*/
public Put addColumn(byte[] family, byte[] qualifier, byte[] value) {
return addColumn(family, qualifier, this.ts, value);
@ -178,7 +178,7 @@ public class Put extends Mutation implements HeapSize {
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
* @param value column value n
* @param value column value
*/
public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) {
if (ts < 0) {
@ -222,7 +222,7 @@ public class Put extends Mutation implements HeapSize {
* @param family family name
* @param qualifier column qualifier
* @param ts version timestamp
* @param value column value n
* @param value column value
*/
public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) {
if (ts < 0) {
@ -255,7 +255,8 @@ public class Put extends Mutation implements HeapSize {
/**
* Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is
* immutable and its backing array will not be modified for the duration of this Put.
* @param cell individual cell n * @throws java.io.IOException e
* @param cell individual cell
* @throws java.io.IOException e
*/
public Put add(Cell cell) throws IOException {
super.add(cell);
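A hedged sketch of the addColumn overloads above, with and without an explicit timestamp; 'table', the row key and the values are invented:

Put p = new Put(Bytes.toBytes("row-42"));
p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("name"), Bytes.toBytes("alice"));           // server assigns the timestamp
p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("age"), 1565000000000L, Bytes.toBytes(30)); // explicit version timestamp
table.put(p); // 'table' is an already-opened org.apache.hadoop.hbase.client.Table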

View File

@ -47,9 +47,6 @@ public abstract class Query extends OperationWithAttributes {
protected Map<byte[], TimeRange> colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
protected Boolean loadColumnFamiliesOnDemand = null;
/**
* n
*/
public Filter getFilter() {
return filter;
}
@ -67,7 +64,7 @@ public abstract class Query extends OperationWithAttributes {
}
/**
* Sets the authorizations to be used by this Query n
* Sets the authorizations to be used by this Query
*/
public Query setAuthorizations(Authorizations authorizations) {
this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
@ -131,7 +128,7 @@ public abstract class Query extends OperationWithAttributes {
* Specify region replica id where Query will fetch data from. Use this together with
* {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a
* specific replicaId. <br>
* <b> Expert: </b>This is an advanced API exposed. Only use it if you know what you are doing n
* <b> Expert: </b>This is an advanced API exposed. Only use it if you know what you are doing
*/
public Query setReplicaId(int Id) {
this.targetReplicaId = Id;
@ -208,7 +205,7 @@ public abstract class Query extends OperationWithAttributes {
* Column Family time ranges take precedence over the global time range.
* @param cf the column family for which you want to restrict
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive n
* @param maxStamp maximum timestamp value, exclusive
*/
public Query setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
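The Query-level options above apply to both Get and Scan; a small sketch on a Scan, with made-up visibility labels, family and timestamps:

Scan scan = new Scan();
scan.setAuthorizations(new Authorizations("admin", "audit"));            // visibility labels used for this read
scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0L, 1565000000000L);  // per-family time range, overrides the global one
scan.setConsistency(Consistency.TIMELINE);                               // allow reads from region replicas
scan.setReplicaId(1);                                                    // expert-only: pin the read to a specific replica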

View File

@ -184,7 +184,7 @@ public abstract class RegionAdminServiceCallable<T> implements RetryingCallable<
* Run RPC call.
* @param rpcController PayloadCarryingRpcController is a mouthful but it at a minimum is a facade
* on protobuf so we don't have to put protobuf everywhere; we can keep it
* behind this class. n
* behind this class.
*/
protected abstract T call(HBaseRpcController rpcController) throws Exception;
}

View File

@ -59,7 +59,8 @@ public class RegionInfoDisplay {
}
/**
* Get the start key for display. Optionally hide the real start key. nn * @return the startkey
* Get the start key for display. Optionally hide the real start key.
* @return the startkey
*/
public static byte[] getStartKeyForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);
@ -68,15 +69,16 @@ public class RegionInfoDisplay {
}
/**
* Get the region name for display. Optionally hide the start key. nn * @return region name as
* String
* Get the region name for display. Optionally hide the start key.
* @return region name as String
*/
public static String getRegionNameAsStringForDisplay(RegionInfo ri, Configuration conf) {
return Bytes.toStringBinary(getRegionNameForDisplay(ri, conf));
}
/**
* Get the region name for display. Optionally hide the start key. nn * @return region name bytes
* Get the region name for display. Optionally hide the start key.
* @return region name bytes
*/
public static byte[] getRegionNameForDisplay(RegionInfo ri, Configuration conf) {
boolean displayKey = conf.getBoolean(DISPLAY_KEYS_KEY, true);

View File

@ -51,7 +51,8 @@ public class RegionReplicaUtil {
/**
* Returns the RegionInfo for the given replicaId. RegionInfo's correspond to a range of a table,
* but more than one "instance" of the same range can be deployed which are differentiated by the
* replicaId. n * @param replicaId the replicaId to use
* replicaId.
* @param replicaId the replicaId to use
* @return an RegionInfo object corresponding to the same range (table, start and end key), but
* for the given replicaId.
*/
@ -84,7 +85,7 @@ public class RegionReplicaUtil {
}
/**
* Removes the non-default replicas from the passed regions collection n
* Removes the non-default replicas from the passed regions collection
*/
public static void removeNonDefaultRegions(Collection<RegionInfo> regions) {
Iterator<RegionInfo> iterator = regions.iterator();

View File

@ -132,7 +132,7 @@ public abstract class RegionServerCallable<T, S> implements RetryingCallable<T>
* configured to make this rpc call, use getRpcController(). We are trying to contain
* rpcController references so we don't pollute codebase with protobuf references; keep the
* protobuf references contained and only present in a few classes rather than all about the code
* base. n
* base.
*/
protected abstract T rpcCall() throws Exception;

View File

@ -186,7 +186,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Method for retrieving the row key that corresponds to the row from which this Result was
* created. n
* created.
*/
public byte[] getRow() {
if (this.row == null) {
@ -227,8 +227,9 @@ public class Result implements CellScannable, CellScanner {
* or Get) only requested 1 version the list will contain at most 1 entry. If the column did not
* exist in the result set (either the column does not exist or the column was not selected in the
* query) the list will be empty. Also see getColumnLatest which returns just a Cell
* @param family the family n * @return a list of Cells for this column or empty list if the
* column did not exist in the result set
* @param family the family
* @return a list of Cells for this column or empty list if the column did not exist in the result
* set
*/
public List<Cell> getColumnCells(byte[] family, byte[] qualifier) {
List<Cell> result = new ArrayList<>();
@ -324,7 +325,7 @@ public class Result implements CellScannable, CellScanner {
}
/**
* The Cell for the most recent timestamp for a given column. nn *
* The Cell for the most recent timestamp for a given column.
* @return the Cell for the column, or null if no value exists in the row or none have been
* selected in the query (Get/Scan)
*/
@ -677,8 +678,7 @@ public class Result implements CellScannable, CellScanner {
}
/**
* n
*/
* */
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
@ -800,7 +800,8 @@ public class Result implements CellScannable, CellScanner {
}
/**
* Get total size of raw cells n * @return Total size.
* Get total size of raw cells
* @return Total size.
*/
public static long getTotalSizeOfCells(Result result) {
long size = 0;
@ -816,7 +817,7 @@ public class Result implements CellScannable, CellScanner {
/**
* Copy another Result into this one. Needed for the old Mapred framework
* @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT (which is supposed
* to be immutable). n
* to be immutable).
*/
public void copyFrom(Result other) {
checkReadonly();
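A sketch of reading back a Result through the accessors above (getRow, getValue, getColumnCells); 'table', the family and the qualifier are hypothetical:

Result r = table.get(new Get(Bytes.toBytes("row-1")));
byte[] rowKey = r.getRow();                                           // row this Result was created from
byte[] newest = r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));  // most recent version, or null if absent
List<Cell> versions = r.getColumnCells(Bytes.toBytes("cf"), Bytes.toBytes("q")); // all returned versions of the column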

View File

@ -86,7 +86,7 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
* setting (or hbase.client.scanner.caching in hbase-site.xml).
* @param nbRows number of rows to return
* @return Between zero and nbRows rowResults. Scan is done if returned array is of zero-length
* (We never return null). n
* (We never return null).
*/
default Result[] next(int nbRows) throws IOException {
List<Result> resultSets = new ArrayList<>(nbRows);
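A sketch of draining a scanner with the default next(int) shown above; a zero-length array signals the end of the scan (the batch size and table handle are illustrative):

try (ResultScanner scanner = table.getScanner(new Scan())) {
  for (Result[] batch = scanner.next(100); batch.length > 0; batch = scanner.next(100)) {
    // process up to 100 rows per round trip
  }
}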

View File

@ -46,20 +46,20 @@ abstract class RetryingCallerInterceptor {
public abstract RetryingCallerInterceptorContext createEmptyContext();
/**
* Call this function in case we caught a failure during retries. n * : The context object that we
* obtained previously. n * : The exception that we caught in this particular try n
* Call this function in case we caught a failure during retries. : The context object that we
* obtained previously. : The exception that we caught in this particular try
*/
public abstract void handleFailure(RetryingCallerInterceptorContext context, Throwable t)
throws IOException;
/**
* Call this function alongside the actual call done on the callable. nn
* Call this function alongside the actual call done on the callable.
*/
public abstract void intercept(
RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) throws IOException;
/**
* Call this function to update at the end of the retry. This is not necessary to happen. n
* Call this function to update at the end of the retry. This is not necessary to happen.
*/
public abstract void updateFailureInfo(RetryingCallerInterceptorContext context);

View File

@ -38,17 +38,17 @@ abstract class RetryingCallerInterceptorContext {
/**
* This prepares the context object by populating it with information specific to the
* implementation of the {@link RetryingCallerInterceptor} along with which this will be used. n *
* : The {@link RetryingCallable} that contains the information about the call that is being made.
* implementation of the {@link RetryingCallerInterceptor} along with which this will be used. :
* The {@link RetryingCallable} that contains the information about the call that is being made.
* @return A new {@link RetryingCallerInterceptorContext} object that can be used for use in the
* current retrying call
*/
public abstract RetryingCallerInterceptorContext prepare(RetryingCallable<?> callable);
/**
* Telescopic extension that takes which of the many retries we are currently in. n * : The
* {@link RetryingCallable} that contains the information about the call that is being made. n * :
* The retry number that we are currently in.
* Telescopic extension that takes which of the many retries we are currently in. : The
* {@link RetryingCallable} that contains the information about the call that is being made. : The
* retry number that we are currently in.
* @return A new context object that can be used for use in the current retrying call
*/
public abstract RetryingCallerInterceptorContext prepare(RetryingCallable<?> callable, int tries);

View File

@ -39,8 +39,8 @@ public class RowMutations implements Row {
/**
* Create a {@link RowMutations} with the specified mutations.
* @param mutations the mutations to send n * @throws IOException if any row in mutations is
* different to another
* @param mutations the mutations to send
* @throws IOException if any row in mutations is different to another
*/
public static RowMutations of(List<? extends Mutation> mutations) throws IOException {
if (CollectionUtils.isEmpty(mutations)) {
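A sketch of RowMutations.of as documented above, grouping a Put and a Delete on the same (hypothetical) row and applying them atomically:

Put put = new Put(Bytes.toBytes("row-1"))
  .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("1"));
Delete delete = new Delete(Bytes.toBytes("row-1"))
  .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("b"));
RowMutations rm = RowMutations.of(Arrays.asList(put, delete)); // throws IOException if the rows differ
table.mutateRow(rm);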

View File

@ -326,7 +326,7 @@ public class Scan extends Query {
* Get all columns from the specified family.
* <p>
* Overrides previous calls to addColumn for this family.
* @param family family name n
* @param family family name
*/
public Scan addFamily(byte[] family) {
familyMap.remove(family);
@ -339,7 +339,7 @@ public class Scan extends Query {
* <p>
* Overrides previous calls to addFamily for this family.
* @param family family name
* @param qualifier column qualifier n
* @param qualifier column qualifier
*/
public Scan addColumn(byte[] family, byte[] qualifier) {
NavigableSet<byte[]> set = familyMap.get(family);
@ -361,7 +361,7 @@ public class Scan extends Query {
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @see #setMaxVersions()
* @see #setMaxVersions(int) n
* @see #setMaxVersions(int)
*/
public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
tr = new TimeRange(minStamp, maxStamp);
@ -374,8 +374,9 @@ public class Scan extends Query {
* number of versions beyond the default.
* @param timestamp version timestamp
* @see #setMaxVersions()
* @see #setMaxVersions(int) n * @deprecated As of release 2.0.0, this will be removed in HBase
* 3.0.0. Use {@link #setTimestamp(long)} instead
* @see #setMaxVersions(int)
* @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use
* {@link #setTimestamp(long)} instead
*/
@Deprecated
public Scan setTimeStamp(long timestamp) throws IOException {
@ -388,7 +389,7 @@ public class Scan extends Query {
* number of versions beyond the default.
* @param timestamp version timestamp
* @see #setMaxVersions()
* @see #setMaxVersions(int) n
* @see #setMaxVersions(int)
*/
public Scan setTimestamp(long timestamp) {
try {
@ -412,9 +413,9 @@ public class Scan extends Query {
* <p>
* If the specified row does not exist, the Scanner will start from the next closest row after the
* specified row.
* @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
* startRow does not meet criteria for a row key (when length exceeds
* {@link HConstants#MAX_ROW_LENGTH})
* @param startRow row to start scanner at or after
* @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
* @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStartRow(byte[])}
* instead. This method may change the inclusive of the stop row to keep compatible
* with the old behavior.
@ -436,9 +437,9 @@ public class Scan extends Query {
* <p>
* If the specified row does not exist, the Scanner will start from the next closest row after the
* specified row.
* @param startRow row to start scanner at or after n * @throws IllegalArgumentException if
* startRow does not meet criteria for a row key (when length exceeds
* {@link HConstants#MAX_ROW_LENGTH})
* @param startRow row to start scanner at or after
* @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStartRow(byte[] startRow) {
return withStartRow(startRow, true);
@ -450,9 +451,9 @@ public class Scan extends Query {
* If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
* will start from the next closest row after the specified row.
* @param startRow row to start scanner at or after
* @param inclusive whether we should include the start row when scan n * @throws
* IllegalArgumentException if startRow does not meet criteria for a row key
* (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
* @param inclusive whether we should include the start row when scan
* @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStartRow(byte[] startRow, boolean inclusive) {
if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
@ -472,9 +473,9 @@ public class Scan extends Query {
* <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
* {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
* </p>
* @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
* not meet criteria for a row key (when length exceeds
* {@link HConstants#MAX_ROW_LENGTH})
* @param stopRow row to end at (exclusive)
* @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
* @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #withStopRow(byte[])} instead.
* This method may change the inclusive of the stop row to keep compatible with the
* old behavior.
@ -499,9 +500,9 @@ public class Scan extends Query {
* <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
* {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
* </p>
* @param stopRow row to end at (exclusive) n * @throws IllegalArgumentException if stopRow does
* not meet criteria for a row key (when length exceeds
* {@link HConstants#MAX_ROW_LENGTH})
* @param stopRow row to end at (exclusive)
* @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow) {
return withStopRow(stopRow, false);
@ -513,9 +514,9 @@ public class Scan extends Query {
* The scan will include rows that are lexicographically less than (or equal to if
* {@code inclusive} is {@code true}) the provided stopRow.
* @param stopRow row to end at
* @param inclusive whether we should include the stop row when scan n * @throws
* IllegalArgumentException if stopRow does not meet criteria for a row key (when
* length exceeds {@link HConstants#MAX_ROW_LENGTH})
* @param inclusive whether we should include the stop row when scan
* @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
* exceeds {@link HConstants#MAX_ROW_LENGTH})
*/
public Scan withStopRow(byte[] stopRow, boolean inclusive) {
if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
@ -543,7 +544,7 @@ public class Scan extends Query {
* <b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])} after this
* method will yield undefined results.</b>
* </p>
* @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.) n
* @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
*/
public Scan setRowPrefixFilter(byte[] rowPrefix) {
if (rowPrefix == null) {
@ -557,9 +558,9 @@ public class Scan extends Query {
}
/**
* Get all available versions. n * @deprecated since 2.0.0 and will be removed in 3.0.0. It is
* easy to misunderstand with column family's max versions, so use {@link #readAllVersions()}
* instead.
* Get all available versions.
* @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column
* family's max versions, so use {@link #readAllVersions()} instead.
* @see #readAllVersions()
* @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
*/
@ -570,9 +571,9 @@ public class Scan extends Query {
/**
* Get up to the specified number of versions of each column.
* @param maxVersions maximum versions for each column n * @deprecated since 2.0.0 and will be
* removed in 3.0.0. It is easy to misunderstand with column family's max
* versions, so use {@link #readVersions(int)} instead.
* @param maxVersions maximum versions for each column
* @deprecated since 2.0.0 and will be removed in 3.0.0. It is easy to misunderstand with column
* family's max versions, so use {@link #readVersions(int)} instead.
* @see #readVersions(int)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-17125">HBASE-17125</a>
*/
@ -582,7 +583,7 @@ public class Scan extends Query {
}
/**
* Get all available versions. n
* Get all available versions.
*/
public Scan readAllVersions() {
this.maxVersions = Integer.MAX_VALUE;
@ -591,7 +592,7 @@ public class Scan extends Query {
/**
* Get up to the specified number of versions of each column.
* @param versions specified number of versions for each column n
* @param versions specified number of versions for each column
*/
public Scan readVersions(int versions) {
this.maxVersions = versions;
@ -669,7 +670,7 @@ public class Scan extends Query {
/**
* Setting the familyMap
* @param familyMap map of family to qualifier n
* @param familyMap map of family to qualifier
*/
public Scan setFamilyMap(Map<byte[], NavigableSet<byte[]>> familyMap) {
this.familyMap = familyMap;
@ -677,7 +678,7 @@ public class Scan extends Query {
}
/**
* Getting the familyMap n
* Getting the familyMap
*/
public Map<byte[], NavigableSet<byte[]>> getFamilyMap() {
return this.familyMap;
@ -752,16 +753,12 @@ public class Scan extends Query {
return this.caching;
}
/**
* n
*/
/** Returns TimeRange */
public TimeRange getTimeRange() {
return this.tr;
}
/**
* n
*/
/** Returns RowFilter */
@Override
public Filter getFilter() {
return filter;
@ -796,7 +793,7 @@ public class Scan extends Query {
* Set whether this scan is a reversed one
* <p>
* This is false by default which means forward(normal) scan.
* @param reversed if true, scan will be backward order n
* @param reversed if true, scan will be backward order
*/
public Scan setReversed(boolean reversed) {
this.reversed = reversed;
@ -815,7 +812,8 @@ public class Scan extends Query {
* Setting whether the caller wants to see the partial results when server returns
* less-than-expected cells. It is helpful while scanning a huge row to prevent OOM at client. By
* default this value is false and the complete results will be assembled client side before being
* delivered to the caller. nn * @see Result#mayHaveMoreCellsInRow()
* delivered to the caller.
* @see Result#mayHaveMoreCellsInRow()
* @see #setBatch(int)
*/
public Scan setAllowPartialResults(final boolean allowPartialResults) {
@ -839,7 +837,7 @@ public class Scan extends Query {
/**
* Compile the table and column family (i.e. schema) information into a String. Useful for parsing
* and aggregation by debugging, logging, and administration tools. n
* and aggregation by debugging, logging, and administration tools.
*/
@Override
public Map<String, Object> getFingerprint() {
@ -861,7 +859,7 @@ public class Scan extends Query {
* Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a
* Map along with the fingerprinted information. Useful for debugging, logging, and administration
* tools.
* @param maxCols a limit on the number of columns output prior to truncation n
* @param maxCols a limit on the number of columns output prior to truncation
*/
@Override
public Map<String, Object> toMap(int maxCols) {
@ -942,11 +940,12 @@ public class Scan extends Query {
* non-compaction read request https://issues.apache.org/jira/browse/HBASE-7266 On the other hand,
* if setting it true, we would do openScanner,next,closeScanner in one RPC call. It means the
* better performance for small scan. [HBASE-9488]. Generally, if the scan range is within one
* data block(64KB), it could be considered as a small scan. n * @deprecated since 2.0.0 and will
* be removed in 3.0.0. Use {@link #setLimit(int)} and {@link #setReadType(ReadType)} instead. And
* for the one rpc optimization, now we will also fetch data when openScanner, and if the number
* of rows reaches the limit then we will close the scanner automatically which means we will fall
* back to one rpc.
* data block(64KB), it could be considered as a small scan.
* @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #setLimit(int)} and
* {@link #setReadType(ReadType)} instead. And for the one rpc optimization, now we
* will also fetch data when openScanner, and if the number of rows reaches the limit
* then we will close the scanner automatically which means we will fall back to one
* rpc.
* @see #setLimit(int)
* @see #setReadType(ReadType)
* @see <a href="https://issues.apache.org/jira/browse/HBASE-17045">HBASE-17045</a>
@ -1065,7 +1064,7 @@ public class Scan extends Query {
* reaches this value.
* <p>
* This condition will be tested at last, after all other conditions such as stopRow, filter, etc.
* @param limit the limit of rows for this scan n
* @param limit the limit of rows for this scan
*/
public Scan setLimit(int limit) {
this.limit = limit;
@ -1074,7 +1073,7 @@ public class Scan extends Query {
/**
* Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
* set {@code readType} to {@link ReadType#PREAD}. n
* set {@code readType} to {@link ReadType#PREAD}.
*/
public Scan setOneRowLimit() {
return setLimit(1).setReadType(ReadType.PREAD);
@ -1096,7 +1095,7 @@ public class Scan extends Query {
* Set the read type for this scan.
* <p>
* Notice that we may choose to use pread even if you specific {@link ReadType#STREAM} here. For
* example, we will always use pread if this is a get scan. n
* example, we will always use pread if this is a get scan.
*/
public Scan setReadType(ReadType readType) {
this.readType = readType;
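Pulling the Scan setters above together, a hedged sketch of a bounded, limited, pread scan over a hypothetical key range:

Scan scan = new Scan()
  .withStartRow(Bytes.toBytes("user-0001"), true)   // inclusive start row
  .withStopRow(Bytes.toBytes("user-0100"), false)   // exclusive stop row
  .readVersions(3)                                   // up to 3 versions per column
  .setLimit(50)                                      // stop after 50 rows
  .setReadType(Scan.ReadType.PREAD);                 // positional reads rather than streaming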

View File

@ -99,8 +99,8 @@ public class SecureBulkLoadClient {
}
/**
* Securely bulk load a list of HFiles using client protocol. nnnnnn * @return true if all are
* loaded n
* Securely bulk load a list of HFiles using client protocol.
* @return true if all are loaded
*/
public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,
@ -110,8 +110,8 @@ public class SecureBulkLoadClient {
}
/**
* Securely bulk load a list of HFiles using client protocol. nnnnnnn * @return true if all are
* loaded n
* Securely bulk load a list of HFiles using client protocol.
* @return true if all are loaded
*/
public boolean secureBulkLoadHFiles(final ClientService.BlockingInterface client,
final List<Pair<byte[], String>> familyPaths, final byte[] regionName, boolean assignSeqNum,

View File

@ -147,7 +147,8 @@ public interface Table extends Closeable {
* @param results Empty Object[], same size as actions. Provides access to partial results, in
* case an exception is thrown. A null in the result array means that the call for
* that action failed, even after retries. The order of the objects in the results
* array corresponds to the order of actions in the request list. n * @since 0.90.0
* array corresponds to the order of actions in the request list.
* @since 0.90.0
*/
default void batch(final List<? extends Row> actions, final Object[] results)
throws IOException, InterruptedException {
@ -358,8 +359,8 @@ public interface Table extends Closeable {
* @apiNote In 3.0.0 version, the input list {@code deletes} will no longer be modified. Also,
* {@link #put(List)} runs pre-flight validations on the input list on client. Currently
* {@link #delete(List)} doesn't run validations on the client, there is no need
* currently, but this may change in the future. An * {@link IllegalArgumentException}
* will be thrown in this case.
* currently, but this may change in the future. An {@link IllegalArgumentException} will
* be thrown in this case.
*/
default void delete(List<Delete> deletes) throws IOException {
throw new NotImplementedException("Add an implementation!");
@ -770,12 +771,12 @@ public interface Table extends Closeable {
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive), all
* the invocations to the same region server will be batched into one call. The coprocessor
* service is invoked according to the service instance, method name and parameters. n * the
* descriptor for the protobuf service method to call. n * the method call parameters n * start
* region selection with region containing this row. If {@code null}, the selection will start
* with the first table region. n * select regions up to and including the region containing this
* row. If {@code null}, selection will continue through the last table region. n * the proto type
* of the response of the method in Service.
* service is invoked according to the service instance, method name and parameters. the
* descriptor for the protobuf service method to call. the method call parameters start region
* selection with region containing this row. If {@code null}, the selection will start with the
* first table region. select regions up to and including the region containing this row. If
* {@code null}, selection will continue through the last table region. the proto type of the
* response of the method in Service.
* @param <R> the response type for the coprocessor Service method
* @return a map of result values keyed by region name
*/
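A sketch of Table#batch as described above; the results array must be pre-sized to the action list, and a null slot marks an action that failed even after retries (row keys and values are invented):

List<Row> actions = Arrays.asList(
  new Get(Bytes.toBytes("row-1")),
  new Put(Bytes.toBytes("row-2"))
    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
Object[] results = new Object[actions.size()];
table.batch(actions, results); // throws IOException / InterruptedException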

View File

@ -166,7 +166,7 @@ public interface TableDescriptor {
String getRegionSplitPolicyClassName();
/**
* Get the name of the table n
* Get the name of the table
*/
TableName getTableName();

View File

@ -895,7 +895,7 @@ public class TableDescriptorBuilder {
}
/**
* Get the name of the table n
* Get the name of the table
*/
@Override
public TableName getTableName() {
@ -1297,7 +1297,8 @@ public class TableDescriptorBuilder {
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
* @param className Full class name. n * @return the modifyable TD
* @param className Full class name.
* @return the modifyable TD
*/
public ModifyableTableDescriptor setCoprocessor(String className) throws IOException {
return setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className)
@ -1345,8 +1346,8 @@ public class TableDescriptorBuilder {
* org.apache.hadoop.hbase.coprocessor.RegionObserver or Endpoint. It won't check if the class
* can be loaded or not. Whether a coprocessor is loadable or not will be determined when a
* region is opened.
* @param specStr The Coprocessor specification all in in one String n * @return the modifyable
* TD
* @param specStr The Coprocessor specification all in one String
* @return the modifyable TD
* @deprecated used by HTableDescriptor and admin.rb. As of release 2.0.0, this will be removed
* in HBase 3.0.0.
*/
@ -1488,8 +1489,8 @@ public class TableDescriptorBuilder {
/**
* @param bytes A pb serialized {@link ModifyableTableDescriptor} instance with pb magic prefix
* @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code> n
* * @see #toByteArray()
* @return An instance of {@link ModifyableTableDescriptor} made from <code>bytes</code>
* @see #toByteArray()
*/
private static TableDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
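A sketch of attaching a coprocessor by class name while building a descriptor; the table name, family and observer class are hypothetical, and as noted above loadability is only checked when a region opens:

TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
  .setCoprocessor("org.example.MyRegionObserver")   // hypothetical RegionObserver implementation
  .build();
admin.createTable(td); // 'admin' is an org.apache.hadoop.hbase.client.Admin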

View File

@ -42,7 +42,7 @@ public class TableState {
/**
* Convert from PB version of State
* @param state convert from n
* @param state convert from
*/
public static State convert(HBaseProtos.TableState.State state) {
State ret;
@ -66,7 +66,7 @@ public class TableState {
}
/**
* Covert to PB version of State n
* Convert to PB version of State
*/
public HBaseProtos.TableState.State convert() {
HBaseProtos.TableState.State state;
@ -140,7 +140,7 @@ public class TableState {
}
/**
* Table name for state n
* Table name for state
*/
public TableName getTableName() {
return tableName;
@ -168,7 +168,7 @@ public class TableState {
}
/**
* Covert to PB version of TableState n
* Convert to PB version of TableState
*/
public HBaseProtos.TableState convert() {
return HBaseProtos.TableState.newBuilder().setState(this.state.convert()).build();
@ -177,7 +177,7 @@ public class TableState {
/**
* Convert from PB version of TableState
* @param tableName table this state of
* @param tableState convert from n
* @param tableState convert from
*/
public static TableState convert(TableName tableName, HBaseProtos.TableState tableState) {
TableState.State state = State.convert(tableState.getState());

View File

@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
@Deprecated
class UnmodifyableHRegionInfo extends HRegionInfo {
/*
* Creates an unmodifyable copy of an HRegionInfo n
* Creates an unmodifyable copy of an HRegionInfo
*/
UnmodifyableHRegionInfo(HRegionInfo info) {
super(info);

View File

@ -33,7 +33,7 @@ public class ServerStatistics {
/**
* Good enough attempt. Last writer wins. It doesn't really matter which one gets to update, as
* something gets set nn
* something gets set
*/
public void update(byte[] region, RegionLoadStats currentStats) {
RegionStatistics regionStat = this.stats.get(region);

View File

@ -35,8 +35,8 @@ public class ServerSideScanMetrics {
private final Map<String, AtomicLong> counters = new HashMap<>();
/**
* Create a new counter with the specified name n * @return {@link AtomicLong} instance for the
* counter with counterName
* Create a new counter with the specified name
* @return {@link AtomicLong} instance for the counter with counterName
*/
protected AtomicLong createCounter(String counterName) {
AtomicLong c = new AtomicLong(0);
@ -75,9 +75,6 @@ public class ServerSideScanMetrics {
*/
public final AtomicLong countOfRowsScanned = createCounter(COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME);
/**
* nn
*/
public void setCounter(String counterName, long value) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {
@ -85,23 +82,16 @@ public class ServerSideScanMetrics {
}
}
/**
* n * @return true if a counter exists with the counterName
*/
/** Returns true if a counter exists with the counterName */
public boolean hasCounter(String counterName) {
return this.counters.containsKey(counterName);
}
/**
* n * @return {@link AtomicLong} instance for this counter name, null if counter does not exist.
*/
/** Returns {@link AtomicLong} instance for this counter name, null if counter does not exist. */
public AtomicLong getCounter(String counterName) {
return this.counters.get(counterName);
}
/**
* nn
*/
public void addToCounter(String counterName, long delta) {
AtomicLong c = this.counters.get(counterName);
if (c != null) {

View File

@ -178,7 +178,8 @@ public class ReplicationAdmin implements Closeable {
/**
* Get the number of slave clusters the local cluster has.
* @return number of slave clusters n * @deprecated
* @return number of slave clusters
* @deprecated
*/
@Deprecated
public int getPeersCount() throws IOException {
@ -222,8 +223,9 @@ public class ReplicationAdmin implements Closeable {
/**
* Append the replicable table-cf config of the specified peer
* @param id a short that identifies the cluster
* @param tableCfs table-cfs config str nn * @deprecated as release of 2.0.0, and it will be
* removed in 3.0.0, use {@link #appendPeerTableCFs(String, Map)} instead.
* @param tableCfs table-cfs config str
* @deprecated as release of 2.0.0, and it will be removed in 3.0.0, use
* {@link #appendPeerTableCFs(String, Map)} instead.
*/
@Deprecated
public void appendPeerTableCFs(String id, String tableCfs)
@ -234,7 +236,7 @@ public class ReplicationAdmin implements Closeable {
/**
* Append the replicable table-cf config of the specified peer
* @param id a short that identifies the cluster
* @param tableCfs A map from tableName to column family names nn
* @param tableCfs A map from tableName to column family names
*/
@Deprecated
public void appendPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
@ -245,8 +247,9 @@ public class ReplicationAdmin implements Closeable {
/**
* Remove some table-cfs from table-cfs config of the specified peer
* @param id a short name that identifies the cluster
* @param tableCf table-cfs config str nn * @deprecated as release of 2.0.0, and it will be
* removed in 3.0.0, use {@link #removePeerTableCFs(String, Map)} instead.
* @param tableCf table-cfs config str
* @deprecated as release of 2.0.0, and it will be removed in 3.0.0, use
* {@link #removePeerTableCFs(String, Map)} instead.
*/
@Deprecated
public void removePeerTableCFs(String id, String tableCf)
@ -257,7 +260,7 @@ public class ReplicationAdmin implements Closeable {
/**
* Remove some table-cfs from config of the specified peer
* @param id a short name that identifies the cluster
* @param tableCfs A map from tableName to column family names nn
* @param tableCfs A map from tableName to column family names
*/
@Deprecated
public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)

View File

@ -50,36 +50,27 @@ import org.apache.yetus.audience.InterfaceStability;
public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Message,
R extends Message> {
/**
* nnn * @return value of type T n
*/
/** Returns value of type T */
public abstract T getValue(byte[] colFamily, byte[] colQualifier, Cell c) throws IOException;
/**
* nn * @return sum or non null value among (if either of them is null); otherwise returns a null.
*/
/** Returns sum or non null value among (if either of them is null); otherwise returns a null. */
public abstract S add(S l1, S l2);
/**
* returns the maximum value for this type T n
* returns the maximum value for this type T
*/
public abstract T getMaxValue();
public abstract T getMinValue();
/**
* nnn
*/
/** Returns multiplication */
public abstract S multiply(S o1, S o2);
/**
* nn
*/
/** Returns increment */
public abstract S increment(S o);
/**
* provides casting opportunity between the data types. nn
* provides casting opportunity between the data types.
*/
public abstract S castToReturnType(T o);
@ -94,7 +85,7 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
/**
* used for computing average of &lt;S&gt; data values. Not providing the divide method that takes
* two &lt;S&gt; values as it is not needed as of now. nnn
* two &lt;S&gt; values as it is not needed as of now.
*/
public abstract double divideForAvg(S o, Long l);
@ -110,37 +101,37 @@ public abstract class ColumnInterpreter<T, S, P extends Message, Q extends Messa
/**
* This method should initialize any field(s) of the ColumnInterpreter with a parsing of the
* passed message bytes (used on the server side). n
* passed message bytes (used on the server side).
*/
public abstract void initialize(P msg);
/**
* This method gets the PB message corresponding to the cell type n * @return the PB message for
* the cell-type instance
* This method gets the PB message corresponding to the cell type
* @return the PB message for the cell-type instance
*/
public abstract Q getProtoForCellType(T t);
/**
* This method gets the PB message corresponding to the cell type n * @return the cell-type
* instance from the PB message
* This method gets the PB message corresponding to the cell type
* @return the cell-type instance from the PB message
*/
public abstract T getCellValueFromProto(Q q);
/**
* This method gets the PB message corresponding to the promoted type n * @return the PB message
* for the promoted-type instance
* This method gets the PB message corresponding to the promoted type
* @return the PB message for the promoted-type instance
*/
public abstract R getProtoForPromotedType(S s);
/**
* This method gets the promoted type from the proto message n * @return the promoted-type
* instance from the PB message
* This method gets the promoted type from the proto message
* @return the promoted-type instance from the PB message
*/
public abstract S getPromotedValueFromProto(R r);
/**
* The response message comes as type S. This will convert/cast it to T. In some sense, performs
* the opposite of {@link #castToReturnType(Object)} nn
* the opposite of {@link #castToReturnType(Object)}
*/
public abstract T castToCellType(S response);
}

View File

@ -33,7 +33,7 @@ public class CoprocessorException extends DoNotRetryIOException {
}
/**
* Constructor with a Class object and exception message. nn
* Constructor with a Class object and exception message.
*/
public CoprocessorException(Class<?> clazz, String s) {
super("Coprocessor [" + clazz.getName() + "]: " + s);

View File

@ -170,7 +170,7 @@ public final class ClientExceptionsUtil {
/**
* Translates exception for preemptive fast fail checks.
* @param t exception to check
* @return translated exception n
* @return translated exception
*/
public static Throwable translatePFFE(Throwable t) throws IOException {
if (t instanceof NoSuchMethodError) {

View File

@ -35,15 +35,13 @@ public class FailedSanityCheckException extends org.apache.hadoop.hbase.DoNotRet
}
/**
* n
*/
* */
public FailedSanityCheckException(String message) {
super(message);
}
/**
* nn
*/
* */
public FailedSanityCheckException(String message, Throwable cause) {
super(message, cause);
}

View File

@ -65,8 +65,8 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo
/**
* @param pbBytes A pb serialized {@link BinaryComparator} instance
* @return An instance of {@link BinaryComparator} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link BinaryComparator} made from <code>bytes</code>
* @see #toByteArray
*/
public static BinaryComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BinaryComparator proto;
@ -79,8 +79,8 @@ public class BinaryComparator extends org.apache.hadoop.hbase.filter.ByteArrayCo
}
/**
* n * @return true if and only if the fields of the comparator that are serialized are equal to
* the corresponding fields in other. Used for testing.
* @return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {
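The parseFrom/toByteArray pairing described above amounts to a protobuf round trip; a small sketch, assuming the caller handles DeserializationException:

BinaryComparator original = new BinaryComparator(Bytes.toBytes("expected-value"));
byte[] pb = original.toByteArray();                          // pb-serialized form
BinaryComparator restored = BinaryComparator.parseFrom(pb);  // throws DeserializationException on bad input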

View File

@ -69,8 +69,8 @@ public class BinaryPrefixComparator extends ByteArrayComparable {
/**
* @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance
* @return An instance of {@link BinaryPrefixComparator} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link BinaryPrefixComparator} made from <code>bytes</code>
* @see #toByteArray
*/
public static BinaryPrefixComparator parseFrom(final byte[] pbBytes)
throws DeserializationException {
@ -84,8 +84,8 @@ public class BinaryPrefixComparator extends ByteArrayComparable {
}
/**
* n * @return true if and only if the fields of the comparator that are serialized are equal to
* the corresponding fields in other. Used for testing.
* @return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {

View File

@ -75,7 +75,8 @@ public class BitComparator extends ByteArrayComparable {
/**
* @param pbBytes A pb serialized {@link BitComparator} instance
* @return An instance of {@link BitComparator} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link BitComparator} made from <code>bytes</code>
* @see #toByteArray
*/
public static BitComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
ComparatorProtos.BitComparator proto;
@ -89,8 +90,8 @@ public class BitComparator extends ByteArrayComparable {
}
/**
* n * @return true if and only if the fields of the comparator that are serialized are equal to
* the corresponding fields in other. Used for testing.
* @return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {

View File

@ -79,22 +79,19 @@ public class ColumnPaginationFilter extends FilterBase {
}
/**
* n
*/
* */
public int getLimit() {
return limit;
}
/**
* n
*/
* */
public int getOffset() {
return offset;
}
/**
* n
*/
* */
public byte[] getColumnOffset() {
return columnOffset;
}
@ -174,8 +171,8 @@ public class ColumnPaginationFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance
* @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static ColumnPaginationFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
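A sketch of paging through a wide row with ColumnPaginationFilter(limit, offset); the row key and page size are arbitrary:

Scan scan = new Scan()
  .withStartRow(Bytes.toBytes("wide-row"), true)
  .setFilter(new ColumnPaginationFilter(10, 0)); // first page: 10 qualifiers starting at offset 0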

View File

@ -164,8 +164,8 @@ public class ColumnRangeFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link ColumnRangeFilter} instance
* @return An instance of {@link ColumnRangeFilter} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link ColumnRangeFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static ColumnRangeFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ColumnRangeFilter proto;

View File

@ -68,9 +68,7 @@ public class ColumnValueFilter extends FilterBase {
this.comparator = Preconditions.checkNotNull(comparator, "Comparator should not be null");
}
/**
* n
*/
/** Returns operator */
public CompareOperator getCompareOperator() {
return op;
}

View File

@ -101,8 +101,7 @@ public abstract class CompareFilter extends FilterBase {
}
/**
* n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()}
* instead.
* @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
*/
@Deprecated
public CompareOp getOperator() {
@ -287,8 +286,8 @@ public abstract class CompareFilter extends FilterBase {
}
/**
* n * @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
* @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter o) {

View File

@ -231,8 +231,8 @@ public class DependentColumnFilter extends CompareFilter {
/**
* @param pbBytes A pb serialized {@link DependentColumnFilter} instance
* @return An instance of {@link DependentColumnFilter} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link DependentColumnFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static DependentColumnFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
@ -259,8 +259,8 @@ public class DependentColumnFilter extends CompareFilter {
}
/**
* n * @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
* @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE")

View File

@ -102,7 +102,8 @@ public class FamilyFilter extends CompareFilter {
/**
* @param pbBytes A pb serialized {@link FamilyFilter} instance
* @return An instance of {@link FamilyFilter} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link FamilyFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static FamilyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FamilyFilter proto;

View File

@ -241,7 +241,8 @@ public abstract class Filter {
* Concrete implementers can signal a failure condition in their code by throwing an
* {@link IOException}.
* @param pbBytes A pb serialized {@link Filter} instance
* @return An instance of {@link Filter} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link Filter} made from <code>bytes</code>
* @see #toByteArray
*/
public static Filter parseFrom(final byte[] pbBytes) throws DeserializationException {
throw new DeserializationException(
@ -250,9 +251,9 @@ public abstract class Filter {
/**
* Concrete implementers can signal a failure condition in their code by throwing an
* {@link IOException}. n * @return true if and only if the fields of the filter that are
* serialized are equal to the corresponding fields in other. Used for testing.
* @throws IOException in case an I/O or an filter specific failure needs to be signaled.
* {@link IOException}.
* @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
abstract boolean areSerializedFieldsEqual(Filter other);

View File

@ -148,9 +148,9 @@ public abstract class FilterBase extends Filter {
}
/**
* Default implementation so that writers of custom filters aren't forced to implement. n
* * @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
* Default implementation so that writers of custom filters aren't forced to implement.
* @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {

View File

@ -84,7 +84,7 @@ final public class FilterList extends FilterBase {
/**
* Constructor that takes a var arg number of {@link Filter}s. The default operator MUST_PASS_ALL
* is assumed. n
* is assumed.
*/
public FilterList(final Filter... filters) {
this(Operator.MUST_PASS_ALL, Arrays.asList(filters));
@ -108,14 +108,14 @@ final public class FilterList extends FilterBase {
}
/**
* Get the operator. n
* Get the operator.
*/
public Operator getOperator() {
return operator;
}
/**
* Get the filters. n
* Get the filters.
*/
public List<Filter> getFilters() {
return filterListBase.getFilters();
@ -206,7 +206,8 @@ final public class FilterList extends FilterBase {
/**
* @param pbBytes A pb serialized {@link FilterList} instance
* @return An instance of {@link FilterList} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link FilterList} made from <code>bytes</code>
* @see #toByteArray
*/
public static FilterList parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FilterList proto;
@ -229,8 +230,8 @@ final public class FilterList extends FilterBase {
}
/**
* n * @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
* @return true if and only if the fields of the filter that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(Filter other) {
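A sketch of the FilterList constructors above, combining two stock filters under MUST_PASS_ALL and attaching the list to a (hypothetical) scan:

FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL,
  new KeyOnlyFilter(),   // strip values, keep keys only
  new PageFilter(25));   // at most 25 rows per region server
scan.setFilter(filters);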

View File

@ -92,7 +92,7 @@ public abstract class FilterListBase extends FilterBase {
* the current child, we should set the traverse result (transformed cell) of previous node(s) as
* the initial value. (HBASE-18879).
* @param c The cell in question.
* @return the transformed cell. n
* @return the transformed cell.
*/
@Override
public Cell transformCell(Cell c) throws IOException {

View File

@ -97,7 +97,8 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter {
/**
* @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance
* @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from
* <code>bytes</code> n * @see #toByteArray
* <code>bytes</code>
* @see #toByteArray
*/
public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {

View File

@ -138,7 +138,8 @@ public class FuzzyRowFilter extends FilterBase {
/**
* We need to preprocess mask array, as since we treat 2's as unfixed positions and -1 (0xff) as
* fixed positions n * @return mask array
* fixed positions
* @return mask array
*/
private byte[] preprocessMask(byte[] mask) {
if (!UNSAFE_UNALIGNED) {
@ -300,8 +301,8 @@ public class FuzzyRowFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link FuzzyRowFilter} instance
* @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static FuzzyRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.FuzzyRowFilter proto;
@ -628,8 +629,8 @@ public class FuzzyRowFilter extends FilterBase {
/**
* For forward scanner, next cell hint should not contain any trailing zeroes unless they are part
* of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01' nn * @param
* toInc - position of incremented byte
* of fuzzyKeyMeta hint = '\x01\x01\x01\x00\x00' will skip valid row '\x01\x01\x01'
* @param toInc - position of incremented byte
* @return trimmed version of result
*/

View File

@ -93,8 +93,8 @@ public class InclusiveStopFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link InclusiveStopFilter} instance
* @return An instance of {@link InclusiveStopFilter} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link InclusiveStopFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static InclusiveStopFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {

View File

@ -109,7 +109,8 @@ public class KeyOnlyFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link KeyOnlyFilter} instance
* @return An instance of {@link KeyOnlyFilter} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link KeyOnlyFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static KeyOnlyFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.KeyOnlyFilter proto;

View File

@ -78,8 +78,8 @@ public class LongComparator extends ByteArrayComparable {
}
/**
* n * @return true if and only if the fields of the comparator that are serialized are equal to
* the corresponding fields in other. Used for testing.
* @return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
boolean areSerializedFieldsEqual(LongComparator other) {
if (other == this) return true;

View File

@ -135,8 +135,8 @@ public class MultipleColumnPrefixFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance
* @return An instance of {@link MultipleColumnPrefixFilter} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link MultipleColumnPrefixFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static MultipleColumnPrefixFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {
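A brief sketch of the MultipleColumnPrefixFilter documented above, assuming the standard client API; the qualifier prefixes and class name are made up.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.MultipleColumnPrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class MultipleColumnPrefixExample {
  public static Scan buildScan() {
    // Keep only columns whose qualifier starts with one of the given prefixes.
    byte[][] prefixes = { Bytes.toBytes("stat_"), Bytes.toBytes("meta_") };
    return new Scan().setFilter(new MultipleColumnPrefixFilter(prefixes));
  }
}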

View File

@ -72,8 +72,8 @@ public class NullComparator extends ByteArrayComparable {
/**
* @param pbBytes A pb serialized {@link NullComparator} instance
* @return An instance of {@link NullComparator} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link NullComparator} made from <code>bytes</code>
* @see #toByteArray
*/
public static NullComparator parseFrom(final byte[] pbBytes) throws DeserializationException {
try {
@ -86,8 +86,8 @@ public class NullComparator extends ByteArrayComparable {
}
/**
* n * @return true if and only if the fields of the comparator that are serialized are equal to
* the corresponding fields in other. Used for testing.
* @return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {

View File

@ -107,7 +107,8 @@ public class PageFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link PageFilter} instance
* @return An instance of {@link PageFilter} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link PageFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static PageFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.PageFilter proto;
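Many hunks above fix the same parseFrom/toByteArray javadoc pair; a small round-trip sketch using PageFilter as a representative case (page size and class name are arbitrary).

import java.io.IOException;

import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.filter.PageFilter;

public class FilterRoundTripExample {
  public static PageFilter roundTrip() throws IOException, DeserializationException {
    PageFilter original = new PageFilter(25); // stop after roughly 25 rows per region server
    byte[] pb = original.toByteArray();       // protobuf-serialized form
    return PageFilter.parseFrom(pb);          // rebuilds an equivalent filter instance
  }
}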

View File

@ -38,7 +38,7 @@ public class RandomRowFilter extends FilterBase {
protected boolean filterOutRow;
/**
* Create a new filter with a specified chance for a row to be included. n
* Create a new filter with a specified chance for a row to be included.
*/
public RandomRowFilter(float chance) {
this.chance = chance;
@ -50,7 +50,7 @@ public class RandomRowFilter extends FilterBase {
}
/**
* Set the chance that a row is included. n
* Set the chance that a row is included.
*/
public void setChance(float chance) {
this.chance = chance;
@ -115,8 +115,8 @@ public class RandomRowFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link RandomRowFilter} instance
* @return An instance of {@link RandomRowFilter} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link RandomRowFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static RandomRowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.RandomRowFilter proto;
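A short sketch of the chance parameter documented above; the sampling rates and class name are arbitrary.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.RandomRowFilter;

public class RandomRowFilterExample {
  public static Scan buildScan() {
    RandomRowFilter sample = new RandomRowFilter(0.5f); // include each row with ~50% probability
    sample.setChance(0.25f);                            // the chance can be adjusted before scanning
    return new Scan().setFilter(sample);
  }
}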

View File

@ -153,8 +153,8 @@ public class RegexStringComparator extends ByteArrayComparable {
/**
* @param pbBytes A pb serialized {@link RegexStringComparator} instance
* @return An instance of {@link RegexStringComparator} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link RegexStringComparator} made from <code>bytes</code>
* @see #toByteArray
*/
public static RegexStringComparator parseFrom(final byte[] pbBytes)
throws DeserializationException {
@ -183,8 +183,8 @@ public class RegexStringComparator extends ByteArrayComparable {
}
/**
* n * @return true if and only if the fields of the comparator that are serialized are equal to
* the corresponding fields in other. Used for testing.
* @return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {

View File

@ -116,7 +116,8 @@ public class RowFilter extends CompareFilter {
/**
* @param pbBytes A pb serialized {@link RowFilter} instance
* @return An instance of {@link RowFilter} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link RowFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static RowFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.RowFilter proto;

View File

@ -109,9 +109,9 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
}
/**
* Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be
* removed in 3.0.0. Use
* {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
* Constructor for protobuf deserialization only.
* @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
* {@link #SingleColumnValueExcludeFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
*/
@Deprecated
protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
@ -122,7 +122,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
}
/**
* Constructor for protobuf deserialization only. nnnnnn
* Constructor for protobuf deserialization only.
*/
protected SingleColumnValueExcludeFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, ByteArrayComparable comparator, final boolean filterIfMissing,
@ -174,8 +174,8 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
/**
* @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance
* @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code> n
* * @see #toByteArray
* @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static SingleColumnValueExcludeFilter parseFrom(final byte[] pbBytes)
throws DeserializationException {

View File

@ -155,10 +155,10 @@ public class SingleColumnValueFilter extends FilterBase {
}
/**
* Constructor for protobuf deserialization only. nnnnnn * @deprecated Since 2.0.0. Will be
* removed in 3.0.0. Use
* {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
* instead.
* Constructor for protobuf deserialization only.
* @deprecated Since 2.0.0. Will be removed in 3.0.0. Use
* {@link #SingleColumnValueFilter(byte[], byte[], CompareOperator, ByteArrayComparable, boolean, boolean)}
* instead.
*/
@Deprecated
protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
@ -169,7 +169,7 @@ public class SingleColumnValueFilter extends FilterBase {
}
/**
* Constructor for protobuf deserialization only. nnnnnn
* Constructor for protobuf deserialization only.
*/
protected SingleColumnValueFilter(final byte[] family, final byte[] qualifier,
final CompareOperator op, org.apache.hadoop.hbase.filter.ByteArrayComparable comparator,
@ -180,8 +180,7 @@ public class SingleColumnValueFilter extends FilterBase {
}
/**
* n * @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()}
* instead.
* @deprecated since 2.0.0. Will be removed in 3.0.0. Use {@link #getCompareOperator()} instead.
*/
@Deprecated
public CompareOp getOperator() {
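To illustrate the SingleColumnValueFilter javadoc above, a sketch using the public constructor (the protected ones shown in the hunk are for protobuf deserialization only); family, qualifier, value and class name are illustrative.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleColumnValueExample {
  public static Scan buildScan() {
    // Emit only rows whose info:status column equals "active".
    SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("info"),
      Bytes.toBytes("status"), CompareOperator.EQUAL, Bytes.toBytes("active"));
    filter.setFilterIfMissing(true);   // also drop rows that lack the column entirely
    filter.setLatestVersionOnly(true); // test only the newest version of the cell
    return new Scan().setFilter(filter);
  }
}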

View File

@ -112,7 +112,8 @@ public class SkipFilter extends FilterBase {
/**
* @param pbBytes A pb serialized {@link SkipFilter} instance
* @return An instance of {@link SkipFilter} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link SkipFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static SkipFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.SkipFilter proto;

View File

@ -77,8 +77,8 @@ public class SubstringComparator extends ByteArrayComparable {
/**
* @param pbBytes A pb serialized {@link SubstringComparator} instance
* @return An instance of {@link SubstringComparator} made from <code>bytes</code> n * @see
* #toByteArray
* @return An instance of {@link SubstringComparator} made from <code>bytes</code>
* @see #toByteArray
*/
public static SubstringComparator parseFrom(final byte[] pbBytes)
throws DeserializationException {
@ -92,8 +92,8 @@ public class SubstringComparator extends ByteArrayComparable {
}
/**
* n * @return true if and only if the fields of the comparator that are serialized are equal to
* the corresponding fields in other. Used for testing.
* @return true if and only if the fields of the comparator that are serialized are equal to the
* corresponding fields in other. Used for testing.
*/
@Override
boolean areSerializedFieldsEqual(ByteArrayComparable other) {

View File

@ -54,7 +54,7 @@ public class TimestampsFilter extends FilterBase {
long minTimestamp = Long.MAX_VALUE;
/**
* Constructor for filter that retains only the specified timestamps in the list. n
* Constructor for filter that retains only the specified timestamps in the list.
*/
public TimestampsFilter(List<Long> timestamps) {
this(timestamps, false);
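A short sketch of the timestamp-list constructor documented above; the timestamp values and class name are placeholders.

import java.util.Arrays;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.TimestampsFilter;

public class TimestampsFilterExample {
  public static Scan buildScan() {
    // Keep only cells written at exactly one of the listed timestamps (millis).
    return new Scan().setFilter(new TimestampsFilter(Arrays.asList(1696579200000L, 1696665600000L)));
  }
}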

View File

@ -97,7 +97,8 @@ public class ValueFilter extends CompareFilter {
/**
* @param pbBytes A pb serialized {@link ValueFilter} instance
* @return An instance of {@link ValueFilter} made from <code>bytes</code> n * @see #toByteArray
* @return An instance of {@link ValueFilter} made from <code>bytes</code>
* @see #toByteArray
*/
public static ValueFilter parseFrom(final byte[] pbBytes) throws DeserializationException {
FilterProtos.ValueFilter proto;
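A sketch of how the comparators touched above (SubstringComparator, RegexStringComparator) are typically paired with ValueFilter; the match patterns and class name are illustrative.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.RegexStringComparator;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.filter.ValueFilter;

public class ValueFilterExample {
  public static Scan bySubstring() {
    // Keep cells whose value contains the substring "error".
    return new Scan().setFilter(new ValueFilter(CompareOperator.EQUAL, new SubstringComparator("error")));
  }

  public static Scan byRegex() {
    // Keep cells whose value matches the regular expression.
    return new Scan().setFilter(new ValueFilter(CompareOperator.EQUAL, new RegexStringComparator("^ERR-\\d+$")));
  }
}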

View File

@ -104,9 +104,10 @@ class CellBlockBuilder {
/**
* Puts CellScanner Cells into a cell block using passed in <code>codec</code> and/or
* <code>compressor</code>. nnn * @return Null or byte buffer filled with a cellblock filled with
* passed-in Cells encoded using passed in <code>codec</code> and/or <code>compressor</code>; the
* returned buffer has been flipped and is ready for reading. Use limit to find total size. n
* <code>compressor</code>.
* @return Null or byte buffer filled with a cellblock filled with passed-in Cells encoded using
* passed in <code>codec</code> and/or <code>compressor</code>; the returned buffer has
* been flipped and is ready for reading. Use limit to find total size.
*/
public ByteBuffer buildCellBlock(final Codec codec, final CompressionCodec compressor,
final CellScanner cellScanner) throws IOException {

View File

@ -255,7 +255,7 @@ public final class ProtobufUtil {
* Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than
* just {@link ServiceException}. Prefer this method to
* {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf
* references. n
* references.
*/
public static IOException handleRemoteException(Exception e) {
return makeIOExceptionOfException(e);
@ -359,7 +359,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Get to a client Get
* @param proto the protocol buffer Get to convert
* @return the converted client Get n
* @return the converted client Get
*/
public static Get toGet(final ClientProtos.Get proto) throws IOException {
if (proto == null) return null;
@ -444,7 +444,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
* @return A client Put. n
* @return A client Put.
*/
public static Put toPut(final MutationProto proto) throws IOException {
return toPut(proto, null);
@ -454,7 +454,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Mutate to a Put.
* @param proto The protocol buffer MutationProto to convert
* @param cellScanner If non-null, the Cell data that goes with this proto.
* @return A client Put. n
* @return A client Put.
*/
public static Put toPut(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@ -538,7 +538,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
* @return the converted client Delete n
* @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto) throws IOException {
return toDelete(proto, null);
@ -548,7 +548,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Mutate to a Delete
* @param proto the protocol buffer Mutate to convert
* @param cellScanner if non-null, the data that goes with this delete.
* @return the converted client Delete n
* @return the converted client Delete
*/
public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@ -675,9 +675,9 @@ public final class ProtobufUtil {
}
/**
* Convert a protocol buffer Mutate to an Append n * @param proto the protocol buffer Mutate to
* convert
* @return the converted client Append n
* Convert a protocol buffer Mutate to an Append
* @param proto the protocol buffer Mutate to convert
* @return the converted client Append
*/
public static Append toAppend(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@ -695,7 +695,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to an Increment
* @param proto the protocol buffer Mutate to convert
* @return the converted client Increment n
* @return the converted client Increment
*/
public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@ -714,7 +714,7 @@ public final class ProtobufUtil {
/**
* Convert a MutateRequest to Mutation
* @param proto the protocol buffer Mutate to convert
* @return the converted Mutation n
* @return the converted Mutation
*/
public static Mutation toMutation(final MutationProto proto) throws IOException {
MutationType type = proto.getMutateType();
@ -735,7 +735,8 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Mutate to a Get.
* @param proto the protocol buffer Mutate to convert. n * @return the converted client get. n
* @param proto the protocol buffer Mutate to convert.
* @return the converted client get.
*/
public static Get toGet(final MutationProto proto, final CellScanner cellScanner)
throws IOException {
@ -815,7 +816,7 @@ public final class ProtobufUtil {
/**
* Convert a client Scan to a protocol buffer Scan
* @param scan the client Scan to convert
* @return the converted protocol buffer Scan n
* @return the converted protocol buffer Scan
*/
public static ClientProtos.Scan toScan(final Scan scan) throws IOException {
ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder();
@ -908,7 +909,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer Scan to a client Scan
* @param proto the protocol buffer Scan to convert
* @return the converted client Scan n
* @return the converted client Scan
*/
public static Scan toScan(final ClientProtos.Scan proto) throws IOException {
byte[] startRow = HConstants.EMPTY_START_ROW;
@ -1009,7 +1010,7 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer Get based on a client Get.
* @param get the client Get
* @return a protocol buffer Get n
* @return a protocol buffer Get
*/
public static ClientProtos.Get toGet(final Get get) throws IOException {
ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder();
@ -1074,7 +1075,8 @@ public final class ProtobufUtil {
}
/**
* Create a protocol buffer Mutate based on a client Mutation nn * @return a protobuf'd Mutation n
* Create a protocol buffer Mutate based on a client Mutation
* @return a protobuf'd Mutation
*/
public static MutationProto toMutation(final MutationType type, final Mutation mutation,
final long nonce) throws IOException {
@ -1123,8 +1125,8 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
* Understanding is that the Cell will be transported other than via protobuf. nnn * @return a
* protobuf'd Mutation n
* Understanding is that the Cell will be transported other than via protobuf.
* @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation,
final MutationProto.Builder builder) throws IOException {
@ -1133,8 +1135,8 @@ public final class ProtobufUtil {
/**
* Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data.
* Understanding is that the Cell will be transported other than via protobuf. nn * @return a
* protobuf'd Mutation n
* Understanding is that the Cell will be transported other than via protobuf.
* @return a protobuf'd Mutation
*/
public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation)
throws IOException {
@ -1160,8 +1162,8 @@ public final class ProtobufUtil {
/**
* Code shared by {@link #toMutation(MutationType, Mutation)} and
* {@link #toMutationNoData(MutationType, Mutation)} nn * @return A partly-filled out protobuf'd
* Mutation.
* {@link #toMutationNoData(MutationType, Mutation)}
* @return A partly-filled out protobuf'd Mutation.
*/
private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final MutationType type,
final Mutation mutation, MutationProto.Builder builder) {
@ -1266,7 +1268,7 @@ public final class ProtobufUtil {
* Convert a protocol buffer Result to a client Result
* @param proto the protocol buffer Result to convert
* @param scanner Optional cell scanner.
* @return the converted client Result n
* @return the converted client Result
*/
public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner)
throws IOException {
@ -1380,8 +1382,8 @@ public final class ProtobufUtil {
}
/**
* Convert a delete KeyValue type to protocol buffer DeleteType. n * @return protocol buffer
* DeleteType n
* Convert a delete KeyValue type to protocol buffer DeleteType.
* @return protocol buffer DeleteType
*/
public static DeleteType toDeleteType(KeyValue.Type type) throws IOException {
switch (type) {
@ -1401,7 +1403,7 @@ public final class ProtobufUtil {
/**
* Convert a protocol buffer DeleteType to delete KeyValue type.
* @param type The DeleteType
* @return The type. n
* @return The type.
*/
public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException {
switch (type) {
@ -1565,7 +1567,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
* @param b byte array n
* @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b) throws IOException {
final CodedInputStream codedInput = CodedInputStream.newInstance(b);
@ -1578,7 +1580,7 @@ public final class ProtobufUtil {
* This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when
* working with byte arrays
* @param builder current message builder
* @param b byte array nnn
* @param b byte array
*/
public static void mergeFrom(Message.Builder builder, byte[] b, int offset, int length)
throws IOException {
@ -1632,7 +1634,7 @@ public final class ProtobufUtil {
* magic and that is then followed by a protobuf that has a serialized
* {@link ServerName} in it.
* @return Returns null if <code>data</code> is null else converts passed data to a ServerName
* instance. n
* instance.
*/
public static ServerName toServerName(final byte[] data) throws DeserializationException {
if (data == null || data.length <= 0) return null;
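The ProtobufUtil hunks above document conversions in both directions; a minimal Get round-trip sketch assuming the shaded client classes referenced in this file, with invented row and column names.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ProtobufUtilExample {
  public static Get roundTrip() throws IOException {
    Get get = new Get(Bytes.toBytes("row-1")).addColumn(Bytes.toBytes("info"), Bytes.toBytes("status"));
    ClientProtos.Get proto = ProtobufUtil.toGet(get); // client Get -> protocol buffer Get
    return ProtobufUtil.toGet(proto);                 // protocol buffer Get -> client Get
  }
}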

View File

@ -34,8 +34,7 @@ public class LeaseException extends DoNotRetryIOException {
}
/**
* n
*/
* */
public LeaseException(String message) {
super(message);
}

View File

@ -33,8 +33,7 @@ public class FailedLogCloseException extends IOException {
}
/**
* n
*/
* */
public FailedLogCloseException(String msg) {
super(msg);
}

View File

@ -32,8 +32,7 @@ public class FailedSyncBeforeLogCloseException extends FailedLogCloseException {
}
/**
* n
*/
* */
public FailedSyncBeforeLogCloseException(String msg) {
super(msg);
}

View File

@ -50,7 +50,7 @@ public abstract class AbstractHBaseSaslRpcClient {
* @param token token to use if needed by the authentication method
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
* @param fallbackAllowed does the client allow fallback to simple authentication n
* @param fallbackAllowed does the client allow fallback to simple authentication
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
@ -66,7 +66,7 @@ public abstract class AbstractHBaseSaslRpcClient {
* @param serverAddr the address of the hbase service
* @param securityInfo the security details for the remote hbase service
* @param fallbackAllowed does the client allow fallback to simple authentication
* @param rpcProtection the protection level ("authentication", "integrity" or "privacy") n
* @param rpcProtection the protection level ("authentication", "integrity" or "privacy")
*/
protected AbstractHBaseSaslRpcClient(Configuration conf,
SaslClientAuthenticationProvider provider, Token<? extends TokenIdentifier> token,
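The rpcProtection parameter documented above appears to correspond to the client-side hbase.rpc.protection setting; a minimal configuration sketch under that assumption, with an invented holder class.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcProtectionExample {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // "authentication" = SASL auth only, "integrity" adds message signing, "privacy" adds encryption.
    conf.set("hbase.rpc.protection", "privacy");
    return conf;
  }
}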

Some files were not shown because too many files have changed in this diff