From 55d6dcaf877cc5223e679736eb613173229c18be Mon Sep 17 00:00:00 2001
From: Jan Hentschel
Date: Sun, 19 Mar 2017 20:49:28 +0100
Subject: [PATCH] HBASE-16084 Cleaned up the stale references in Javadoc

Signed-off-by: tedyu
---
 .../apache/hadoop/hbase/HTableDescriptor.java      | 14 +++++++-------
 .../hadoop/hbase/client/AsyncProcess.java          |  6 +++---
 .../hadoop/hbase/client/ConnectionFactory.java     |  2 +-
 .../hadoop/hbase/client/MasterCallable.java        |  2 +-
 .../org/apache/hadoop/hbase/client/Query.java      |  2 +-
 .../hbase/client/coprocessor/package-info.java     |  8 ++++----
 .../FirstKeyValueMatchingQualifiersFilter.java     |  2 +-
 .../hadoop/hbase/ipc/ServerRpcController.java      |  4 ++--
 .../hbase/zookeeper/RecoverableZooKeeper.java      |  2 +-
 .../org/apache/hadoop/hbase/nio/ByteBuff.java      |  2 +-
 .../apache/hadoop/hbase/util/OrderedBytes.java     |  2 +-
 .../hadoop/hbase/HBaseCommonTestingUtility.java    |  3 +--
 .../codec/prefixtree/scanner/CellSearcher.java     |  2 +-
 .../store/wal/ProcedureWALFormatReader.java        |  3 +--
 .../hadoop/hbase/backup/HFileArchiver.java         |  2 --
 .../backup/example/HFileArchiveManager.java        |  3 ++-
 .../hadoop/hbase/backup/util/RestoreTool.java      |  4 ++--
 .../hadoop/hbase/constraint/Constraint.java        |  8 ++++----
 .../io/hfile/CompoundBloomFilterWriter.java        |  2 +-
 .../hadoop/hbase/io/hfile/HFileBlock.java          |  2 +-
 .../hadoop/hbase/io/hfile/HFileBlockIndex.java     |  2 +-
 .../hadoop/hbase/mapreduce/RowCounter.java         |  3 +--
 .../hbase/master/TableNamespaceManager.java        |  2 +-
 .../master/balancer/StochasticLoadBalancer.java    |  4 ++--
 .../MiniBatchOperationInProgress.java              |  4 ++--
 .../hbase/regionserver/StoreFileReader.java        |  2 +-
 .../hadoop/hbase/regionserver/wal/FSHLog.java      |  8 ++++----
 .../regionserver/wal/SequenceIdAccounting.java     |  6 +++---
 .../hbase/regionserver/wal/SyncFuture.java         |  4 ++--
 .../CoprocessorWhitelistMasterObserver.java        |  2 +-
 .../hbase/security/access/TableAuthManager.java    |  2 +-
 .../hadoop/hbase/HBaseTestingUtility.java          |  6 +++---
 .../hadoop/hbase/TestMetaTableLocator.java         |  8 ++++----
 .../hbase/TestPartialResultsFromClientSide.java    |  2 +-
 .../apache/hadoop/hbase/TestSerialization.java     |  4 ++--
 .../hbase/client/TestMultipleTimestamps.java       |  2 +-
 ...stFirstKeyValueMatchingQualifiersFilter.java    |  2 +-
 .../hadoop/hbase/io/hfile/TestSeekTo.java          |  2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java    | 17 +++++++----------
 .../hbase/mapreduce/TestImportExport.java          |  2 +-
 .../hadoop/hbase/master/MockRegionServer.java      |  4 ++--
 .../hadoop/hbase/master/TestWarmupRegion.java      |  2 +-
 .../hbase/procedure/TestProcedureMember.java       |  4 ++--
 .../regionserver/DataBlockEncodingTool.java        |  2 +-
 .../hbase/regionserver/OOMERegionServer.java       |  3 +--
 .../TestRegionMergeTransactionOnCluster.java       |  6 ------
 .../apache/hadoop/hbase/util/LoadTestTool.java     |  2 +-
 .../hadoop/hbase/util/MultiThreadedAction.java     |  4 ++--
 48 files changed, 86 insertions(+), 100 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index a49cf1c96df..25fd8966b9b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -723,7 +723,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
   /**
    * This sets the class associated with the region split policy which
    * determines when a region split should occur.
The class used by - * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy} + * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy * @param clazz the class name */ public HTableDescriptor setRegionSplitPolicyClassName(String clazz) { @@ -734,7 +734,7 @@ public class HTableDescriptor implements Comparable { /** * This gets the class associated with the region split policy which * determines when a region split should occur. The class used by - * default is defined in {@link org.apache.hadoop.hbase.regionserver.RegionSplitPolicy} + * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy * * @return the class name of the region split policy for this table. * If this returns null, the default split policy is used. @@ -827,7 +827,7 @@ public class HTableDescriptor implements Comparable { /** * This sets the class associated with the flush policy which determines determines the stores * need to be flushed when flushing a region. The class used by default is defined in - * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy} + * org.apache.hadoop.hbase.regionserver.FlushPolicy. * @param clazz the class name */ public HTableDescriptor setFlushPolicyClassName(String clazz) { @@ -838,7 +838,7 @@ public class HTableDescriptor implements Comparable { /** * This gets the class associated with the flush policy which determines the stores need to be * flushed when flushing a region. The class used by default is defined in - * {@link org.apache.hadoop.hbase.regionserver.FlushPolicy} + * org.apache.hadoop.hbase.regionserver.FlushPolicy. * @return the class name of the flush policy for this table. If this returns null, the default * flush policy is used. */ @@ -1244,7 +1244,7 @@ public class HTableDescriptor implements Comparable { /** * Add a table coprocessor to this table. The coprocessor - * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} + * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver * or Endpoint. * It won't check if the class can be loaded or not. * Whether a coprocessor is loadable or not will be determined when @@ -1259,7 +1259,7 @@ public class HTableDescriptor implements Comparable { /** * Add a table coprocessor to this table. The coprocessor - * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} + * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver * or Endpoint. * It won't check if the class can be loaded or not. * Whether a coprocessor is loadable or not will be determined when @@ -1304,7 +1304,7 @@ public class HTableDescriptor implements Comparable { /** * Add a table coprocessor to this table. The coprocessor - * type must be {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} + * type must be org.apache.hadoop.hbase.coprocessor.RegionObserver * or Endpoint. * It won't check if the class can be loaded or not. * Whether a coprocessor is loadable or not will be determined when diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index a65d327255b..ba6b052a736 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -452,7 +452,7 @@ class AsyncProcess { /** * Only used w/useGlobalErrors ctor argument, for HTable backward compat. 
* @return Whether there were any errors in any request since the last time - * {@link #waitForAllPreviousOpsAndReset(List, String)} was called, or AP was created. + * {@link #waitForAllPreviousOpsAndReset(List, TableName)} was called, or AP was created. */ public boolean hasError() { return globalErrors != null && globalErrors.hasErrors(); @@ -463,9 +463,9 @@ class AsyncProcess { * Waits for all previous operations to finish, and returns errors and (optionally) * failed operations themselves. * @param failedRows an optional list into which the rows that failed since the last time - * {@link #waitForAllPreviousOpsAndReset(List, String)} was called, or AP was created, are saved. + * {@link #waitForAllPreviousOpsAndReset(List, TableName)} was called, or AP was created, are saved. * @param tableName name of the table - * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List, String)} + * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List, TableName)} * was called, or AP was created. */ public RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset( diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 7cbcc20cd66..64f337ae91a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils; * A non-instantiable class that manages creation of {@link Connection}s. Managing the lifecycle of * the {@link Connection}s to the cluster is the responsibility of the caller. From a * {@link Connection}, {@link Table} implementations are retrieved with - * {@link Connection#getTable(TableName)}. Example: + * {@link Connection#getTable(org.apache.hadoop.hbase.TableName)}. Example: * *
  * Connection connection = ConnectionFactory.createConnection(config);
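For orientation, the usage this Javadoc example demonstrates typically continues along the following lines. This is an illustrative sketch, not the verbatim hunk; the table name is made up:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ConnectionExample {
  public static void main(String[] args) throws IOException {
    Configuration config = HBaseConfiguration.create();
    // Connections are heavyweight; create one, share it, and close it when done.
    Connection connection = ConnectionFactory.createConnection(config);
    try {
      // Tables are lightweight; retrieve them per use from the shared Connection.
      Table table = connection.getTable(TableName.valueOf("mytable"));
      try {
        // Use the table as needed, for a single operation and a single thread.
      } finally {
        table.close();
      }
    } finally {
      connection.close();
    }
  }
}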
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
index 0b24bcd82da..ca4ae8fc60d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java
@@ -80,7 +80,7 @@ abstract class MasterCallable implements RetryingCallable, Closeable {
   }
 
   /**
-   * Override that changes the {@link Callable#call()} Exception from {@link Exception} to
+   * Override that changes the {@link java.util.concurrent.Callable#call()} Exception from {@link Exception} to
    * {@link IOException}. It also does setup of an rpcController and calls through to the rpcCall()
    * method which callers are expected to implement. If rpcController is an instance of
    * PayloadCarryingRpcController, we will set a timeout on it.
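The pattern this Javadoc describes can be shown with a minimal sketch; the class and method names below are invented for the example and are not the HBase implementation:

import java.io.IOException;
import java.util.concurrent.Callable;

// A Callable whose call() narrows the checked exception from Exception to
// IOException, which Java permits when overriding; callers of call() on this
// type then only have to handle IO failures.
abstract class IoCallable<V> implements Callable<V> {
  @Override
  public V call() throws IOException {
    // In MasterCallable this is also where the rpcController setup happens
    // before delegating to the subclass.
    return rpcCall();
  }

  // Subclasses implement the actual remote call.
  protected abstract V rpcCall() throws IOException;
}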
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index 222eaff488a..1322ef5ffce 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -56,7 +56,7 @@ public abstract class Query extends OperationWithAttributes {
 
   /**
    * Apply the specified server-side filter when performing the Query.
-   * Only {@link Filter#filterKeyValue(Cell)} is called AFTER all tests
+   * Only {@link Filter#filterKeyValue(org.apache.hadoop.hbase.Cell)} is called AFTER all tests
    * for ttl, column match, deletes and max versions have been run.
    * @param filter filter to run on the server
    * @return this for invocation chaining
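A brief usage illustration of the documented method; the filter choice and row prefix are arbitrary:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanFilterExample {
  public static Scan prefixScan() {
    // The filter is evaluated on the region server; filterKeyValue() only sees
    // cells that already passed the ttl, column-match, delete and max-versions checks.
    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("row-")));
    return scan;
  }
}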
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
index c70f27f0775..82f0c87eadc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/package-info.java
@@ -44,8 +44,8 @@ must:
  for more details on defining services.
  • Generate the Service and Message code using the protoc compiler
• Implement the generated Service interface in your coprocessor class and implement the
-{@link org.apache.hadoop.hbase.coprocessor.CoprocessorService} interface. The
-{@link org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()}
+org.apache.hadoop.hbase.coprocessor.CoprocessorService interface. The
+org.apache.hadoop.hbase.coprocessor.CoprocessorService#getService()
 method should return a reference to the Endpoint's protocol buffer Service instance.
    @@ -146,10 +146,10 @@ public static abstract class RowCountService
-Our coprocessor Service will need to implement this interface and the {@link org.apache.hadoop.hbase.coprocessor.CoprocessorService}
+Our coprocessor Service will need to implement this interface and the org.apache.hadoop.hbase.coprocessor.CoprocessorService
 in order to be registered correctly as an endpoint. For the sake of simplicity the server-side
 implementation is omitted. To see the implementing code, please see the
-{@link org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint} class in the HBase source code.
+org.apache.hadoop.hbase.coprocessor.example.RowCountEndpoint class in the HBase source code.
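A skeletal endpoint showing the registration contract described above. It leans on the generated ExampleProtos.RowCountService from the hbase-examples module and leaves the actual RPC methods unimplemented, so it is a sketch rather than a working endpoint:

import java.io.IOException;

import com.google.protobuf.Service;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
import org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos;

// Declared abstract so the generated service's RPC methods can stay unimplemented here.
public abstract class SketchRowCountEndpoint extends ExampleProtos.RowCountService
    implements Coprocessor, CoprocessorService {

  @Override
  public Service getService() {
    // The endpoint itself is the protobuf Service instance the framework registers.
    return this;
  }

  @Override
  public void start(CoprocessorEnvironment env) throws IOException {
  }

  @Override
  public void stop(CoprocessorEnvironment env) throws IOException {
  }
}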
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
index 6b202ad4742..4681fd3de14 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FirstKeyValueMatchingQualifiersFilter.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
  * Note : It may emit KVs which do not have the given columns in them, if
  * these KVs happen to occur before a KV which does have a match. Given this
  * caveat, this filter is only useful for special cases
- * like {@link org.apache.hadoop.hbase.mapreduce.RowCounter}.
+ * like org.apache.hadoop.hbase.mapreduce.RowCounter.
 * <p>
    * @deprecated Deprecated in 2.0. See HBASE-13347 */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java index b899eb84991..a333d573c6d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java @@ -54,8 +54,8 @@ import org.apache.hadoop.util.StringUtils; public class ServerRpcController implements RpcController { /** * The exception thrown within - * {@link com.google.protobuf.Service#callMethod( - * Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)} + * {@link com.google.protobuf.Service#callMethod(com.google.protobuf.Descriptors.MethodDescriptor, RpcController, + * com.google.protobuf.Message, RpcCallback)} * if any. */ // TODO: it would be good widen this to just Throwable, but IOException is what we allow now diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java index 43a5ad9187e..4f07d5b65a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java @@ -630,7 +630,7 @@ public class RecoverableZooKeeper { } } /** - * Convert Iterable of {@link ZKOp} we got into the ZooKeeper.Op + * Convert Iterable of {@link org.apache.zookeeper.Op} we got into the ZooKeeper.Op * instances to actually pass to multi (need to do this in order to appendMetaData). */ private Iterable prepareZKMulti(Iterable ops) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java index 60202a0fdf5..036c4e69aae 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/nio/ByteBuff.java @@ -492,7 +492,7 @@ public abstract class ByteBuff { } /** - * Similar to {@link WritableUtils#readVLong(DataInput)} but reads from a + * Similar to {@link WritableUtils#readVLong(java.io.DataInput)} but reads from a * {@link ByteBuff}. */ public static long readVLong(ByteBuff in) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java index a0c739095dc..a16756278ed 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/OrderedBytes.java @@ -341,7 +341,7 @@ public class OrderedBytes { /** * Perform unsigned comparison between two long values. Conforms to the same interface as - * {@link Comparator#compare(Object, Object)}. + * {@link org.apache.hadoop.hbase.CellComparator#COMPARATOR#compare(Object, Object)}. 
*/ private static int unsignedCmp(long x1, long x2) { int cmp; diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java index 0bf8aea1331..fcad895501a 100644 --- a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java +++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java @@ -32,8 +32,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; /** * Common helpers for testing HBase that do not depend on specific server/etc. things. - * @see {@link HBaseTestingUtility} - * + * {@see org.apache.hadoop.hbase.HBaseTestingUtility} */ @InterfaceAudience.Public @InterfaceStability.Unstable diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java index 46684687a8c..aff30352498 100644 --- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java +++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java @@ -71,7 +71,7 @@ public interface CellSearcher extends ReversibleCellScanner { /** *
<p>
 * Note: Added for backwards compatibility with
- * {@link org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek}
+ * org.apache.hadoop.hbase.regionserver.KeyValueScanner#reseek(Cell)
 * <p>
    * Look for the key, but only look after the current position. Probably not needed for an * efficient tree implementation, but is important for implementations without random access such diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java index aeae5698e2c..4cc459b67b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java @@ -119,8 +119,7 @@ public class ProcedureWALFormatReader { * purpose. If all procedures updated in a WAL are found to be obsolete, it can be safely deleted. * (see {@link WALProcedureStore#removeInactiveLogs()}). * However, we don't need deleted part of a WAL's tracker for this purpose, so we don't bother - * re-building it. (To understand why, take a look at - * {@link ProcedureStoreTracker.BitSetNode#subtract(ProcedureStoreTracker.BitSetNode)}). + * re-building it. */ private ProcedureStoreTracker localTracker; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index 52185f19c57..ecd4401b413 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -466,8 +466,6 @@ public class HFileArchiver { *
<p>
    * A best effort is made to delete each of the files, rather than bailing on the first failure. *
<p>
    - * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes - * less resources, but is limited in terms of usefulness * @param compactedFiles store files to delete from the file system. * @throws IOException if a file cannot be deleted. All files will be attempted to deleted before * throwing the exception, rather than failing at the first file. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java index 63d88ef1f6f..55e3e1a7a3a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/example/HFileArchiveManager.java @@ -67,7 +67,8 @@ class HFileArchiveManager { /** * Stop retaining HFiles for the given table in the archive. HFiles will be cleaned up on the next - * pass of the {@link HFileCleaner}, if the HFiles are retained by another cleaner. + * pass of the {@link org.apache.hadoop.hbase.master.cleaner.HFileCleaner}, if the HFiles are retained by another + * cleaner. * @param table name of the table for which to disable hfile retention. * @return this for chaining. * @throws KeeperException if if we can't reach zookeeper to update the hfile cleaner. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java index a130c213f9e..79adcabe326 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java @@ -283,7 +283,7 @@ public class RestoreTool { /** * Duplicate the backup image if it's on local cluster - * @see HStore#bulkLoadHFile(String, long) + * @see HStore#bulkLoadHFile(org.apache.hadoop.hbase.regionserver.StoreFile) * @see HRegionFileSystem#bulkLoadStoreFile(String familyName, Path srcPath, long seqNum) * @param tableArchivePath archive path * @return the new tableArchivePath @@ -554,7 +554,7 @@ public class RestoreTool { /** * Prepare the table for bulkload, most codes copied from - * {@link LoadIncrementalHFiles#createTable(String, String)} + * {@link LoadIncrementalHFiles#createTable(TableName, String, Admin)} * @param conn connection * @param tableBackupPath path * @param tableName table name diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java index a2711f2bedc..db8d2e9dd8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraint.java @@ -27,15 +27,15 @@ import org.apache.hadoop.hbase.client.Put; * any order. *
<p>
 * A {@link Constraint} must be added to a table before the table is loaded via
- * {@link Constraints#add(HTableDescriptor, Class...)} or
- * {@link Constraints#add(HTableDescriptor,
+ * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor, Class[])} or
+ * {@link Constraints#add(org.apache.hadoop.hbase.HTableDescriptor,
  * org.apache.hadoop.hbase.util.Pair...)}
  * (if you want to add a configuration with the {@link Constraint}). Constraints
  * will be run in the order that they are added. Further, a Constraint will be
  * configured before it is run (on load).
 * <p>
- * See {@link Constraints#enableConstraint(HTableDescriptor, Class)} and
- * {@link Constraints#disableConstraint(HTableDescriptor, Class)} for
+ * See {@link Constraints#enableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} and
+ * {@link Constraints#disableConstraint(org.apache.hadoop.hbase.HTableDescriptor, Class)} for
  * enabling/disabling of a given {@link Constraint} after it has been added.
 * <p>
    * If a {@link Put} is invalid, the Constraint should throw some sort of diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java index 96dfcbdce68..6c7ac371a51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilterWriter.java @@ -248,7 +248,7 @@ public class CompoundBloomFilterWriter extends CompoundBloomFilterBase } /** - * This is modeled after {@link BloomFilterChunk.MetaWriter} for simplicity, + * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity, * although the two metadata formats do not have to be consistent. This * does have to be consistent with how {@link * CompoundBloomFilter#CompoundBloomFilter(DataInput, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 0b140b6e301..4711cec8fe3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -697,7 +697,7 @@ public class HFileBlock implements Cacheable { } /** - * Cannot be {@link #UNSET}. Must be a legitimate value. Used re-making the {@link CacheKey} when + * Cannot be {@link #UNSET}. Must be a legitimate value. Used re-making the {@link BlockCacheKey} when * block is returned to the cache. * @return the offset of this block in the file it was read from */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index b36c292e797..ae7bfda7529 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -61,7 +61,7 @@ import org.apache.hadoop.util.StringUtils; * {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and * {@link HFileWriterImpl}. Examples of how to use the reader can be * found in {@link HFileReaderImpl} and - * {@link org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex}. + * org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex. */ @InterfaceAudience.Private public class HFileBlockIndex { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java index 47651afc6be..46d29eb4300 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java @@ -76,8 +76,7 @@ public class RowCounter extends Configured implements Tool { * @param values The columns. * @param context The current context. * @throws IOException When something is broken with the data. 
- * @see org.apache.hadoop.mapreduce.Mapper#map(KEYIN, VALUEIN, - * org.apache.hadoop.mapreduce.Mapper.Context) + * @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context) */ @Override public void map(ImmutableBytesWritable row, Result values, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java index 2f06972a7b6..7582d424b40 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java @@ -212,7 +212,7 @@ public class TableNamespaceManager { /** * Create Namespace in a blocking manner. Keeps trying until - * {@link ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY} expires. + * {@link ClusterSchema#HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY} expires. * Note, by-passes notifying coprocessors and name checks. Use for system namespaces only. */ private void blockingCreateNamespace(final NamespaceDescriptor namespaceDescriptor) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java index 5c92973c509..59ea067abdf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java @@ -980,8 +980,8 @@ public class StochasticLoadBalancer extends BaseLoadBalancer { abstract double cost(); /** - * Function to compute a scaled cost using {@link DescriptiveStatistics}. It - * assumes that this is a zero sum set of costs. It assumes that the worst case + * Function to compute a scaled cost using {@link org.apache.commons.math3.stat.descriptive.DescriptiveStatistics}. + * It assumes that this is a zero sum set of costs. It assumes that the worst case * possible is all of the elements in one region server and the rest having 0. * * @param stats the costs diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java index 1ab2ef58da8..e9458d429d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java @@ -25,9 +25,9 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit; * Wraps together the mutations which are applied as a batch to the region and their operation * status and WALEdits. * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate( - * ObserverContext, MiniBatchOperationInProgress) + * org.apache.hadoop.hbase.coprocessor.ObserverContext, MiniBatchOperationInProgress) * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate( - * ObserverContext, MiniBatchOperationInProgress) + * org.apache.hadoop.hbase.coprocessor.ObserverContext, MiniBatchOperationInProgress) * @param T Pair<Mutation, Integer> pair of Mutations and associated rowlock ids . 
*/ @InterfaceAudience.LimitedPrivate("Coprocessors") diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index d91e79ea7a0..8f01a93a973 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -208,7 +208,7 @@ public class StoreFileReader { * Checks whether the given scan passes the Bloom filter (if present). Only * checks Bloom filters for single-row or single-row-column scans. Bloom * filter checking for multi-gets is implemented as part of the store - * scanner system (see {@link StoreFileScanner#seekExactly}) and uses + * scanner system (see {@link StoreFileScanner#seek(Cell)} and uses * the lower-level API {@link #passesGeneralRowBloomFilter(byte[], int, int)} * and {@link #passesGeneralRowColBloomFilter(Cell)}. * diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index f0e29c127d3..caf07a287d4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -244,8 +244,8 @@ public class FSHLog extends AbstractFSWAL { /** * Currently, we need to expose the writer's OutputStream to tests so that they can manipulate the * default behavior (such as setting the maxRecoveryErrorCount value for example (see - * {@link AbstractTestWALReplay#testReplayEditsWrittenIntoWAL()}). This is done using reflection - * on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is + * {@see org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay#testReplayEditsWrittenIntoWAL()}). This is + * done using reflection on the underlying HDFS OutputStream. NOTE: This could be removed once Hadoop1 support is * removed. * @return null if underlying stream is not ready. */ @@ -809,9 +809,9 @@ public class FSHLog extends AbstractFSWAL { * To start up the drama, Thread A creates an instance of this class each time it would do this * zigzag dance and passes it to Thread B (these classes use Latches so it is one shot only). * Thread B notices the new instance (via reading a volatile reference or how ever) and it starts - * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint()} when it cannot proceed + * to work toward the 'safe point'. Thread A calls {@link #waitSafePoint(SyncFuture)} when it cannot proceed * until the Thread B 'safe point' is attained. Thread A will be held inside in - * {@link #waitSafePoint()} until Thread B reaches the 'safe point'. Once there, Thread B frees + * {@link #waitSafePoint(SyncFuture)} until Thread B reaches the 'safe point'. Once there, Thread B frees * Thread A by calling {@link #safePointAttained()}. Thread A now knows Thread B is at the 'safe * point' and that it is holding there (When Thread B calls {@link #safePointAttained()} it blocks * here until Thread A calls {@link #releaseSafePoint()}). 
Thread A proceeds to do what it needs diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java index cd73eb3e21f..b065a593453 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java @@ -61,7 +61,7 @@ class SequenceIdAccounting { * {@link #flushingSequenceIds}. * *
<p>
    The two Maps are tied by this locking object EXCEPT when we go to update the lowest - * entry; see {@link #lowest(byte[], Set, Long)}. In here is a putIfAbsent call on + * entry; see {@link #lowestUnflushedSequenceIds}. In here is a putIfAbsent call on * {@link #lowestUnflushedSequenceIds}. In this latter case, we will add this lowest * sequence id if we find that there is no entry for the current column family. There will be no * entry only if we just came up OR we have moved aside current set of lowest sequence ids @@ -403,8 +403,8 @@ class SequenceIdAccounting { /** * Iterates over the given Map and compares sequence ids with corresponding entries in - * {@link #oldestUnflushedRegionSequenceIds}. If a region in - * {@link #oldestUnflushedRegionSequenceIds} has a sequence id less than that passed in + * {@link #lowestUnflushedSequenceIds}. If a region in + * {@link #lowestUnflushedSequenceIds} has a sequence id less than that passed in * sequenceids then return it. * @param sequenceids Sequenceids keyed by encoded region name. * @return regions found in this instance with sequence ids less than those passed in. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java index bc2e62ee192..d11fbe7b513 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java @@ -77,7 +77,7 @@ class SyncFuture { * Call this method to clear old usage and get it ready for new deploy. * @param txid the new transaction id * @param span current span, detached from caller. Don't forget to attach it when resuming after a - * call to {@link #get()}. + * call to {@link #get(long)}. * @return this */ synchronized SyncFuture reset(final long txid, Span span) { @@ -107,7 +107,7 @@ class SyncFuture { /** * Retrieve the {@code span} instance from this Future. EventHandler calls this method to continue * the span. Thread waiting on this Future musn't call this method until AFTER calling - * {@link #get()} and the future has been released back to the originating thread. + * {@link #get(long)} and the future has been released back to the originating thread. */ synchronized Span getSpan() { return this.span; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java index 649fbddc1fb..57715934ab3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/CoprocessorWhitelistMasterObserver.java @@ -75,7 +75,7 @@ public class CoprocessorWhitelistMasterObserver implements MasterObserver { * 1) a "*" to wildcard all coprocessor paths * 2) a specific filesystem (e.g. hdfs://my-cluster/) * 3) a wildcard path to be evaluated by - * {@link FilenameUtils.wildcardMatch} + * {@link FilenameUtils#wildcardMatch(String, String)} * path can specify scheme or not (e.g. 
* "file:///usr/hbase/coprocessors" or for all * filesystems "/usr/hbase/coprocessors") diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java index 0d539ced5a3..84cda91df9e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java @@ -85,7 +85,7 @@ public class TableAuthManager implements Closeable { /** * Returns a combined map of user and group permissions, with group names - * distinguished according to {@link AuthUtil.isGroupPrincipal} + * distinguished according to {@link AuthUtil#isGroupPrincipal(String)}. */ public ListMultimap getAllPermissions() { ListMultimap tmp = ArrayListMultimap.create(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 8a4ed7271d5..696ea18afff 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -2059,7 +2059,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } /** A tracker for tracking and validating table rows - * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])} + * generated with {@link HBaseTestingUtility#loadTable(Table, byte[])} */ public static class SeenRowTracker { int dim = 'z' - 'a' + 1; @@ -2326,7 +2326,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return digest.toString(); } - /** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */ + /** All the row values for the data loaded by {@link #loadTable(Table, byte[])} */ public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB static { int i = 0; @@ -3110,7 +3110,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Waits for a table to be 'enabled'. Enabled means that table is set as 'enabled' and the * regions have been all assigned. - * @see #waitTableEnabled(Admin, byte[], long) + * @see #waitTableEnabled(TableName, long) * @param table Table to wait on. * @param timeoutMillis Time to wait on it being marked enabled. * @throws InterruptedException diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java index 4a0b5c9161f..8bebd8d7d7c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java @@ -301,12 +301,12 @@ public class TestMetaTableLocator { * want to pass a mocked HRS; can be null. 
* @param client A mocked ClientProtocol instance, can be null * @return Mock up a connection that returns a {@link Configuration} when - * {@link HConnection#getConfiguration()} is called, a 'location' when - * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called, + * {@link org.apache.hadoop.hbase.client.ClusterConnection#getConfiguration()} is called, a 'location' when + * {@link org.apache.hadoop.hbase.client.RegionLocator#getRegionLocation(byte[], boolean)} is called, * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when - * {@link HConnection#getAdmin(ServerName)} is called, returns the passed + * {@link org.apache.hadoop.hbase.client.ClusterConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClientProtos.ClientService.BlockingInterface} instance when - * {@link HConnection#getClient(ServerName)} is called. + * {@link org.apache.hadoop.hbase.client.ClusterConnection#getClient(ServerName)} is called. * @throws IOException */ private ClusterConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java index f3ea81411af..96bc81133e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -447,7 +447,7 @@ public class TestPartialResultsFromClientSide { } /** - * Test the method {@link Result#createCompleteResult(List)} + * Test the method {@link Result#createCompleteResult(Iterable)} * @throws Exception */ @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java index 88f5cc475d1..be033e99211 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestSerialization.java @@ -571,7 +571,7 @@ public class TestSerialization { protected static final byte [][] COLUMNS = {fam1, fam2, fam3}; /** - * Create a table of name name with {@link COLUMNS} for + * Create a table of name name with {@link #COLUMNS} for * families. * @param name Name to give table. * @return Column descriptor. @@ -581,7 +581,7 @@ public class TestSerialization { } /** - * Create a table of name name with {@link COLUMNS} for + * Create a table of name name with {@link #COLUMNS} for * families. * @param name Name to give table. * @param versions How many versions to allow per column. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java index a25c4aff703..807d59afe66 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultipleTimestamps.java @@ -41,7 +41,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; /** - * Run tests related to {@link TimestampsFilter} using HBase client APIs. + * Run tests related to {@link org.apache.hadoop.hbase.filter.TimestampsFilter} using HBase client APIs. * Sets up the HBase mini cluster once at start. Each creates a table * named for the method and does its stuff against that. 
*/ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java index dbda361ccf0..60e35146cee 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFirstKeyValueMatchingQualifiersFilter.java @@ -40,7 +40,7 @@ public class TestFirstKeyValueMatchingQualifiersFilter extends TestCase { /** * Test the functionality of - * {@link FirstKeyValueMatchingQualifiersFilter#filterKeyValue(KeyValue)} + * {@link FirstKeyValueMatchingQualifiersFilter#filterKeyValue(org.apache.hadoop.hbase.Cell)} * * @throws Exception */ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index d654bce13ec..6531d2c0071 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -55,7 +55,7 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; /** - * Test {@link HFileScanner#seekTo(byte[])} and its variants. + * Test {@link HFileScanner#seekTo(Cell)} and its variants. */ @Category({IOTests.class, SmallTests.class}) @RunWith(Parameterized.class) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 3c1bed8b675..20fc9923bc4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -113,7 +113,7 @@ import org.junit.rules.TestRule; import org.mockito.Mockito; /** - * Simple test for {@link CellSortReducer} and {@link HFileOutputFormat2}. + * Simple test for {@link HFileOutputFormat2}. * Sets up and runs a mapreduce job that writes hfile output. * Creates a few inner classes to implement splits and an inputformat that * emits keys and values like those of {@link PerformanceEvaluation}. @@ -684,9 +684,8 @@ public class TestHFileOutputFormat2 { } /** - * Test for {@link HFileOutputFormat2#configureCompression(org.apache.hadoop.hbase.client.Table, - * Configuration)} and {@link HFileOutputFormat2#createFamilyCompressionMap - * (Configuration)}. + * Test for {@link HFileOutputFormat2#configureCompression(Configuration, HTableDescriptor)} and + * {@link HFileOutputFormat2#createFamilyCompressionMap(Configuration)}. * Tests that the compression map is correctly serialized into * and deserialized from configuration * @@ -754,9 +753,8 @@ public class TestHFileOutputFormat2 { /** - * Test for {@link HFileOutputFormat2#configureBloomType(org.apache.hadoop.hbase.client.Table, - * Configuration)} and {@link HFileOutputFormat2#createFamilyBloomTypeMap - * (Configuration)}. + * Test for {@link HFileOutputFormat2#configureBloomType(HTableDescriptor, Configuration)} and + * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. 
* Tests that the compression map is correctly serialized into * and deserialized from configuration * @@ -824,9 +822,8 @@ public class TestHFileOutputFormat2 { } /** - * Test for {@link HFileOutputFormat2#configureBlockSize(org.apache.hadoop.hbase.client.Table, - * Configuration)} and {@link HFileOutputFormat2#createFamilyBlockSizeMap - * (Configuration)}. + * Test for {@link HFileOutputFormat2#configureBlockSize(HTableDescriptor, Configuration)} and + * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. * Tests that the compression map is correctly serialized into * and deserialized from configuration * diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 1866a35e228..dc5981790fd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -700,7 +700,7 @@ public class TestImportExport { } /** - * This listens to the {@link #visitLogEntryBeforeWrite(HTableDescriptor, WALKey, WALEdit)} to + * This listens to the {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)} to * identify that an entry is written to the Write Ahead Log for the given table. */ private static class TableWALActionListener extends WALActionsListener.Base { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java index b653e3fd070..78c82141a47 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java @@ -139,7 +139,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { /** * Map of regions to map of rows and {@link Result}. Used as data source when - * {@link MockRegionServer#get(byte[], Get)} is called. Because we have a byte + * {@link #get(RpcController, GetRequest)} is called. Because we have a byte * key, need to use TreeMap and provide a Comparator. Use * {@link #setGetResult(byte[], byte[], Result)} filling this map. */ @@ -190,7 +190,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices { } /** - * Use this method filling the backing data source used by {@link #get(byte[], Get)} + * Use this method filling the backing data source used by {@link #get(RpcController, GetRequest)} * @param regionName * @param row * @param r diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java index f33bc9815ae..ec7ffe63a88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestWarmupRegion.java @@ -46,7 +46,7 @@ import org.junit.After; import org.junit.Test; /** - * Run tests that use the HBase clients; {@link HTable}. + * Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.HTable}. * Sets up the HBase mini cluster once at start and runs through all client tests. * Each creates a table named for the method and does its stuff against that. 
*/ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java index 0d31108f34b..e2e641ea590 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/TestProcedureMember.java @@ -224,8 +224,8 @@ public class TestProcedureMember { /** * Fail correctly if coordinator aborts the procedure. The subprocedure will not interrupt a - * running {@link Subprocedure#prepare} -- prepare needs to finish first, and the the abort - * is checked. Thus, the {@link Subprocedure#prepare} should succeed but later get rolled back + * running {@link Subprocedure#acquireBarrier()} -- prepare needs to finish first, and the the abort + * is checked. Thus, the {@link Subprocedure#acquireBarrier()} should succeed but later get rolled back * via {@link Subprocedure#cleanup}. */ @Test(timeout = 60000) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java index dd202591f83..a074a9abf66 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/DataBlockEncodingTool.java @@ -69,7 +69,7 @@ public class DataBlockEncodingTool { /** * How many times to run the benchmark. More times means better data in terms * of statistics but slower execution. Has to be strictly larger than - * {@link DEFAULT_BENCHMARK_N_OMIT}. + * {@link #DEFAULT_BENCHMARK_N_OMIT}. */ private static final int DEFAULT_BENCHMARK_N_TIMES = 12; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java index 036c11c8cd0..07c141c9671 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java @@ -24,7 +24,6 @@ import java.util.List; import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; @@ -32,7 +31,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequ /** * A region server that will OOME. - * Everytime {@link #put(regionName, Durability)} is called, we add + * Everytime {@link #put(byte[], Put)} is called, we add * keep around a reference to the batch. Use this class to test OOME extremes. * Needs to be started manually as in * ${HBASE_HOME}/bin/hbase ./bin/hbase org.apache.hadoop.hbase.OOMERegionServer start. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java index 358aabd74c7..c04f2d4f900 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java @@ -88,12 +88,6 @@ import com.google.common.base.Joiner; import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; -/** - * Like {@link TestRegionMergeTransaction} in that we're testing - * {@link RegionMergeTransactionImpl} only the below tests are against a running - * cluster where {@link TestRegionMergeTransaction} is tests against bare - * {@link HRegion}. - */ @Category({RegionServerTests.class, MediumTests.class}) public class TestRegionMergeTransactionOnCluster { private static final Log LOG = LogFactory diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 9a5e6f1cf24..27c4282bb94 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -64,7 +64,7 @@ import org.apache.hadoop.util.ToolRunner; /** * A command-line utility that reads, writes, and verifies data. Unlike - * {@link PerformanceEvaluation}, this tool validates the data written, + * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, * and supports simultaneously writing and reading the same set of keys. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java index 2b687191ec4..e4e0af565c0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MultiThreadedAction.java @@ -78,8 +78,8 @@ public abstract class MultiThreadedAction { * Default implementation of LoadTestDataGenerator that uses LoadTestKVGenerator, fixed * set of column families, and random number of columns in range. The table for it can * be created manually or, for example, via - * {@link HBaseTestingUtility#createPreSplitLoadTestTable( - * org.apache.hadoop.hbase.Configuration, byte[], byte[], Algorithm, DataBlockEncoding)} + * {@link org.apache.hadoop.hbase.HBaseTestingUtility#createPreSplitLoadTestTable(Configuration, TableName, byte[], + * org.apache.hadoop.hbase.io.compress.Compression.Algorithm, org.apache.hadoop.hbase.io.encoding.DataBlockEncoding)} */ public static class DefaultDataGenerator extends LoadTestDataGenerator { private byte[][] columnFamilies = null;
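The common thread in these edits is that a {@link} target must resolve on the compile classpath of the module whose Javadoc is being built: references are either fully qualified, corrected to real method signatures, or downgraded to plain text when the target lives in a module the Javadoc tool cannot see. A contrived illustration of the convention (the class below is hypothetical):

/**
 * Resolvable reference: the target is on every module's classpath.
 * {@link java.util.concurrent.Callable#call()}
 *
 * <p>Not resolvable from hbase-client, so plain text is used instead:
 * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy
 */
public final class JavadocReferenceExample {
  private JavadocReferenceExample() {
  }
}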