diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
index d0d23b618f1..338b3752e93 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
@@ -27,10 +27,14 @@ import java.lang.annotation.RetentionPolicy;
* class or method not changing over time. Currently the stability can be
* {@link Stable}, {@link Evolving} or {@link Unstable}.
*
- *
* Takes two generic parameters and three Message parameters.
* The cell value type of the interpreter is
* Multiple filters can be combined using {@link FilterList}.
*
- * If an already known column qualifier is looked for, use {@link Get#addColumn}
+ * If an already known column qualifier is looked for,
+ * use {@link org.apache.hadoop.hbase.client.Get#addColumn}
* directly rather than a filter.
*/
@InterfaceAudience.Public
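For illustration, a minimal sketch of the direct-Get pattern this javadoc recommends (table, row, family, and qualifier names are hypothetical):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Ask for the known column directly instead of installing a filter.
Result readKnownColumn(Table table) throws IOException {
  Get get = new Get(Bytes.toBytes("row1"));
  get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  return table.get(get);
}
```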
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
index 9cc28f7cdb0..cb4337e28df 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
@@ -40,7 +40,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
*
* Multiple filters can be combined using {@link FilterList}.
*
- * If an already known row range needs to be scanned, use {@link Scan} start
+ * If an already known row range needs to be scanned,
+ * use {@link org.apache.hadoop.hbase.client.Scan} start
* and stop rows directly rather than a filter.
*/
@InterfaceAudience.Public
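As a sketch, the start/stop-row form the javadoc prefers over a RowFilter (row keys are hypothetical; HBase 1.x client API assumed):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Bound the scan by row keys directly: start row inclusive, stop row exclusive.
ResultScanner scanKnownRange(Table table) throws IOException {
  Scan scan = new Scan(Bytes.toBytes("row-000"), Bytes.toBytes("row-999"));
  return table.getScanner(scan);
}
```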
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 897f02989c0..d905868301a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -52,7 +52,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
* long value), then you can pass in your own comparator instead.
*
* You must also specify a family and qualifier. Only the value of this column
- * will be tested. When using this filter on a {@link Scan} with specified
+ * will be tested. When using this filter on a
+ * {@link org.apache.hadoop.hbase.client.Scan} with specified
* inputs, the column to be tested should also be added as input (otherwise
* the filter will regard the column as missing).
*
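A minimal sketch of the point above: when the scan specifies input columns, the tested column must be among them (all names hypothetical):

```java
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

Scan buildFilteredScan() {
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      Bytes.toBytes("cf"), Bytes.toBytes("qual"),
      CompareFilter.CompareOp.EQUAL, Bytes.toBytes("value"));
  Scan scan = new Scan();
  // Add the tested column as input, or the filter regards it as missing.
  scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));
  scan.setFilter(filter);
  return scan;
}
```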
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
index f3d81f7cb91..8ec1517b51a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java
@@ -36,8 +36,8 @@ import com.google.protobuf.ServiceException;
/**
* Base class which provides clients with an RPC connection to
- * call coprocessor endpoint {@link Service}s. Note that clients should not use this class
- * directly, except through
+ * call coprocessor endpoint {@link com.google.protobuf.Service}s.
+ * Note that clients should not use this class directly, except through
* {@link org.apache.hadoop.hbase.client.HTableInterface#coprocessorService(byte[])}.
*/
@InterfaceAudience.Public
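A sketch of the supported entry point; MyService stands in for a hypothetical protobuf-generated coprocessor Service:

```java
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.util.Bytes;

// Obtain the channel through the client API rather than constructing it.
MyService.BlockingInterface stubFor(Table table) throws java.io.IOException {
  CoprocessorRpcChannel channel = table.coprocessorService(Bytes.toBytes("row1"));
  return MyService.newBlockingStub(channel);
}
```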
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
index 01661ed6cbd..46356f83b41 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java
@@ -23,7 +23,8 @@ import org.apache.hadoop.ipc.RemoteException;
/**
* A {@link RemoteException} with some extra information. If source exception
- * was a {@link DoNotRetryIOException}, {@link #isDoNotRetry()} will return true.
+ * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException},
+ * {@link #isDoNotRetry()} will return true.
* A {@link RemoteException} hosts exceptions we got from the server.
*/
@SuppressWarnings("serial")
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
index b9b31f9d51a..5511cb1b0e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java
@@ -29,11 +29,11 @@ import com.google.protobuf.RpcController;
/**
* Used for server-side protobuf RPC service invocations. This handler allows
* invocation exceptions to easily be passed through to the RPC server from coprocessor
- * {@link Service} implementations.
+ * {@link com.google.protobuf.Service} implementations.
*
*
- * When implementing {@link Service} defined methods, coprocessor endpoints can use the following
- * pattern to pass exceptions back to the RPC client:
+ * When implementing {@link com.google.protobuf.Service} defined methods,
+ * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client:
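A sketch of that pattern (MyRequest, MyResponse, and doProcessing are hypothetical):

```java
// Inside a coprocessor endpoint implementing a protobuf Service method:
public void myMethod(RpcController controller, MyRequest request,
    RpcCallback<MyResponse> done) {
  MyResponse response = null;
  try {
    response = doProcessing(request);
  } catch (IOException ioe) {
    // Hand the exception to the RPC layer instead of throwing it.
    ((ServerRpcController) controller).setFailedOn(ioe);
  }
  done.run(response);
}
```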
- * Like {@link DataBlockEncoder} only Cell-based rather than KeyValue version 1 based
+ * Like {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
+ * only Cell-based rather than KeyValue version 1 based
* and without presuming an hfile context. Intent is an Interface that will work for hfile and
* rpc.
*/
@InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
public interface Codec {
- // TODO: interfacing with {@link DataBlockEncoder}
+ // TODO: interfacing with {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
/**
* Call flush when done. Some encoders may not put anything on the stream until flush is called.
* On flush, let go of any resources used by the encoder.
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
index 3c69d9869a3..34f1bf73d69 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
* and flushes, or to build a byte[] to send to the client. This could be backed by a
* List
- * To read Cells, use {@link CellScanner}
- * @see CellScanner
+ * To read Cells, use {@link org.apache.hadoop.hbase.CellScanner}
+ * @see org.apache.hadoop.hbase.CellScanner
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
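A minimal sketch of the write-then-flush contract, reading from a CellScanner as the javadoc suggests:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.io.CellOutputStream;

// Some encoders emit nothing until flush(), so always flush when done.
void writeAll(CellOutputStream out, CellScanner cells) throws IOException {
  while (cells.advance()) {
    out.write(cells.current());
  }
  out.flush();
}
```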
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java
index da434d23835..2f98ebf52d8 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
* qualifiers.
*
- * {@code DataType}s are different from Hadoop {@link Writable}s in two
+ * {@code DataType}s are different from Hadoop
+ * {@link org.apache.hadoop.io.Writable}s in two
* significant ways. First, {@code DataType} describes how to serialize a
* value, it does not encapsulate a serialized value. Second, {@code DataType}
* implementations provide hints to consumers about relationships between the
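As a sketch of the first distinction: the DataType describes the encoding while the caller owns the buffer and the value (RawString.ASCENDING is one stock implementation):

```java
import org.apache.hadoop.hbase.types.RawString;
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;

String roundTrip() {
  PositionedByteRange buff = new SimplePositionedByteRange(32);
  RawString.ASCENDING.encode(buff, "hello"); // serialize into caller's buffer
  buff.setPosition(0);
  return RawString.ASCENDING.decode(buff);   // "hello"
}
```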
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java
index 90697dba356..bfd64160886 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java
@@ -24,9 +24,11 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
/**
* An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Intended to make it
- * easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}.
+ * Intended to make it
+ * easier to transition away from direct use of
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
* @see RawBytes
* @see OrderedBlob
* @see OrderedBlobVar
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java
index 292318b3d37..8bc4c2074c7 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java
@@ -24,10 +24,10 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
/**
* An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Includes a
- * termination marker following the raw {@code byte[]} value. Intended to
- * make it easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}.
+ * Includes a termination marker following the raw {@code byte[]} value. Intended to make it easier
+ * to transition away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
* @see RawBytes
* @see OrderedBlob
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java
index c5774c0a94d..d11bead3c32 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java
@@ -23,10 +23,11 @@ import org.apache.hadoop.hbase.util.Order;
/**
* An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#toBytes(String)}. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+ * Intended to make it easier to transition away from direct use of
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
* @see RawString
*/
@InterfaceAudience.Public
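For reference, a sketch of the direct Bytes calls these wrapper types are meant to replace:

```java
import org.apache.hadoop.hbase.util.Bytes;

String bytesRoundTrip() {
  byte[] encoded = Bytes.toBytes("value");
  return Bytes.toString(encoded, 0, encoded.length); // "value"
}
```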
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java
index a954ec31ac6..4d89d5b4dfe 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java
@@ -23,11 +23,12 @@ import org.apache.hadoop.hbase.util.Order;
/**
* An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#toBytes(String)}. Includes a termination marker following the
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+ * Includes a termination marker following the
* raw {@code byte[]} value. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
* @see RawString
* @see OrderedString
*/
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
index bc09988b0c4..8d3d0cf9d90 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java
@@ -35,7 +35,7 @@ import com.google.common.annotations.VisibleForTesting;
public abstract class AbstractPositionedByteRange extends AbstractByteRange implements
PositionedByteRange {
/**
- * The current index into the range. Like {@link ByteBuffer} position, it
+ * The current index into the range. Like {@link java.nio.ByteBuffer} position, it
* points to the next value that will be read/written in the array. It
* provides the appearance of being 0-indexed, even though its value is
* calculated according to offset.
@@ -182,7 +182,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
@Override
public abstract int putVLong(long val);
/**
- * Similar to {@link ByteBuffer#flip()}. Sets length to position, position to
+ * Similar to {@link java.nio.ByteBuffer#flip()}. Sets length to position, position to
* offset.
*/
@VisibleForTesting
@@ -194,7 +194,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
}
/**
- * Similar to {@link ByteBuffer#clear()}. Sets position to 0, length to
+ * Similar to {@link java.nio.ByteBuffer#clear()}. Sets position to 0, length to
* capacity.
*/
@VisibleForTesting
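A short sketch of the cursor semantics described above; flip() and clear() are shown as comments since they are test-visible on the abstract class:

```java
import org.apache.hadoop.hbase.util.PositionedByteRange;
import org.apache.hadoop.hbase.util.SimplePositionedByteRange;

void cursorDemo() {
  PositionedByteRange r = new SimplePositionedByteRange(8);
  r.put((byte) 0x01).put((byte) 0x02); // position advances to 2
  // flip():  length = position, position = offset  (switch to reading)
  // clear(): position = 0, length = capacity       (reset for reuse)
}
```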
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java
index 46e583a30ca..3b4a1f1c08d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java
@@ -33,7 +33,7 @@ import com.google.common.base.Supplier;
/**
* A simple concurrent map of sets. This is similar in concept to
- * {@link Multiset}, with the following exceptions:
+ * {@link com.google.common.collect.Multiset}, with the following exceptions:
*
- * Designed to be a slimmed-down, mutable alternative to {@link ByteBuffer}.
+ * Designed to be a slimmed-down, mutable alternative to {@link java.nio.ByteBuffer}.
*
- * This only works properly if the {@link TimeToLiveHFileCleaner} is also enabled (it always should
- * be), since it may take a little time for the ZK notification to propagate, in which case we may
- * accidentally delete some files.
+ * This only works properly if the
+ * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
+ * is also enabled (it always should be), since it may take a little time
+ * for the ZK notification to propagate, in which case we may accidentally
+ * delete some files.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {
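A sketch of wiring both cleaners onto the master's cleaner chain; the plugin key is the standard HFileCleaner list, and the archiving cleaner is assumed to live in its usual backup.example package:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration archivingCleanerConf() {
  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.master.hfilecleaner.plugins",
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
          + "org.apache.hadoop.hbase.backup.example.LongTermArchivingHFileCleaner");
  return conf;
}
```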
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
index 2ffdf70ceb5..baf2aa6112e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/TableSnapshotScanner.java
@@ -49,7 +49,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
*
* This also allows one to run the scan from an
* online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this scanner can be used to
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool,
+ * to a pure-hdfs cluster, and this scanner can be used to
* run the scan directly over the snapshot files. The snapshot should not be deleted while there
* are open scanners reading from snapshot files.
*
@@ -65,7 +66,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
* snapshot files, the job has to be run as the HBase user or the user must have group or other
* privileges in the filesystem (See HBASE-8369). Note that, given other users access to read from
* snapshot/data files will completely circumvent the access control enforced by HBase.
- * @see TableSnapshotInputFormat
+ * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
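A sketch of driving this scanner; the snapshot name and restore directory are hypothetical, and the restore directory should sit on the same filesystem as the HBase root:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableSnapshotScanner;

void scanSnapshot(Configuration conf) throws IOException {
  TableSnapshotScanner scanner = new TableSnapshotScanner(
      conf, new Path("/tmp/snapshot_restore"), "my_snapshot", new Scan());
  try {
    for (Result r = scanner.next(); r != null; r = scanner.next()) {
      // process each row
    }
  } finally {
    scanner.close();
  }
}
```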
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
index 49a7a4bd53a..31746b689d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/ConstraintException.java
@@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.constraint;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
- * Exception that a user defined constraint throws on failure of a {@link Put}.
- *
- * Does NOT attempt the {@link Put} multiple times, since the constraint
+ * Exception that a user defined constraint throws on failure of a
+ * {@link org.apache.hadoop.hbase.client.Put}.
+ *
+ * Does NOT attempt the
+ * {@link org.apache.hadoop.hbase.client.Put} multiple times,
+ * since the constraint
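As a sketch, a hypothetical constraint that would raise this exception (assuming the Constraint API's check(Put) contract):

```java
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.constraint.BaseConstraint;
import org.apache.hadoop.hbase.constraint.ConstraintException;

// Hypothetical example: reject Puts that carry no cells.
public class NonEmptyPutConstraint extends BaseConstraint {
  @Override
  public void check(Put p) throws ConstraintException {
    if (p.isEmpty()) {
      throw new ConstraintException("Put must contain at least one cell");
    }
  }
}
```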
* Methods required for task life cycle:
* A RESCAN node is created using PERSISTENT_SEQUENTIAL flag. It is a signal for all the
- * {@link SplitLogWorker}s to rescan for new tasks.
+ * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}s to rescan for new tasks.
*/
public class CreateRescanAsyncCallback implements AsyncCallback.StringCallback {
private final Log LOG = LogFactory.getLog(CreateRescanAsyncCallback.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 3dcde668844..9ea6bd70c1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -583,8 +583,8 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
* Next part is related to WALSplitterHandler
*/
/**
- * endTask() can fail and the only way to recover out of it is for the {@link SplitLogManager} to
- * timeout the task node.
+ * endTask() can fail and the only way to recover out of it is for the
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
* @param slt
* @param ctr
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 21c01ecb3c4..9fede5258a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -212,8 +212,9 @@ public interface RegionObserver extends Coprocessor {
* options:
*
*
- * Contains three levels of block priority to allow for
- * scan-resistance and in-memory families {@link HColumnDescriptor#setInMemory(boolean)} (An
- * in-memory column family is a column family that should be served from memory if possible):
+ * Contains three levels of block priority to allow for scan-resistance and in-memory families
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column
+ * family is a column family that should be served from memory if possible):
* single-access, multiple-accesses, and in-memory priority.
* A block is added with an in-memory priority flag if
- * {@link HColumnDescriptor#isInMemory()}, otherwise a block becomes a single access
- * priority the first time it is read into this block cache. If a block is accessed again while
- * in cache, it is marked as a multiple access priority block. This delineation of blocks is used
- * to prevent scans from thrashing the cache adding a least-frequently-used
- * element to the eviction algorithm.
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()}, otherwise a block becomes a
+ * single access priority the first time it is read into this block cache. If a block is
+ * accessed again while in cache, it is marked as a multiple access priority block. This
+ * delineation of blocks is used to prevent scans from thrashing the cache adding a
+ * least-frequently-used element to the eviction algorithm.
*
* Each priority is given its own chunk of the total cache to ensure
* fairness during eviction. Each priority will retain close to its maximum
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index f1310a9d9ee..d3b303adf73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -82,8 +82,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
*
* BucketCache can be used as mainly a block cache (see
- * {@link CombinedBlockCache}), combined with LruBlockCache to decrease CMS GC and
- * heap fragmentation.
+ * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
+ * LruBlockCache to decrease CMS GC and heap fragmentation.
*
* It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
* blocks) to enlarge cache space via
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
index b75e4a67ac4..fd9a60cb13f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.ToolRunner;
/**
* A job with a map to count rows.
* Map outputs table rows IF the input row has columns that have content.
- * Uses an {@link IdentityReducer}
+ * Uses an org.apache.hadoop.mapred.lib.IdentityReducer
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index d2d754af9c9..fbfd984354c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -103,9 +103,11 @@ implements InputFormat
* Usage is similar to TableInputFormat, and
* {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
@@ -68,8 +68,8 @@ import com.google.common.annotations.VisibleForTesting;
*
* Internally, this input format restores the snapshot into the given tmp directory. Similar to
* {@link TableInputFormat} an InputSplit is created per region. The region is opened for reading
- * from each RecordReader. An internal RegionScanner is used to execute the {@link Scan} obtained
- * from the user.
+ * from each RecordReader. An internal RegionScanner is used to execute the
+ * {@link org.apache.hadoop.hbase.client.Scan} obtained from the user.
*
* HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from
* snapshot files and data files.
@@ -79,7 +79,7 @@ import com.google.common.annotations.VisibleForTesting;
* user or the user must have group or other privileges in the filesystem (See HBASE-8369).
* Note that, given other users access to read from snapshot/data files will completely circumvent
* the access control enforced by HBase.
- * @see TableSnapshotScanner
+ * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 6c073d79140..e5d63b2df4f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -150,7 +150,7 @@ import com.google.protobuf.Service;
*
* You can also shut down just this master. Call {@link #stopMaster()}.
*
- * @see Watcher
+ * @see org.apache.zookeeper.Watcher
*/
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 11ac7c6be44..bc798cdfb14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -74,8 +74,9 @@ import com.google.common.annotations.VisibleForTesting;
* SplitLogManager monitors the tasks that it creates using the
* timeoutMonitor thread. If a task's progress is slow then
* {@link SplitLogManagerCoordination#checkTasks} will take away the
- * task from the owner {@link SplitLogWorker} and the task will be up for grabs again. When the
- * task is done then it is deleted by SplitLogManager.
+ * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
+ * and the task will be up for grabs again. When the task is done then it is
+ * deleted by SplitLogManager.
*
* Clients call {@link #splitLogDistributed(Path)} to split a region server's
* log files. The caller thread waits in this method until all the log files
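A sketch of that master-side entry point; the WAL directory path is hypothetical:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.master.SplitLogManager;

// Split all WALs under a dead server's -splitting directory; returns bytes processed.
long splitDeadServerLogs(SplitLogManager mgr) throws IOException {
  return mgr.splitLogDistributed(
      new Path("/hbase/WALs/rs1.example.com,16020,1400000000000-splitting"));
}
```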
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 6d7ed316a69..13854449719 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -60,8 +60,9 @@ import com.google.common.collect.Sets;
/**
* The base class for load balancers. It provides the functions used by
- * {@link AssignmentManager} to assign regions in the edge cases. It doesn't
- * provide an implementation of the actual balancing algorithm.
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions
+ * in the edge cases. It doesn't provide an implementation of the
+ * actual balancing algorithm.
*
*/
public abstract class BaseLoadBalancer implements LoadBalancer {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
index c1209c8e699..6db82a5f62a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/FavoredNodeLoadBalancer.java
@@ -41,16 +41,17 @@ import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position;
import org.apache.hadoop.hbase.util.Pair;
/**
- * An implementation of the {@link LoadBalancer} that assigns favored nodes for
- * each region. There is a Primary RegionServer that hosts the region, and then
- * there is Secondary and Tertiary RegionServers. Currently, the favored nodes
- * information is used in creating HDFS files - the Primary RegionServer passes
- * the primary, secondary, tertiary node addresses as hints to the DistributedFileSystem
- * API for creating files on the filesystem. These nodes are treated as hints by
- * the HDFS to place the blocks of the file. This alleviates the problem to do with
- * reading from remote nodes (since we can make the Secondary RegionServer as the new
- * Primary RegionServer) after a region is recovered. This should help provide consistent
- * read latencies for the regions even when their primary region servers die.
+ * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that
+ * assigns favored nodes for each region. There is a Primary RegionServer that hosts
+ * the region, and then there are Secondary and Tertiary RegionServers. Currently, the
+ * favored nodes information is used in creating HDFS files - the Primary RegionServer
+ * passes the primary, secondary, tertiary node addresses as hints to the
+ * DistributedFileSystem API for creating files on the filesystem. These nodes are
+ * treated as hints by HDFS for placing the blocks of the file. This alleviates the
+ * problem of reading from remote nodes (since we can make the Secondary
+ * RegionServer as the new Primary RegionServer) after a region is recovered. This
+ * should help provide consistent read latencies for the regions even when their
+ * primary region servers die.
*
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index 164c418166e..9673acfe53e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -51,7 +51,8 @@ import com.google.common.collect.MinMaxPriorityQueue;
* On cluster startup, bulk assignment can be used to determine
* locations for all Regions in a cluster.
*
- * This classes produces plans for the {@link AssignmentManager} to execute.
+ * This class produces plans for the
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to execute.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public class SimpleLoadBalancer extends BaseLoadBalancer {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java
index 0655f1f5012..008a04e1763 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/LogReplayHandler.java
@@ -33,9 +33,10 @@ import org.apache.hadoop.hbase.master.MasterServices;
/**
* Handle logReplay work from SSH. A separate handler is used so as not to block SSH in re-assigning
* regions from dead servers. Otherwise, available SSH handlers could be blocked by logReplay work
- * (from {@link MasterFileSystem#splitLog(ServerName)}). During logReplay, if a receiving RS(say A)
- * fails again, regions on A won't be able to be assigned to another live RS which causes the log
- * replay unable to complete because WAL edits replay depends on receiving RS to be live
+ * (from {@link org.apache.hadoop.hbase.master.MasterFileSystem#splitLog(ServerName)}).
+ * During logReplay, if a receiving RS (say A) fails again, regions on A won't be able
+ * to be assigned to another live RS, which prevents the log replay from completing
+ * because WAL edits replay depends on the receiving RS being live
*/
@InterfaceAudience.Private
public class LogReplayHandler extends EventHandler {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
index c181e27ebca..1a30de419da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TotesHRegionInfo.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
/**
* Implementors tote an HRegionInfo instance.
- * This is a marker interface that can be put on {@link EventHandler}s that
+ * This is a marker interface that can be put on
+ * {@link org.apache.hadoop.hbase.executor.EventHandler}s that
* have an {@link HRegionInfo}.
*/
@InterfaceAudience.Private
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index df0b94b15c7..b21f4e72a13 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -98,7 +98,8 @@ public final class MasterSnapshotVerifier {
/**
* Verify that the snapshot in the directory is a valid snapshot
* @param snapshotDir snapshot directory to check
- * @param snapshotServers {@link ServerName} of the servers that are involved in the snapshot
+ * @param snapshotServers {@link org.apache.hadoop.hbase.ServerName} of the servers
+ * that are involved in the snapshot
* @throws CorruptedSnapshotException if the snapshot is invalid
* @throws IOException if there is an unexpected connection issue to the filesystem
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
index 98f9b11fa6f..8f866f63228 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManager.java
@@ -34,20 +34,23 @@ import org.apache.zookeeper.KeeperException;
*
* To implement a custom globally barriered procedure, a user needs to extend two classes:
* {@link MasterProcedureManager} and {@link RegionServerProcedureManager}. Implementation of
-* {@link MasterProcedureManager} is loaded into {@link HMaster} process via configuration
-* parameter 'hbase.procedure.master.classes', while implementation of
-* {@link RegionServerProcedureManager} is loaded into {@link HRegionServer} process via
+* {@link MasterProcedureManager} is loaded into {@link org.apache.hadoop.hbase.master.HMaster}
+* process via configuration parameter 'hbase.procedure.master.classes', while implementation of
+* {@link RegionServerProcedureManager} is loaded into
+* {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process via
* configuration parameter 'hbase.procedure.regionserver.classes'.
*
-* An example of globally barriered procedure implementation is {@link SnapshotManager} and
-* {@link RegionServerSnapshotManager}.
+* An example of globally barriered procedure implementation is
+* {@link org.apache.hadoop.hbase.master.snapshot.SnapshotManager} and
+* {@link org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager}.
*
* A globally barriered procedure is identified by its signature (usually it is the name of the
* procedure znode). During the initialization phase, the initialize methods are called by both
-* {@link HMaster} and {@link HRegionServer} witch create the procedure znode and register the
-* listeners. A procedure can be triggered by its signature and an instant name (encapsulated in
-* a {@link ProcedureDescription} object). When the servers are shutdown, the stop methods on both
-* classes are called to clean up the data associated with the procedure.
+* {@link org.apache.hadoop.hbase.master.HMaster}
+* and {@link org.apache.hadoop.hbase.regionserver.HRegionServer} which create the procedure znode
+* and register the listeners. A procedure can be triggered by its signature and an instant name
+* (encapsulated in a {@link ProcedureDescription} object). When the servers are shut down,
+* the stop methods on both classes are called to clean up the data associated with the procedure.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
index 8b4160392e5..854518939a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/MasterProcedureManagerHost.java
@@ -27,8 +27,8 @@ import org.apache.zookeeper.KeeperException;
/**
* Provides the globally barriered procedure framework and environment for
- * master oriented operations. {@link HMaster} interacts with the loaded
- * procedure manager through this class.
+ * master oriented operations. {@link org.apache.hadoop.hbase.master.HMaster}
+ * interacts with the loaded procedure manager through this class.
*/
public class MasterProcedureManagerHost extends
ProcedureManagerHost<MasterProcedureManager> {
* If a worker has successfully moved the task from state UNASSIGNED to OWNED then it owns the task.
* It keeps heart beating the manager by periodically moving the task from UNASSIGNED to OWNED
@@ -186,7 +186,8 @@ public class SplitLogWorker implements Runnable {
* acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
* guarantee that two workers will not be executing the same task, it
* is better to have workers prepare the task and then have the
- * {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} commit the work in
+ * SplitLogManager.TaskFinisher
*/
public interface TaskExecutor {
enum Status {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index da4332a2bda..397044d6f7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -41,7 +41,8 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.S
import org.apache.hadoop.ipc.RemoteException;
/**
- * A {@link ReplicationEndpoint} implementation for replicating to another HBase cluster.
+ * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint}
+ * implementation for replicating to another HBase cluster.
* For the slave cluster it selects a random number of peers
* using a replication ratio. For example, if replication ratio = 0.1
* and slave cluster has 100 region servers, 10 will be selected.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index acd49fd24d7..c3d4e5a5cc6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -79,8 +79,9 @@ import com.google.common.cache.CacheBuilder;
import com.google.protobuf.ServiceException;
/**
- * A {@link ReplicationEndpoint} endpoint which receives the WAL edits from the
- * WAL, and sends the edits to replicas of regions.
+ * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} implementation
+ * which receives the WAL edits from the WAL, and sends the edits to replicas
+ * of regions.
*/
@InterfaceAudience.Private
public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
index 7efa280afb5..5ff7207a334 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterFactory.java
@@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType;
/**
* Handles Bloom filter initialization based on configuration and serialized
- * metadata in the reader and writer of {@link StoreFile}.
+ * metadata in the reader and writer of {@link org.apache.hadoop.hbase.regionserver.StoreFile}.
*/
@InterfaceAudience.Private
public final class BloomFilterFactory {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 059edc53527..cc9f898e23f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -1706,7 +1706,7 @@ public abstract class FSUtils {
* This function is to scan the root path of the file system to get the
* degree of locality for each region on each of the servers having at least
* one block of that region.
- * This is used by the tool {@link RegionPlacementMaintainer}
+ * This is used by the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer}
*
* @param conf
* the configuration to use
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
index 76d8fe41321..937e9b2a7f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
@@ -134,7 +134,8 @@ public class HFileArchiveUtil {
}
/**
- * Get the full path to the archive directory on the configured {@link FileSystem}
+ * Get the full path to the archive directory on the configured
+ * {@link org.apache.hadoop.fs.FileSystem}
* @param conf to look for archive directory name and root directory. Cannot be null. Notes for
* testing: requires a FileSystem root directory to be specified.
* @return the full {@link Path} to the archive directory, as defined by the configuration
@@ -145,7 +146,8 @@ public class HFileArchiveUtil {
}
/**
- * Get the full path to the archive directory on the configured {@link FileSystem}
+ * Get the full path to the archive directory on the configured
+ * {@link org.apache.hadoop.fs.FileSystem}
* @param rootdir {@link Path} to the root directory where hbase files are stored (for building
* the archive path)
* @return the full {@link Path} to the archive directory, as defined by the configuration
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
index 7a58aff32cd..ccfdf1dd486 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ClusterStatusTracker.java
@@ -28,9 +28,9 @@ import org.apache.zookeeper.KeeperException;
/**
* Tracker on cluster settings up in zookeeper.
- * This is not related to {@link ClusterStatus}. That class is a data structure
- * that holds snapshot of current view on cluster. This class is about tracking
- * cluster attributes up in zookeeper.
+ * This is not related to {@link org.apache.hadoop.hbase.ClusterStatus}. That class
+ * is a data structure that holds a snapshot of the current view of the cluster. This class
+ * is about tracking cluster attributes up in zookeeper.
*
*/
@InterfaceAudience.Private
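A sketch of typical usage; the watcher and abortable are supplied by the hosting server:

```java
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

boolean isClusterUp(ZooKeeperWatcher zkw, Abortable abortable) {
  ClusterStatusTracker tracker = new ClusterStatusTracker(zkw, abortable);
  tracker.start(); // begin watching the cluster-state znode
  return tracker.isClusterUp();
}
```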
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
index 4563f9fd99d..78b3eed0864 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java
@@ -35,7 +35,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSeq
import org.apache.zookeeper.KeeperException;
/**
- * Common methods and attributes used by {@link SplitLogManager} and {@link SplitLogWorker}
+ * Common methods and attributes used by {@link org.apache.hadoop.hbase.master.SplitLogManager}
+ * and {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
* running distributed splitting of WAL logs.
*/
@InterfaceAudience.Private
* public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done)
* The ReusablePool represents a {@link PoolMap.Pool} that builds
- * on the {@link LinkedList} class. It essentially allows resources to be
+ * on the {@link java.util.LinkedList} class. It essentially allows resources to be
* checked out, at which point it is removed from this pool. When the resource
* is no longer required, it should be returned to the pool in order to be
* reused.
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
index c92450634af..de44ec6232b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
@@ -28,13 +28,14 @@ import org.apache.hadoop.hbase.io.CellOutputStream;
/**
* Encoder/Decoder for Cell.
*
- *
*
* {@link #markRegionsRecovering(ServerName, Set)} marks regions for log replay. Used by
- * {@link MasterFileSystem}
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem}
* {@link #removeRecoveringRegions(Set, Boolean)} cleans up regions that were previously marked as
* recovering. Called after all tasks are processed
* {@link #removeStaleRecoveringRegions(Set)} removes stale recovering regions. Called by
- * {@link MasterFileSystem} after Active Master is initialized
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} after Active Master is initialized
* {@link #getLastRecoveryTime()} required for garbage collector and should indicate when the last
* recovery has been made
* {@link #checkTaskStillAvailable(String)} Check that task is still there
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
index a925574efb3..707850d4f87 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
@@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
import com.google.common.annotations.VisibleForTesting;
/**
- * Coordinated operations for {@link SplitLogWorker} and {@link WALSplitterHandler} Important
+ * Coordinated operations for {@link SplitLogWorker} and
+ * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important
* methods for SplitLogWorker:
* {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is
* ready to supply the tasks
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 411b7f963be..4f511f02c16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -67,7 +67,8 @@ import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.data.Stat;
/**
- * ZooKeeper based implementation of {@link SplitLogManagerCoordination}
+ * ZooKeeper based implementation of
+ * {@link org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination}
*/
@InterfaceAudience.Private
public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
@@ -682,7 +683,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
/**
* ZooKeeper implementation of
- * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
+ * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
*/
@Override
public void removeStaleRecoveringRegions(final Set
*
*
* Splits are created in number equal to the smallest between numSplits and
- * the number of {@link HRegion}s in the table. If the number of splits is
- * smaller than the number of {@link HRegion}s then splits are spanned across
- * multiple {@link HRegion}s and are grouped the most evenly possible. In the
+ * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
+ * If the number of splits is smaller than the number of
+ * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
+ * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
+ * and are grouped the most evenly possible. In the
* case splits are uneven the bigger splits are placed first in the
* {@link InputSplit} array.
*
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
index f0944b6f3c5..281d13eef2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
@@ -59,7 +59,7 @@ implements RecordReader