HBASE-12603 Remove javadoc warnings introduced due to removal of unused imports (Varun Saxena)

stack 2014-12-01 10:13:16 -08:00
parent b6b88edf93
commit 56a03d736a
64 changed files with 223 additions and 164 deletions
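
The pattern repeated across all 64 files is the same: an import that was only needed by javadoc was removed, so every {@link}/{@see} that relied on it is rewritten with the fully qualified class name to keep the reference resolvable. A minimal before/after sketch of the idea (a hypothetical snippet; the link target is taken from the ReversedScannerCallable hunks below):

    // Before: the short name only resolves while "import com.google.protobuf.RpcController"
    // is present; once that unused import is dropped, javadoc warns about the dangling link.
    /** @param rpcFactory to create an {@link RpcController} to talk to the regionserver */

    // After: the fully qualified link needs no import and produces no warning.
    /** @param rpcFactory to create an {@link com.google.protobuf.RpcController}
     *                    to talk to the regionserver */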

View File

@@ -27,10 +27,14 @@ import java.lang.annotation.RetentionPolicy;
  * class or method not changing over time. Currently the stability can be
  * {@link Stable}, {@link Evolving} or {@link Unstable}. <br>
  *
- * <ul><li>All classes that are annotated with {@link Public} or
- * {@link LimitedPrivate} must have InterfaceStability annotation. </li>
- * <li>Classes that are {@link Private} are to be considered unstable unless
- * a different InterfaceStability annotation states otherwise.</li>
+ * <ul><li>All classes that are annotated with
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.Public} or
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}
+ * must have InterfaceStability annotation. </li>
+ * <li>Classes that are
+ * {@link org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate}
+ * are to be considered unstable unless a different InterfaceStability annotation
+ * states otherwise.</li>
  * <li>Incompatible changes must not be made to classes marked as stable.</li>
  * </ul>
  */
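
For reference, the pairing rule this annotation javadoc describes — every Public or LimitedPrivate class must also carry an InterfaceStability annotation — looks roughly like the sketch below (a hypothetical class; the annotation types are the org.apache.hadoop.hbase.classification ones referenced in the hunk above):

    import org.apache.hadoop.hbase.classification.InterfaceAudience;
    import org.apache.hadoop.hbase.classification.InterfaceStability;

    @InterfaceAudience.Public       // audience: exposed to client code
    @InterfaceStability.Evolving    // stability: may change across minor releases
    public class ExamplePublicApi {
      // class body elided in this sketch
    }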

View File

@@ -54,7 +54,8 @@ public class ReversedScannerCallable extends ScannerCallable {
  * @param scan
  * @param scanMetrics
  * @param locateStartRow The start row for locating regions
- * @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+ * @param rpcFactory to create an {@link com.google.protobuf.RpcController}
+ * to talk to the regionserver
  */
 public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
 ScanMetrics scanMetrics, byte[] locateStartRow, RpcControllerFactory rpcFactory) {
@@ -68,7 +69,8 @@ public class ReversedScannerCallable extends ScannerCallable {
  * @param scan
  * @param scanMetrics
  * @param locateStartRow The start row for locating regions
- * @param rpcFactory to create an {@link RpcController} to talk to the regionserver
+ * @param rpcFactory to create an {@link com.google.protobuf.RpcController}
+ * to talk to the regionserver
  * @param replicaId the replica id
  */
 public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan,
@@ -79,7 +81,8 @@ public class ReversedScannerCallable extends ScannerCallable {
 /**
  * @deprecated use
- * {@link #ReversedScannerCallable(ClusterConnection, TableName, Scan, ScanMetrics, byte[], RpcControllerFactory )}
+ * {@link #ReversedScannerCallable(ClusterConnection, TableName, Scan,
+ * ScanMetrics, byte[], RpcControllerFactory )}
  */
 @Deprecated
 public ReversedScannerCallable(ClusterConnection connection, TableName tableName,

View File

@@ -95,7 +95,8 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
  * @param scan the scan to execute
  * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect
  * metrics
- * @param rpcControllerFactory factory to use when creating {@link RpcController}
+ * @param rpcControllerFactory factory to use when creating
+ * {@link com.google.protobuf.RpcController}
  */
 public ScannerCallable (ClusterConnection connection, TableName tableName, Scan scan,
 ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) {

View File

@@ -30,8 +30,9 @@ import com.google.protobuf.Message;
  * Defines how value for specific column is interpreted and provides utility
  * methods like compare, add, multiply etc for them. Takes column family, column
  * qualifier and return the cell value. Its concrete implementation should
- * handle null case gracefully. Refer to {@link LongColumnInterpreter} for an
- * example.
+ * handle null case gracefully.
+ * Refer to {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter}
+ * for an example.
  * <p>
  * Takes two generic parameters and three Message parameters.
  * The cell value type of the interpreter is <T>.
@@ -127,7 +128,8 @@ Q extends Message, R extends Message> {
  * server side to construct the ColumnInterpreter. The server
  * will pass this to the {@link #initialize}
  * method. If there is no ColumnInterpreter specific data (for e.g.,
- * {@link LongColumnInterpreter}) then null should be returned.
+ * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter})
+ * then null should be returned.
  * @return the PB message
  */
 public abstract P getRequestData();

View File

@@ -41,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * <p>
  * Multiple filters can be combined using {@link FilterList}.
  * <p>
- * If an already known column qualifier is looked for, use {@link Get#addColumn}
+ * If an already known column qualifier is looked for,
+ * use {@link org.apache.hadoop.hbase.client.Get#addColumn}
  * directly rather than a filter.
  */
 @InterfaceAudience.Public

View File

@@ -40,7 +40,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * <p>
  * Multiple filters can be combined using {@link FilterList}.
  * <p>
- * If an already known row range needs to be scanned, use {@link Scan} start
+ * If an already known row range needs to be scanned,
+ * use {@link org.apache.hadoop.hbase.CellScanner} start
  * and stop rows directly rather than a filter.
  */
 @InterfaceAudience.Public

View File

@@ -52,7 +52,8 @@ import com.google.protobuf.InvalidProtocolBufferException;
  * long value), then you can pass in your own comparator instead.
  * <p>
  * You must also specify a family and qualifier. Only the value of this column
- * will be tested. When using this filter on a {@link Scan} with specified
+ * will be tested. When using this filter on a
+ * {@link org.apache.hadoop.hbase.CellScanner} with specified
  * inputs, the column to be tested should also be added as input (otherwise
  * the filter will regard the column as missing).
  * <p>

View File

@@ -36,8 +36,8 @@ import com.google.protobuf.ServiceException;
 /**
  * Base class which provides clients with an RPC connection to
- * call coprocessor endpoint {@link Service}s. Note that clients should not use this class
- * directly, except through
+ * call coprocessor endpoint {@link com.google.protobuf.Service}s.
+ * Note that clients should not use this class directly, except through
  * {@link org.apache.hadoop.hbase.client.HTableInterface#coprocessorService(byte[])}.
  */
 @InterfaceAudience.Public

View File

@@ -23,7 +23,8 @@ import org.apache.hadoop.ipc.RemoteException;
 /**
  * A {@link RemoteException} with some extra information. If source exception
- * was a {@link DoNotRetryIOException}, {@link #isDoNotRetry()} will return true.
+ * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException},
+ * {@link #isDoNotRetry()} will return true.
  * <p>A {@link RemoteException} hosts exceptions we got from the server.
  */
 @SuppressWarnings("serial")

View File

@@ -29,11 +29,11 @@ import com.google.protobuf.RpcController;
 /**
  * Used for server-side protobuf RPC service invocations. This handler allows
  * invocation exceptions to easily be passed through to the RPC server from coprocessor
- * {@link Service} implementations.
+ * {@link com.google.protobuf.Service} implementations.
  *
  * <p>
- * When implementing {@link Service} defined methods, coprocessor endpoints can use the following
- * pattern to pass exceptions back to the RPC client:
+ * When implementing {@link com.google.protobuf.Service} defined methods,
+ * coprocessor endpoints can use the following pattern to pass exceptions back to the RPC client:
  * <code>
  * public void myMethod(RpcController controller, MyRequest request, RpcCallback<MyResponse> done) {
  * MyResponse response = null;
@@ -53,7 +53,8 @@ import com.google.protobuf.RpcController;
 public class ServerRpcController implements RpcController {
 /**
  * The exception thrown within
- * {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
+ * {@link com.google.protobuf.Service#callMethod(
+ * Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)},
  * if any.
  */
 // TODO: it would be good widen this to just Throwable, but IOException is what we allow now
@@ -97,7 +98,7 @@ public class ServerRpcController implements RpcController {
 }
 /**
- * Sets an exception to be communicated back to the {@link Service} client.
+ * Sets an exception to be communicated back to the {@link com.google.protobuf.Service} client.
  * @param ioe the exception encountered during execution of the service method
  */
 public void setFailedOn(IOException ioe) {
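
For context, the exception-passing pattern the ServerRpcController javadoc above describes looks roughly like this sketch (MyRequest, MyResponse and doProcessing are placeholders carried over from the javadoc's own example, not types from the patch; the cast assumes the server-side invocation passed a ServerRpcController, and setFailedOn(IOException) is the method shown in the last hunk):

    public void myMethod(RpcController controller, MyRequest request,
        RpcCallback<MyResponse> done) {
      MyResponse response = null;
      try {
        response = doProcessing(request);   // placeholder for the endpoint's real work
      } catch (IOException ioe) {
        // hand the exception to the controller so the RPC layer reports it to the client
        ((ServerRpcController) controller).setFailedOn(ioe);
      }
      // invoke the callback with whatever was produced (possibly null on failure)
      done.run(response);
    }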

View File

@@ -281,7 +281,7 @@ public class PoolMap<K, V> implements Map<K, V> {
 /**
  * The <code>ReusablePool</code> represents a {@link PoolMap.Pool} that builds
- * on the {@link LinkedList} class. It essentially allows resources to be
+ * on the {@link java.util.LinkedList} class. It essentially allows resources to be
  * checked out, at which point it is removed from this pool. When the resource
  * is no longer required, it should be returned to the pool in order to be
  * reused.

View File

@@ -28,13 +28,14 @@ import org.apache.hadoop.hbase.io.CellOutputStream;
 /**
  * Encoder/Decoder for Cell.
  *
- * <p>Like {@link DataBlockEncoder} only Cell-based rather than KeyValue version 1 based
+ * <p>Like {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
+ * only Cell-based rather than KeyValue version 1 based
  * and without presuming an hfile context. Intent is an Interface that will work for hfile and
  * rpc.
  */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 public interface Codec {
- // TODO: interfacing with {@link DataBlockEncoder}
+ // TODO: interfacing with {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder}
 /**
  * Call flush when done. Some encoders may not put anything on the stream until flush is called.
  * On flush, let go of any resources used by the encoder.

View File

@@ -29,8 +29,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * and flushes, or to build a byte[] to send to the client. This could be backed by a
  * List<KeyValue>, but more efficient implementations will append results to a
  * byte[] to eliminate overhead, and possibly encode the cells further.
- * <p>To read Cells, use {@link CellScanner}
- * @see CellScanner
+ * <p>To read Cells, use {@link org.apache.hadoop.hbase.CellScanner}
+ * @see org.apache.hadoop.hbase.CellScanner
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

View File

@@ -32,7 +32,8 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
  * qualifiers.
  * </p>
  * <p>
- * {@code DataType}s are different from Hadoop {@link Writable}s in two
+ * {@code DataType}s are different from Hadoop
+ * {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable}s in two
  * significant ways. First, {@code DataType} describes how to serialize a
  * value, it does not encapsulate a serialized value. Second, {@code DataType}
  * implementations provide hints to consumers about relationships between the

View File

@@ -24,9 +24,11 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
 /**
  * An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Intended to make it
- * easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(
+ * byte[], int, byte[], int, int)}. Intended to make it
+ * easier to transition away from direct use of
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
  * @see RawBytes
  * @see OrderedBlob
  * @see OrderedBlobVar

View File

@@ -24,10 +24,10 @@ import org.apache.hadoop.hbase.util.PositionedByteRange;
 /**
  * An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Includes a
- * termination marker following the raw {@code byte[]} value. Intended to
- * make it easier to transition away from direct use of {@link Bytes}.
- * @see Bytes#putBytes(byte[], int, byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}.
+ * Includes a termination marker following the raw {@code byte[]} value. Intended to make it easier
+ * to transition away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)
  * @see RawBytes
  * @see OrderedBlob
  */

View File

@@ -23,10 +23,11 @@ import org.apache.hadoop.hbase.util.Order;
 /**
  * An {@code DataType} that encodes fixed-length values encoded using
- * {@link Bytes#toBytes(String)}. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+ * Intended to make it easier to transition away from direct use of
+ * {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
  * @see RawString
  */
 @InterfaceAudience.Public

View File

@@ -23,11 +23,12 @@ import org.apache.hadoop.hbase.util.Order;
 /**
  * An {@code DataType} that encodes variable-length values encoded using
- * {@link Bytes#toBytes(String)}. Includes a termination marker following the
+ * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}.
+ * Includes a termination marker following the
  * raw {@code byte[]} value. Intended to make it easier to transition
- * away from direct use of {@link Bytes}.
- * @see Bytes#toBytes(String)
- * @see Bytes#toString(byte[], int, int)
+ * away from direct use of {@link org.apache.hadoop.hbase.util.Bytes}.
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String)
+ * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int)
  * @see RawString
  * @see OrderedString
  */

View File

@@ -35,7 +35,7 @@ import com.google.common.annotations.VisibleForTesting;
 public abstract class AbstractPositionedByteRange extends AbstractByteRange implements
 PositionedByteRange {
 /**
- * The current index into the range. Like {@link ByteBuffer} position, it
+ * The current index into the range. Like {@link java.nio.ByteBuffer} position, it
  * points to the next value that will be read/written in the array. It
  * provides the appearance of being 0-indexed, even though its value is
  * calculated according to offset.
@@ -182,7 +182,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
 @Override
 public abstract int putVLong(long val);
 /**
- * Similar to {@link ByteBuffer#flip()}. Sets length to position, position to
+ * Similar to {@link java.nio.ByteBuffer#flip()}. Sets length to position, position to
  * offset.
  */
 @VisibleForTesting
@@ -194,7 +194,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl
 }
 /**
- * Similar to {@link ByteBuffer#clear()}. Sets position to 0, length to
+ * Similar to {@link java.nio.ByteBuffer#clear()}. Sets position to 0, length to
  * capacity.
  */
 @VisibleForTesting

View File

@@ -33,7 +33,7 @@ import com.google.common.base.Supplier;
 /**
  * A simple concurrent map of sets. This is similar in concept to
- * {@link Multiset}, with the following exceptions:
+ * {@link com.google.common.collect.Multiset}, with the following exceptions:
  * <ul>
  * <li>The set is thread-safe and concurrent: no external locking or
  * synchronization is required. This is important for the use case where
@@ -109,7 +109,7 @@ public class ConcurrentIndex<K, V> {
  * associated. <b>Note:</b> if the caller wishes to add or removes values
  * to under the specified as they're iterating through the returned value,
  * they should make a defensive copy; otherwise, a
- * {@link ConcurrentModificationException} may be thrown.
+ * {@link java.util.ConcurrentModificationException} may be thrown.
  * @param key The key
  * @return All values associated with the specified key or null if no values
  * are associated with the key.

View File

@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  * their own index into the array.
  * </p>
  * <p>
- * Designed to be a slimmed-down, mutable alternative to {@link ByteBuffer}.
+ * Designed to be a slimmed-down, mutable alternative to {@link java.nio.ByteBuffer}.
  * </p>
  */
 @InterfaceAudience.Public

View File

@@ -51,7 +51,8 @@ import org.apache.zookeeper.ZooKeeper;
  * This is an example showing how a RegionObserver could configured
  * via ZooKeeper in order to control a Region compaction, flush, and scan policy.
  *
- * This also demonstrated the use of shared {@link RegionObserver} state.
+ * This also demonstrated the use of shared
+ * {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state.
  * See {@link RegionCoprocessorEnvironment#getSharedData()}.
  *
  * This would be useful for an incremental backup tool, which would indicate the last
@@ -59,7 +60,8 @@ import org.apache.zookeeper.ZooKeeper;
  * inserted since (based on wall clock time).
  *
  * This implements org.apache.zookeeper.Watcher directly instead of using
- * {@link ZooKeeperWatcher}, because RegionObservers come and go and currently
+ * {@link org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher},
+ * because RegionObservers come and go and currently
  * listeners registered with ZooKeeperWatcher cannot be removed.
  */
 public class ZooKeeperScanPolicyObserver extends BaseRegionObserver {

View File

@@ -35,9 +35,11 @@ import org.apache.zookeeper.KeeperException;
  * {@link BaseHFileCleanerDelegate} that only cleans HFiles that don't belong to a table that is
  * currently being archived.
  * <p>
- * This only works properly if the {@link TimeToLiveHFileCleaner} is also enabled (it always should
- * be), since it may take a little time for the ZK notification to propagate, in which case we may
- * accidentally delete some files.
+ * This only works properly if the
+ * {@link org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner}
+ * is also enabled (it always should be), since it may take a little time
+ * for the ZK notification to propagate, in which case we may accidentally
+ * delete some files.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class LongTermArchivingHFileCleaner extends BaseHFileCleanerDelegate {

View File

@@ -49,7 +49,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * <p>
  * This also allows one to run the scan from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this scanner can be used to
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool,
+ * to a pure-hdfs cluster, and this scanner can be used to
  * run the scan directly over the snapshot files. The snapshot should not be deleted while there
  * are open scanners reading from snapshot files.
  *
@@ -65,7 +66,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * snapshot files, the job has to be run as the HBase user or the user must have group or other
  * priviledges in the filesystem (See HBASE-8369). Note that, given other users access to read from
  * snapshot/data files will completely circumvent the access control enforced by HBase.
- * @see TableSnapshotInputFormat
+ * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving

View File

@@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.constraint;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 /**
- * Exception that a user defined constraint throws on failure of a {@link Put}.
- * <p>
- * Does <b>NOT</b> attempt the {@link Put} multiple times, since the constraint
- * <it>should</it> fail every time for the same {@link Put} (it should be
+ * Exception that a user defined constraint throws on failure of a
+ * {@link org.apache.hadoop.hbase.client.Put}.
+ * <p>Does <b>NOT</b> attempt the
+ * {@link org.apache.hadoop.hbase.client.Put} multiple times,
+ * since the constraint <it>should</it> fail every time for
+ * the same {@link org.apache.hadoop.hbase.client.Put} (it should be
  * idempotent).
  */
 @InterfaceAudience.Private

View File

@@ -42,11 +42,11 @@ import com.google.common.annotations.VisibleForTesting;
  * <P>
  * Methods required for task life circle: <BR>
  * {@link #markRegionsRecovering(ServerName, Set)} mark regions for log replaying. Used by
- * {@link MasterFileSystem} <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} <BR>
  * {@link #removeRecoveringRegions(Set, Boolean)} make regions cleanup that previous were marked as
  * recovering. Called after all tasks processed <BR>
  * {@link #removeStaleRecoveringRegions(Set)} remove stale recovering. called by
- * {@link MasterFileSystem} after Active Master is initialized <BR>
+ * {@link org.apache.hadoop.hbase.master.MasterFileSystem} after Active Master is initialized <BR>
  * {@link #getLastRecoveryTime()} required for garbage collector and should indicate when the last
  * recovery has been made<BR>
  * {@link #checkTaskStillAvailable(String)} Check that task is still there <BR>

View File

@@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
 import com.google.common.annotations.VisibleForTesting;
 /**
- * Coordinated operations for {@link SplitLogWorker} and {@link WALSplitterHandler} Important
+ * Coordinated operations for {@link SplitLogWorker} and
+ * {@link org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler} Important
  * methods for SplitLogWorker: <BR>
  * {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is
  * ready to supply the tasks <BR>

View File

@@ -67,7 +67,8 @@ import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.data.Stat;
 /**
- * ZooKeeper based implementation of {@link SplitLogManagerCoordination}
+ * ZooKeeper based implementation of
+ * {@link org.apache.hadoop.hbase.master.SplitLogManagerCoordination}
  */
 @InterfaceAudience.Private
 public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
@@ -682,7 +683,8 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 /**
  * ZooKeeper implementation of
- * {@link SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
+ * {@link org.apache.hadoop.hbase.master.
+ * SplitLogManagerCoordination#removeStaleRecoveringRegions(Set)}
  */
 @Override
 public void removeStaleRecoveringRegions(final Set<String> knownFailedServers)
@@ -904,9 +906,10 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
 /**
- * {@link SplitLogManager} can use objects implementing this interface to finish off a partially
- * done task by {@link SplitLogWorker}. This provides a serialization point at the end of the task
- * processing. Must be restartable and idempotent.
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} can use objects implementing this
+ * interface to finish off a partially done task by
+ * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}. This provides a
+ * serialization point at the end of the task processing. Must be restartable and idempotent.
  */
 public interface TaskFinisher {
 /**
@@ -1067,7 +1070,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
  * Asynchronous handler for zk create RESCAN-node results. Retries on failures.
  * <p>
  * A RESCAN node is created using PERSISTENT_SEQUENTIAL flag. It is a signal for all the
- * {@link SplitLogWorker}s to rescan for new tasks.
+ * {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}s to rescan for new tasks.
  */
 public class CreateRescanAsyncCallback implements AsyncCallback.StringCallback {
 private final Log LOG = LogFactory.getLog(CreateRescanAsyncCallback.class);

View File

@@ -583,8 +583,8 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
  * Next part is related to WALSplitterHandler
  */
 /**
- * endTask() can fail and the only way to recover out of it is for the {@link SplitLogManager} to
- * timeout the task node.
+ * endTask() can fail and the only way to recover out of it is for the
+ * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
  * @param slt
  * @param ctr
  */

View File

@@ -212,8 +212,9 @@ public interface RegionObserver extends Coprocessor {
  * options:
  * <ul>
  * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
- * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
- * scanner, applying its own policy to what gets written.</li>
+ * from this method. The custom scanner can then inspect
+ * {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped scanner, applying its own
+ * policy to what gets written.</li>
  * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
  * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
  * bypassing core compaction using this approach must write out new store files themselves or the
@@ -238,8 +239,9 @@ public interface RegionObserver extends Coprocessor {
  * options:
  * <ul>
  * <li>Wrap the provided {@link InternalScanner} with a custom implementation that is returned
- * from this method. The custom scanner can then inspect {@link KeyValue}s from the wrapped
- * scanner, applying its own policy to what gets written.</li>
+ * from this method. The custom scanner can then inspect
+ * {@link org.apache.hadoop.hbase.KeyValue}s from the wrapped scanner, applying its own
+ * policy to what gets written.</li>
  * <li>Call {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} and provide a
  * custom implementation for writing of new {@link StoreFile}s. <strong>Note: any implementations
  * bypassing core compaction using this approach must write out new store files themselves or the
@@ -269,7 +271,8 @@ public interface RegionObserver extends Coprocessor {
  * effect in this hook.
  * @param c the environment provided by the region server
  * @param store the store being compacted
- * @param scanners the list {@link StoreFileScanner}s to be read from
+ * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+ * to be read from
  * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
  * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
  * files
@@ -293,7 +296,8 @@ public interface RegionObserver extends Coprocessor {
  * effect in this hook.
  * @param c the environment provided by the region server
  * @param store the store being compacted
- * @param scanners the list {@link StoreFileScanner}s to be read from
+ * @param scanners the list {@link org.apache.hadoop.hbase.regionserver.StoreFileScanner}s
+ * to be read from
  * @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
  * @param earliestPutTs timestamp of the earliest put that was found in any of the involved store
  * files

View File

@@ -339,9 +339,9 @@ public class HFileBlock implements Cacheable {
 /**
  * Returns the buffer this block stores internally. The clients must not
  * modify the buffer object. This method has to be public because it is
- * used in {@link CompoundBloomFilter} to avoid object creation on every
- * Bloom filter lookup, but has to be used with caution. Checksum data
- * is not included in the returned buffer but header data is.
+ * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter} to avoid object
+ * creation on every Bloom filter lookup, but has to be used with caution.
+ * Checksum data is not included in the returned buffer but header data is.
  *
  * @return the buffer of this block for read-only operations
  */
@@ -354,7 +354,7 @@ public class HFileBlock implements Cacheable {
 /**
  * Returns the buffer of this block, including header data. The clients must
  * not modify the buffer object. This method has to be public because it is
- * used in {@link BucketCache} to avoid buffer copy.
+ * used in {@link org.apache.hadoop.hbase.io.hfile.bucket.BucketCache} to avoid buffer copy.
  *
  * @return the buffer with header and checksum included for read-only operations
  */

View File

@@ -54,9 +54,9 @@ import org.apache.hadoop.util.StringUtils;
  * ({@link BlockIndexReader}) single-level and multi-level block indexes.
  *
  * Examples of how to use the block index writer can be found in
- * {@link CompoundBloomFilterWriter} and {@link HFileWriterV2}. Examples of how
- * to use the reader can be found in {@link HFileReaderV2} and
- * TestHFileBlockIndex.
+ * {@link org.apache.hadoop.hbase.util.CompoundBloomFilterWriter} and
+ * {@link HFileWriterV2}. Examples of how to use the reader can be
+ * found in {@link HFileReaderV2} and TestHFileBlockIndex.
  */
 @InterfaceAudience.Private
 public class HFileBlockIndex {

View File

@@ -131,7 +131,7 @@ public interface HFileScanner {
  */
 ByteBuffer getValue();
 /**
- * @return Instance of {@link KeyValue}.
+ * @return Instance of {@link org.apache.hadoop.hbase.KeyValue}.
  */
 Cell getKeyValue();
 /**

View File

@@ -57,16 +57,16 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * {@link ConcurrentHashMap} and with a non-blocking eviction thread giving
  * constant-time {@link #cacheBlock} and {@link #getBlock} operations.<p>
  *
- * Contains three levels of block priority to allow for
- * scan-resistance and in-memory families {@link HColumnDescriptor#setInMemory(boolean)} (An
- * in-memory column family is a column family that should be served from memory if possible):
+ * Contains three levels of block priority to allow for scan-resistance and in-memory families
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#setInMemory(boolean)} (An in-memory column
+ * family is a column family that should be served from memory if possible):
  * single-access, multiple-accesses, and in-memory priority.
  * A block is added with an in-memory priority flag if
- * {@link HColumnDescriptor#isInMemory()}, otherwise a block becomes a single access
- * priority the first time it is read into this block cache. If a block is accessed again while
- * in cache, it is marked as a multiple access priority block. This delineation of blocks is used
- * to prevent scans from thrashing the cache adding a least-frequently-used
- * element to the eviction algorithm.<p>
+ * {@link org.apache.hadoop.hbase.HColumnDescriptor#isInMemory()}, otherwise a block becomes a
+ * single access priority the first time it is read into this block cache. If a block is
+ * accessed again while in cache, it is marked as a multiple access priority block. This
+ * delineation of blocks is used to prevent scans from thrashing the cache adding a
+ * least-frequently-used element to the eviction algorithm.<p>
  *
  * Each priority is given its own chunk of the total cache to ensure
  * fairness during eviction. Each priority will retain close to its maximum

View File

@@ -82,8 +82,8 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
  * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache}
  *
  * <p>BucketCache can be used as mainly a block cache (see
- * {@link CombinedBlockCache}), combined with LruBlockCache to decrease CMS GC and
- * heap fragmentation.
+ * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
+ * LruBlockCache to decrease CMS GC and heap fragmentation.
  *
  * <p>It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
  * blocks) to enlarge cache space via

View File

@@ -38,7 +38,7 @@ import org.apache.hadoop.util.ToolRunner;
 /**
  * A job with a map to count rows.
  * Map outputs table rows IF the input row has columns that have content.
- * Uses an {@link IdentityReducer}
+ * Uses a org.apache.hadoop.mapred.lib.IdentityReducer
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable

View File

@@ -103,9 +103,11 @@ implements InputFormat<ImmutableBytesWritable, Result> {
  * Calculates the splits that will serve as input for the map tasks.
  * <ul>
  * Splits are created in number equal to the smallest between numSplits and
- * the number of {@link HRegion}s in the table. If the number of splits is
- * smaller than the number of {@link HRegion}s then splits are spanned across
- * multiple {@link HRegion}s and are grouped the most evenly possible. In the
+ * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
+ * If the number of splits is smaller than the number of
+ * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
+ * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
+ * and are grouped the most evenly possible. In the
  * case splits are uneven the bigger splits are placed first in the
  * {@link InputSplit} array.
  *

View File

@@ -59,7 +59,7 @@ implements RecordReader<ImmutableBytesWritable, Result> {
 }
 /**
- * @param htable the {@link HTable} to scan.
+ * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
  */
 public void setHTable(Table htable) {
 this.recordReaderImpl.setHTable(htable);

View File

@@ -113,7 +113,7 @@ public class TableRecordReaderImpl {
 return this.startRow;
 }
 /**
- * @param htable the {@link HTable} to scan.
+ * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
  */
 public void setHTable(Table htable) {
 Configuration conf = htable.getConfiguration();

View File

@@ -32,7 +32,8 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 /**
- * Simple {@link InputFormat} for {@link WAL} files.
+ * Simple {@link InputFormat} for {@link org.apache.hadoop.hbase.wal.WAL}
+ * files.
  * @deprecated use {@link WALInputFormat}
  */
 @Deprecated

View File

@@ -59,9 +59,9 @@ extends TableReducer<Writable, Mutation, Writable> {
 /**
  * Writes each given record, consisting of the row key and the given values,
- * to the configured {@link OutputFormat}. It is emitting the row key and each
- * {@link org.apache.hadoop.hbase.client.Put Put} or
- * {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
+ * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}.
+ * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put}
+ * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs.
  *
  * @param key The current row key.
  * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or

View File

@@ -119,7 +119,7 @@ public class TableRecordReaderImpl {
 /**
  * Sets the HBase table.
  *
- * @param htable The {@link HTable} to scan.
+ * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
  */
 public void setHTable(Table htable) {
 Configuration conf = htable.getConfiguration();

View File

@@ -49,9 +49,9 @@ import com.google.common.annotations.VisibleForTesting;
  * wals, etc) directly to provide maximum performance. The snapshot is not required to be
  * restored to the live cluster or cloned. This also allows to run the mapreduce job from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
- * {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this InputFormat can be used to
- * run the mapreduce job directly over the snapshot files. The snapshot should not be deleted
- * while there are jobs reading from snapshot files.
+ * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster,
+ * and this InputFormat can be used to run the mapreduce job directly over the snapshot files.
+ * The snapshot should not be deleted while there are jobs reading from snapshot files.
  * <p>
  * Usage is similar to TableInputFormat, and
  * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job,
@@ -68,8 +68,8 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>
  * Internally, this input format restores the snapshot into the given tmp directory. Similar to
  * {@link TableInputFormat} an InputSplit is created per region. The region is opened for reading
- * from each RecordReader. An internal RegionScanner is used to execute the {@link Scan} obtained
- * from the user.
+ * from each RecordReader. An internal RegionScanner is used to execute the
+ * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user.
  * <p>
  * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from
  * snapshot files and data files.
@@ -79,7 +79,7 @@ import com.google.common.annotations.VisibleForTesting;
  * user or the user must have group or other privileges in the filesystem (See HBASE-8369).
  * Note that, given other users access to read from snapshot/data files will completely circumvent
  * the access control enforced by HBase.
- * @see TableSnapshotScanner
+ * @see org.apache.hadoop.hbase.client.TableSnapshotScanner
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving

View File

@@ -150,7 +150,7 @@ import com.google.protobuf.Service;
  *
  * <p>You can also shutdown just this master. Call {@link #stopMaster()}.
  *
- * @see Watcher
+ * @see org.apache.zookeeper.Watcher
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")

View File

@@ -74,8 +74,9 @@ import com.google.common.annotations.VisibleForTesting;
  * <p>SplitLogManager monitors the tasks that it creates using the
  * timeoutMonitor thread. If a task's progress is slow then
  * {@link SplitLogManagerCoordination#checkTasks} will take away the
- * task from the owner {@link SplitLogWorker} and the task will be up for grabs again. When the
- * task is done then it is deleted by SplitLogManager.
+ * task from the owner {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
+ * and the task will be up for grabs again. When the task is done then it is
+ * deleted by SplitLogManager.
  *
  * <p>Clients call {@link #splitLogDistributed(Path)} to split a region server's
  * log files. The caller thread waits in this method until all the log files

View File

@@ -60,8 +60,9 @@ import com.google.common.collect.Sets;
 /**
  * The base class for load balancers. It provides the the functions used to by
- * {@link AssignmentManager} to assign regions in the edge cases. It doesn't
- * provide an implementation of the actual balancing algorithm.
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to assign regions
+ * in the edge cases. It doesn't provide an implementation of the
+ * actual balancing algorithm.
  *
  */
 public abstract class BaseLoadBalancer implements LoadBalancer {

View File

@@ -41,16 +41,17 @@ import org.apache.hadoop.hbase.master.balancer.FavoredNodesPlan.Position;
 import org.apache.hadoop.hbase.util.Pair;
 /**
- * An implementation of the {@link LoadBalancer} that assigns favored nodes for
- * each region. There is a Primary RegionServer that hosts the region, and then
- * there is Secondary and Tertiary RegionServers. Currently, the favored nodes
- * information is used in creating HDFS files - the Primary RegionServer passes
- * the primary, secondary, tertiary node addresses as hints to the DistributedFileSystem
- * API for creating files on the filesystem. These nodes are treated as hints by
- * the HDFS to place the blocks of the file. This alleviates the problem to do with
- * reading from remote nodes (since we can make the Secondary RegionServer as the new
- * Primary RegionServer) after a region is recovered. This should help provide consistent
- * read latencies for the regions even when their primary region servers die.
+ * An implementation of the {@link org.apache.hadoop.hbase.master.LoadBalancer} that
+ * assigns favored nodes for each region. There is a Primary RegionServer that hosts
+ * the region, and then there is Secondary and Tertiary RegionServers. Currently, the
+ * favored nodes information is used in creating HDFS files - the Primary RegionServer
+ * passes the primary, secondary, tertiary node addresses as hints to the
+ * DistributedFileSystem API for creating files on the filesystem. These nodes are
+ * treated as hints by the HDFS to place the blocks of the file. This alleviates the
+ * problem to do with reading from remote nodes (since we can make the Secondary
+ * RegionServer as the new Primary RegionServer) after a region is recovered. This
+ * should help provide consistent read latencies for the regions even when their
+ * primary region servers die.
  *
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)

View File

@@ -51,7 +51,8 @@ import com.google.common.collect.MinMaxPriorityQueue;
  * <p>On cluster startup, bulk assignment can be used to determine
  * locations for all Regions in a cluster.
  *
- * <p>This classes produces plans for the {@link AssignmentManager} to execute.
+ * <p>This classes produces plans for the
+ * {@link org.apache.hadoop.hbase.master.AssignmentManager} to execute.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
 public class SimpleLoadBalancer extends BaseLoadBalancer {

View File

@@ -33,9 +33,10 @@ import org.apache.hadoop.hbase.master.MasterServices;
 /**
  * Handle logReplay work from SSH. Having a separate handler is not to block SSH in re-assigning
  * regions from dead servers. Otherwise, available SSH handlers could be blocked by logReplay work
- * (from {@link MasterFileSystem#splitLog(ServerName)}). During logReplay, if a receiving RS(say A)
- * fails again, regions on A won't be able to be assigned to another live RS which causes the log
- * replay unable to complete because WAL edits replay depends on receiving RS to be live
+ * (from {@link org.apache.hadoop.hbase.master.MasterFileSystem#splitLog(ServerName)}).
+ * During logReplay, if a receiving RS(say A) fails again, regions on A won't be able
+ * to be assigned to another live RS which causes the log replay unable to complete
+ * because WAL edits replay depends on receiving RS to be live
  */
 @InterfaceAudience.Private
 public class LogReplayHandler extends EventHandler {

View File

@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 /**
  * Implementors tote an HRegionInfo instance.
- * This is a marker interface that can be put on {@link EventHandler}s that
+ * This is a marker interface that can be put on {@link java.beans.EventHandler}s that
  * have an {@link HRegionInfo}.
  */
 @InterfaceAudience.Private

View File

@ -98,7 +98,8 @@ public final class MasterSnapshotVerifier {
/** /**
* Verify that the snapshot in the directory is a valid snapshot * Verify that the snapshot in the directory is a valid snapshot
* @param snapshotDir snapshot directory to check * @param snapshotDir snapshot directory to check
* @param snapshotServers {@link ServerName} of the servers that are involved in the snapshot * @param snapshotServers {@link org.apache.hadoop.hbase.ServerName} of the servers
* that are involved in the snapshot
* @throws CorruptedSnapshotException if the snapshot is invalid * @throws CorruptedSnapshotException if the snapshot is invalid
* @throws IOException if there is an unexpected connection issue to the filesystem * @throws IOException if there is an unexpected connection issue to the filesystem
*/ */

View File

@ -34,20 +34,23 @@ import org.apache.zookeeper.KeeperException;
* *
* To implement a custom globally barriered procedure, user needs to extend two classes: * To implement a custom globally barriered procedure, user needs to extend two classes:
* {@link MasterProcedureManager} and {@link RegionServerProcedureManager}. Implementation of * {@link MasterProcedureManager} and {@link RegionServerProcedureManager}. Implementation of
* {@link MasterProcedureManager} is loaded into {@link HMaster} process via configuration * {@link MasterProcedureManager} is loaded into {@link org.apache.hadoop.hbase.master.HMaster}
* parameter 'hbase.procedure.master.classes', while implementation of * process via configuration parameter 'hbase.procedure.master.classes', while implementation of
* {@link RegionServerProcedureManager} is loaded into {@link HRegionServer} process via * {@link RegionServerProcedureManager} is loaded into
* {@link org.apache.hadoop.hbase.regionserver.HRegionServer} process via
* configuration parameter 'hbase.procedure.regionserver.classes'. * configuration parameter 'hbase.procedure.regionserver.classes'.
* *
* An example of globally barriered procedure implementation is {@link SnapshotManager} and * An example of globally barriered procedure implementation is
* {@link RegionServerSnapshotManager}. * {@link org.apache.hadoop.hbase.master.snapshot.SnapshotManager} and
* {@link org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager}.
* *
* A globally barriered procedure is identified by its signature (usually it is the name of the * A globally barriered procedure is identified by its signature (usually it is the name of the
* procedure znode). During the initialization phase, the initialize methods are called by both * procedure znode). During the initialization phase, the initialize methods are called by both
* {@link HMaster} and {@link HRegionServer} witch create the procedure znode and register the * {@link org.apache.hadoop.hbase.master.HMaster}
* listeners. A procedure can be triggered by its signature and an instant name (encapsulated in * and {@link org.apache.hadoop.hbase.regionserver.HRegionServer} which create the procedure znode
* a {@link ProcedureDescription} object). When the servers are shutdown, the stop methods on both * and register the listeners. A procedure can be triggered by its signature and an instant name
* classes are called to clean up the data associated with the procedure. * (encapsulated in a {@link ProcedureDescription} object). When the servers are shutdown,
* the stop methods on both classes are called to clean up the data associated with the procedure.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
@InterfaceStability.Evolving @InterfaceStability.Evolving
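As a quick illustration of the two configuration parameters named in the javadoc above, a hedged sketch of wiring custom procedure managers in programmatically; the com.example class names are hypothetical stand-ins for real MasterProcedureManager and RegionServerProcedureManager implementations:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ProcedureManagerConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical implementations; classes listed under these keys are
        // instantiated by HMaster and HRegionServer at startup, which then
        // create the procedure znode and register the listeners.
        conf.set("hbase.procedure.master.classes",
            "com.example.procedure.MyMasterProcedureManager");
        conf.set("hbase.procedure.regionserver.classes",
            "com.example.procedure.MyRegionServerProcedureManager");
        System.out.println(conf.get("hbase.procedure.master.classes"));
      }
    }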

View File

@ -27,8 +27,8 @@ import org.apache.zookeeper.KeeperException;
/** /**
* Provides the globally barriered procedure framework and environment for * Provides the globally barriered procedure framework and environment for
* master oriented operations. {@link HMaster} interacts with the loaded * master oriented operations. {@link org.apache.hadoop.hbase.master.HMaster}
* procedure manager through this class. * interacts with the loaded procedure manager through this class.
*/ */
public class MasterProcedureManagerHost extends public class MasterProcedureManagerHost extends
ProcedureManagerHost<MasterProcedureManager> { ProcedureManagerHost<MasterProcedureManager> {

View File

@ -29,7 +29,8 @@ import org.apache.zookeeper.KeeperException;
/** /**
* Provides the globally barriered procedure framework and environment * Provides the globally barriered procedure framework and environment
* for region server oriented operations. {@link HRegionServer} interacts * for region server oriented operations.
* {@link org.apache.hadoop.hbase.regionserver.HRegionServer} interacts
* with the loaded procedure manager through this class. * with the loaded procedure manager through this class.
*/ */
public class RegionServerProcedureManagerHost extends public class RegionServerProcedureManagerHost extends

View File

@ -23,8 +23,10 @@ import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
/** /**
* Wraps together the mutations which are applied as a batch to the region and their operation * Wraps together the mutations which are applied as a batch to the region and their operation
* status and WALEdits. * status and WALEdits.
* @see RegionObserver#preBatchMutate(ObserverContext, MiniBatchOperationInProgress) * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate(
* @see RegionObserver#postBatchMutate(ObserverContext, MiniBatchOperationInProgress) * ObserverContext, MiniBatchOperationInProgress)
* @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
* ObserverContext, MiniBatchOperationInProgress)
* @param <T> Pair<Mutation, Integer> pair of Mutations and associated rowlock ids . * @param <T> Pair<Mutation, Integer> pair of Mutations and associated rowlock ids .
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
@ -61,7 +63,8 @@ public class MiniBatchOperationInProgress<T> {
/** /**
* Sets the status code for the operation(Mutation) at the specified position. * Sets the status code for the operation(Mutation) at the specified position.
* By setting this status, {@link RegionObserver} can make HRegion to skip Mutations. * By setting this status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
* can make HRegion to skip Mutations.
* @param index * @param index
* @param opStatus * @param opStatus
*/ */
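A minimal sketch of the skip pattern the javadoc above alludes to, assuming the preBatchMutate hook and OperationStatus constants as they existed around this HBase line; the "skip-me" attribute is a made-up marker, not an HBase convention:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Mutation;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
    import org.apache.hadoop.hbase.regionserver.OperationStatus;

    public class SkippingRegionObserver extends BaseRegionObserver {
      @Override
      public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> ctx,
          MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
        for (int i = 0; i < miniBatchOp.size(); i++) {
          Mutation m = miniBatchOp.getOperation(i);
          // Marking the operation as SUCCESS here makes HRegion skip applying it.
          if (m.getAttribute("skip-me") != null) {
            miniBatchOp.setOperationStatus(i, OperationStatus.SUCCESS);
          }
        }
      }
    }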

View File

@ -54,7 +54,8 @@ public interface RegionScanner extends InternalScanner {
boolean reseek(byte[] row) throws IOException; boolean reseek(byte[] row) throws IOException;
/** /**
* @return The preferred max buffersize. See {@link Scan#setMaxResultSize(long)} * @return The preferred max buffersize. See
* {@link org.apache.hadoop.hbase.client.Scan#setMaxResultSize(long)}
*/ */
long getMaxResultSize(); long getMaxResultSize();
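For context, a tiny client-side sketch of the setting that getMaxResultSize() mirrors; the 2 MB figure is only illustrative:

    import org.apache.hadoop.hbase.client.Scan;

    public class MaxResultSizeExample {
      public static void main(String[] args) {
        Scan scan = new Scan();
        // Ask the servers to return at most ~2 MB per scan RPC; the server-side
        // RegionScanner reports the effective cap through getMaxResultSize().
        scan.setMaxResultSize(2L * 1024 * 1024);
        System.out.println(scan.getMaxResultSize());
      }
    }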

View File

@ -45,9 +45,9 @@ import com.google.common.annotations.VisibleForTesting;
/** /**
* This worker is spawned in every regionserver, including master. The Worker waits for log * This worker is spawned in every regionserver, including master. The Worker waits for log
* splitting tasks to be put up by the {@link SplitLogManager} running in the master and races with * splitting tasks to be put up by the {@link org.apache.hadoop.hbase.master.SplitLogManager}
* other workers in other serves to acquire those tasks. The coordination is done via coordination * running in the master and races with other workers in other serves to acquire those tasks.
* engine. * The coordination is done via coordination engine.
* <p> * <p>
* If a worker has successfully moved the task from state UNASSIGNED to OWNED then it owns the task. * If a worker has successfully moved the task from state UNASSIGNED to OWNED then it owns the task.
* It keeps heart beating the manager by periodically moving the task from UNASSIGNED to OWNED * It keeps heart beating the manager by periodically moving the task from UNASSIGNED to OWNED
@ -186,7 +186,8 @@ public class SplitLogWorker implements Runnable {
* acquired by a {@link SplitLogWorker}. Since there isn't a water-tight * acquired by a {@link SplitLogWorker}. Since there isn't a water-tight
* guarantee that two workers will not be executing the same task therefore it * guarantee that two workers will not be executing the same task therefore it
* is better to have workers prepare the task and then have the * is better to have workers prepare the task and then have the
* {@link SplitLogManager} commit the work in SplitLogManager.TaskFinisher * {@link org.apache.hadoop.hbase.master.SplitLogManager} commit the work in
* SplitLogManager.TaskFinisher
*/ */
public interface TaskExecutor { public interface TaskExecutor {
enum Status { enum Status {

View File

@ -41,7 +41,8 @@ import org.apache.hadoop.hbase.replication.regionserver.ReplicationSinkManager.S
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
/** /**
* A {@link ReplicationEndpoint} implementation for replicating to another HBase cluster. * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint}
* implementation for replicating to another HBase cluster.
* For the slave cluster it selects a random number of peers * For the slave cluster it selects a random number of peers
* using a replication ratio. For example, if replication ration = 0.1 * using a replication ratio. For example, if replication ration = 0.1
* and slave cluster has 100 region servers, 10 will be selected. * and slave cluster has 100 region servers, 10 will be selected.
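The ratio arithmetic mentioned in the javadoc above, written out as a hedged sketch; this is not the actual endpoint code, which also randomizes the candidate list before picking sinks:

    public class ReplicationRatioExample {
      // e.g. 100 slave region servers at ratio 0.1 -> 10 sinks, never fewer than 1.
      static int numberOfSinks(int slaveRegionServers, double ratio) {
        return Math.max(1, (int) Math.round(slaveRegionServers * ratio));
      }

      public static void main(String[] args) {
        System.out.println(numberOfSinks(100, 0.1));  // prints 10
      }
    }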

View File

@ -79,8 +79,9 @@ import com.google.common.cache.CacheBuilder;
import com.google.protobuf.ServiceException; import com.google.protobuf.ServiceException;
/** /**
* A {@link ReplicationEndpoint} endpoint which receives the WAL edits from the * A {@link org.apache.hadoop.hbase.replication.ReplicationEndpoint} endpoint
* WAL, and sends the edits to replicas of regions. * which receives the WAL edits from the WAL, and sends the edits to replicas
* of regions.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint { public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {

View File

@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.regionserver.BloomType;
/** /**
* Handles Bloom filter initialization based on configuration and serialized * Handles Bloom filter initialization based on configuration and serialized
* metadata in the reader and writer of {@link StoreFile}. * metadata in the reader and writer of {@link org.apache.hadoop.hbase.regionserver.StoreFile}.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public final class BloomFilterFactory { public final class BloomFilterFactory {
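A short, hedged example of the column-family configuration that BloomFilterFactory later acts on when store files are written; the table and family names are hypothetical:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class BloomFilterConfigExample {
      public static void main(String[] args) {
        // Hypothetical table/family; the bloom type chosen here is the setting
        // consulted when StoreFiles for this family are written and read.
        HColumnDescriptor family = new HColumnDescriptor("d");
        family.setBloomFilterType(BloomType.ROWCOL);
        HTableDescriptor table = new HTableDescriptor(TableName.valueOf("example"));
        table.addFamily(family);
        System.out.println(family.getBloomFilterType());
      }
    }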

View File

@ -1706,7 +1706,7 @@ public abstract class FSUtils {
* This function is to scan the root path of the file system to get the * This function is to scan the root path of the file system to get the
* degree of locality for each region on each of the servers having at least * degree of locality for each region on each of the servers having at least
* one block of that region. * one block of that region.
* This is used by the tool {@link RegionPlacementMaintainer} * This is used by the tool {@link org.apache.hadoop.hbase.master.RegionPlacementMaintainer}
* *
* @param conf * @param conf
* the configuration to use * the configuration to use

View File

@ -134,7 +134,8 @@ public class HFileArchiveUtil {
} }
/** /**
* Get the full path to the archive directory on the configured {@link FileSystem} * Get the full path to the archive directory on the configured
* {@link org.apache.hadoop.hbase.master.MasterFileSystem}
* @param conf to look for archive directory name and root directory. Cannot be null. Notes for * @param conf to look for archive directory name and root directory. Cannot be null. Notes for
* testing: requires a FileSystem root directory to be specified. * testing: requires a FileSystem root directory to be specified.
* @return the full {@link Path} to the archive directory, as defined by the configuration * @return the full {@link Path} to the archive directory, as defined by the configuration
@ -145,7 +146,8 @@ public class HFileArchiveUtil {
} }
/** /**
* Get the full path to the archive directory on the configured {@link FileSystem} * Get the full path to the archive directory on the configured
* {@link org.apache.hadoop.hbase.master.MasterFileSystem}
* @param rootdir {@link Path} to the root directory where hbase files are stored (for building * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
* the archive path) * the archive path)
* @return the full {@link Path} to the archive directory, as defined by the configuration * @return the full {@link Path} to the archive directory, as defined by the configuration
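A hedged usage sketch, assuming the public getArchivePath(Configuration) accessor on HFileArchiveUtil; by default this resolves to the "archive" directory under the HBase root:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.util.HFileArchiveUtil;

    public class ArchiveDirExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Resolves the archive location from the configured root directory,
        // as described in the javadoc above.
        Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
        System.out.println(archiveDir);
      }
    }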

View File

@ -28,9 +28,9 @@ import org.apache.zookeeper.KeeperException;
/** /**
* Tracker on cluster settings up in zookeeper. * Tracker on cluster settings up in zookeeper.
* This is not related to {@link ClusterStatus}. That class is a data structure * This is not related to {@link org.apache.hadoop.hbase.ClusterStatus}. That class
* that holds snapshot of current view on cluster. This class is about tracking * is a data structure that holds snapshot of current view on cluster. This class
* cluster attributes up in zookeeper. * is about tracking cluster attributes up in zookeeper.
* *
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private

View File

@ -35,7 +35,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSeq
import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException;
/** /**
* Common methods and attributes used by {@link SplitLogManager} and {@link SplitLogWorker} * Common methods and attributes used by {@link org.apache.hadoop.hbase.master.SplitLogManager}
* and {@link org.apache.hadoop.hbase.regionserver.SplitLogWorker}
* running distributed splitting of WAL logs. * running distributed splitting of WAL logs.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private